diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h index 582997577a049..954b86faac29e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -135,7 +135,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, * @dev: mlx4_dev. * @port: Physical port number. * @vport: Vport id. - * @out_param: Array of mlx4_vport_qos_param which holds the requested values. + * @in_param: Array of mlx4_vport_qos_param which holds the requested values. * * Returns 0 on success or a negative mlx4_core errno code. **/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 2d477f9a8cb7d..83a67ca43a416 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -81,7 +81,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ steering/dr_matcher.o steering/dr_rule.o \ - steering/dr_icm_pool.o \ + steering/dr_icm_pool.o steering/dr_buddy.o \ steering/dr_ste.o steering/dr_send.o \ steering/dr_cmd.o steering/dr_fw.o \ steering/dr_action.o steering/fs_dr.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 38e4f19d69f86..43271a3856ca3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -2,6 +2,8 @@ /* Copyright (c) 2019 Mellanox Technologies. */ #include "en/params.h" +#include "en/txrx.h" +#include "en_accel/tls_rxtx.h" static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) @@ -152,3 +154,35 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0; } + +u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +{ + bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); + u16 stop_room; + + stop_room = mlx5e_tls_get_stop_room(mdev, params); + stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + if (is_mpwqe) + /* A MPWQE can take up to the maximum-sized WQE + all the normal + * stop room can be taken if a new packet breaks the active + * MPWQE session and allocates its WQEs right away. 
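+ * Reserving a second max-size WQE worth of stop room on top of the
+ * base budget keeps the SQ from overflowing in that worst case.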
+ */ + stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + + return stop_room; +} + +int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params) +{ + size_t sq_size = 1 << params->log_sq_size; + u16 stop_room; + + stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params); + if (stop_room >= sq_size) { + netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n", + stop_room, sq_size); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index a87273e801b2d..187007ad3349c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -30,6 +30,7 @@ struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; bool is_mpw; + u16 stop_room; }; struct mlx5e_channel_param { @@ -124,4 +125,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_sq_param *param); +u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params); + #endif /* __MLX5_EN_PARAMS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index b140e13fdcc88..d16def68ecff7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -13,20 +13,20 @@ struct mlx5e_dump_wqe { (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB)) static u8 -mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags, +mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags, unsigned int sync_len) { /* Given the MTU and sync_len, calculates an upper bound for the * number of DUMP WQEs needed for the TX resync of a record. 
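 * The bound is one DUMP WQE per MTU-sized piece of sync_len, plus
 * nfrags more to absorb fragment boundaries.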
*/ - return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu); + return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu)); } -u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq) +u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params) { u16 num_dumps, stop_room = 0; - num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); + num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS); stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 7521c9be735b2..ee04e916fa210 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state { u32 tls_tisn; }; -u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq); +u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params); bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, struct sk_buff *skb, int datalen, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 6982b193ee8ae..f51c04284e4da 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -385,15 +385,13 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; } -u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) +u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - struct mlx5_core_dev *mdev = sq->channel->mdev; - if (!mlx5_accel_is_tls_device(mdev)) return 0; if (mlx5_accel_is_ktls_device(mdev)) - return mlx5e_ktls_get_stop_room(sq); + return mlx5e_ktls_get_stop_room(params); /* FPGA */ /* Resync SKB. 
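 * (i.e. room for the resync skb the FPGA TLS TX path may emit).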
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 5f162ad2ee8f3..9923132c94407 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -43,7 +43,7 @@ #include "en.h" #include "en/txrx.h" -u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq); +u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); @@ -71,7 +71,7 @@ mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; static inline void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} -static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) +static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d25a56ec6876a..42e61dc28ead7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -32,6 +32,7 @@ #include "en.h" #include "en/port.h" +#include "en/params.h" #include "en/xsk/pool.h" #include "lib/clock.h" @@ -369,6 +370,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, new_channels.params.log_rq_mtu_frames = log_rq_size; new_channels.params.log_sq_size = log_sq_size; + err = mlx5e_validate_params(priv, &new_channels.params); + if (err) + goto unlock; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; goto unlock; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b3f02aac7f268..8226a9d2b45ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1121,28 +1121,6 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) return 0; } -static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size) -{ - int sq_size = 1 << log_sq_size; - - sq->stop_room = mlx5e_tls_get_stop_room(sq); - sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); - if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) - /* A MPWQE can take up to the maximum-sized WQE + all the normal - * stop room can be taken if a new packet breaks the active - * MPWQE session and allocates its WQEs right away. 
- */ - sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); - - if (WARN_ON(sq->stop_room >= sq_size)) { - netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n", - sq->stop_room, sq_size); - return -ENOSPC; - } - - return 0; -} - static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, @@ -1176,9 +1154,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_TLS, &sq->state); if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); - err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size); - if (err) - return err; + sq->stop_room = param->stop_room; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -2225,6 +2201,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(sqc, sqc, allow_swp, allow_swp); param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); + param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params); mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); } @@ -3999,6 +3976,9 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; + err = mlx5e_validate_params(priv, &new_channels.params); + if (err) + goto out; if (params->xdp_prog && !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 8ebfe782f95e5..4ea5d6ddf56ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -189,19 +189,21 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) return count_eqe; } -static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags) +static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery, + unsigned long *flags) __acquires(&eq->lock) { - if (in_irq()) + if (!recovery) spin_lock(&eq->lock); else spin_lock_irqsave(&eq->lock, *flags); } -static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags) +static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery, + unsigned long *flags) __releases(&eq->lock) { - if (in_irq()) + if (!recovery) spin_unlock(&eq->lock); else spin_unlock_irqrestore(&eq->lock, *flags); @@ -223,11 +225,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb, struct mlx5_eqe *eqe; unsigned long flags; int num_eqes = 0; + bool recovery; dev = eq->dev; eqt = dev->priv.eq_table; - mlx5_eq_async_int_lock(eq_async, &flags); + recovery = action == ASYNC_EQ_RECOVER; + mlx5_eq_async_int_lock(eq_async, recovery, &flags); eqe = next_eqe_sw(eq); if (!eqe) @@ -249,9 +253,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb, out: eq_update_ci(eq, 1); - mlx5_eq_async_int_unlock(eq_async, &flags); + mlx5_eq_async_int_unlock(eq_async, recovery, &flags); - return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0; + return unlikely(recovery) ? 
num_eqes : 0; } void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h index 656f96be6e200..89ef592656c8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h @@ -47,11 +47,12 @@ /** * enum mlx5_fpga_access_type - Enumerated the different methods possible for * accessing the device memory address space + * + * @MLX5_FPGA_ACCESS_TYPE_I2C: Use the slow CX-FPGA I2C bus + * @MLX5_FPGA_ACCESS_TYPE_DONTCARE: Use the fastest available method */ enum mlx5_fpga_access_type { - /** Use the slow CX-FPGA I2C bus */ MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, - /** Use the fastest available method */ MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0, }; @@ -113,6 +114,7 @@ struct mlx5_fpga_conn_attr { * subsequent receives. */ void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf); + /** @cb_arg: A context to be passed to recv_cb callback */ void *cb_arg; }; @@ -145,7 +147,7 @@ void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn); /** * mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet - * @fdev: An FPGA SBU connection + * @conn: An FPGA SBU connection * @buf: The packet buffer * * Queues a packet for transmission over an FPGA SBU connection. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c new file mode 100644 index 0000000000000..7df11a019df96 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 - 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006 - 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. + */ + +#include "dr_types.h" + +int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int max_order) +{ + int i; + + buddy->max_order = max_order; + + INIT_LIST_HEAD(&buddy->list_node); + INIT_LIST_HEAD(&buddy->used_list); + INIT_LIST_HEAD(&buddy->hot_list); + + buddy->bitmap = kcalloc(buddy->max_order + 1, + sizeof(*buddy->bitmap), + GFP_KERNEL); + buddy->num_free = kcalloc(buddy->max_order + 1, + sizeof(*buddy->num_free), + GFP_KERNEL); + + if (!buddy->bitmap || !buddy->num_free) + goto err_free_all; + + /* Allocating max_order bitmaps, one for each order */ + + for (i = 0; i <= buddy->max_order; ++i) { + unsigned int size = 1 << (buddy->max_order - i); + + buddy->bitmap[i] = bitmap_zalloc(size, GFP_KERNEL); + if (!buddy->bitmap[i]) + goto err_out_free_each_bit_per_order; + } + + /* In the beginning, we have only one order that is available for + * use (the biggest one), so mark the first bit in both bitmaps. 
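+ * A set bit in bitmap[order] marks a free block of (1 << order)
+ * entries; num_free[order] counts how many such bits are set.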
+ */ + + bitmap_set(buddy->bitmap[buddy->max_order], 0, 1); + + buddy->num_free[buddy->max_order] = 1; + + return 0; + +err_out_free_each_bit_per_order: + for (i = 0; i <= buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + +err_free_all: + kfree(buddy->num_free); + kfree(buddy->bitmap); + return -ENOMEM; +} + +void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy) +{ + int i; + + list_del(&buddy->list_node); + + for (i = 0; i <= buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + + kfree(buddy->num_free); + kfree(buddy->bitmap); +} + +static int dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int start_order, + unsigned int *segment, + unsigned int *order) +{ + unsigned int seg, order_iter, m; + + for (order_iter = start_order; + order_iter <= buddy->max_order; ++order_iter) { + if (!buddy->num_free[order_iter]) + continue; + + m = 1 << (buddy->max_order - order_iter); + seg = find_first_bit(buddy->bitmap[order_iter], m); + + if (WARN(seg >= m, + "ICM Buddy: failed finding free mem for order %d\n", + order_iter)) + return -ENOMEM; + + break; + } + + if (order_iter > buddy->max_order) + return -ENOMEM; + + *segment = seg; + *order = order_iter; + return 0; +} + +/** + * mlx5dr_buddy_alloc_mem() - Update second level bitmap. + * @buddy: Buddy to update. + * @order: Order of the buddy to update. + * @segment: Segment number. + * + * This function finds the first area of the ICM memory managed by this buddy. + * It uses the data structures of the buddy system in order to find the first + * area of free place, starting from the current order till the maximum order + * in the system. + * + * Return: 0 when segment is set, non-zero error status otherwise. + * + * The function returns the location (segment) in the whole buddy ICM memory + * area - the index of the memory segment that is available for use. + */ +int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int order, + unsigned int *segment) +{ + unsigned int seg, order_iter; + int err; + + err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter); + if (err) + return err; + + bitmap_clear(buddy->bitmap[order_iter], seg, 1); + --buddy->num_free[order_iter]; + + /* If we found free memory in some order that is bigger than the + * required order, we need to split every order between the required + * order and the order that we found into two parts, and mark accordingly. + */ + while (order_iter > order) { + --order_iter; + seg <<= 1; + bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1); + ++buddy->num_free[order_iter]; + } + + seg <<= order; + *segment = seg; + + return 0; +} + +void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int seg, unsigned int order) +{ + seg >>= order; + + /* Whenever a segment is free, + * the mem is added to the buddy that gave it. 
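+ * While the sibling block (seg ^ 1) is free as well, the two halves
+ * are merged and the freed block is recorded one order higher.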
+ */ + while (test_bit(seg ^ 1, buddy->bitmap[order])) { + bitmap_clear(buddy->bitmap[order], seg ^ 1, 1); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + bitmap_set(buddy->bitmap[order], seg, 1); + + ++buddy->num_free[order]; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 6bd34b2930071..ebc879052e42b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -93,12 +93,12 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id); caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); - if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) { + if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) { caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); } - if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) { + if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) { caps->flex_parser_id_icmpv6_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0); caps->flex_parser_id_icmpv6_dw1 = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index cc33515b9abac..66c24767e3b00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -4,50 +4,16 @@ #include "dr_types.h" #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 -#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024) - -struct mlx5dr_icm_pool; - -struct mlx5dr_icm_bucket { - struct mlx5dr_icm_pool *pool; - - /* Chunks that aren't visible to HW not directly and not in cache */ - struct list_head free_list; - unsigned int free_list_count; - - /* Used chunks, HW may be accessing this memory */ - struct list_head used_list; - unsigned int used_list_count; - - /* HW may be accessing this memory but at some future, - * undetermined time, it might cease to do so. Before deciding to call - * sync_ste, this list is moved to sync_list - */ - struct list_head hot_list; - unsigned int hot_list_count; - - /* Pending sync list, entries from the hot list are moved to this list. 
- * sync_ste is executed and then sync_list is concatenated to the free list - */ - struct list_head sync_list; - unsigned int sync_list_count; - - u32 total_chunks; - u32 num_of_entries; - u32 entry_size; - /* protect the ICM bucket */ - struct mutex mutex; -}; +#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) struct mlx5dr_icm_pool { - struct mlx5dr_icm_bucket *buckets; enum mlx5dr_icm_type icm_type; enum mlx5dr_icm_chunk_size max_log_chunk_sz; - enum mlx5dr_icm_chunk_size num_of_buckets; - struct list_head icm_mr_list; - /* protect the ICM MR list */ - struct mutex mr_mutex; struct mlx5dr_domain *dmn; + /* memory management */ + struct mutex mutex; /* protect the ICM pool and ICM buddy */ + struct list_head buddy_mem_list; + u64 hot_memory_size; }; struct mlx5dr_icm_dm { @@ -58,13 +24,11 @@ struct mlx5dr_icm_dm { }; struct mlx5dr_icm_mr { - struct mlx5dr_icm_pool *pool; struct mlx5_core_mkey mkey; struct mlx5dr_icm_dm dm; - size_t used_length; + struct mlx5dr_domain *dmn; size_t length; u64 icm_start_addr; - struct list_head mr_list; }; static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev, @@ -107,8 +71,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) if (!icm_mr) return NULL; - icm_mr->pool = pool; - INIT_LIST_HEAD(&icm_mr->mr_list); + icm_mr->dmn = pool->dmn; icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, pool->icm_type); @@ -150,8 +113,6 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) goto free_mkey; } - list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list); - return icm_mr; free_mkey: @@ -166,10 +127,9 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) { - struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev; + struct mlx5_core_dev *mdev = icm_mr->dmn->mdev; struct mlx5dr_icm_dm *dm = &icm_mr->dm; - list_del(&icm_mr->mr_list); mlx5_core_destroy_mkey(mdev, &icm_mr->mkey); mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0, dm->addr, dm->obj_id); @@ -178,19 +138,17 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) { - struct mlx5dr_icm_bucket *bucket = chunk->bucket; - - chunk->ste_arr = kvzalloc(bucket->num_of_entries * + chunk->ste_arr = kvzalloc(chunk->num_of_entries * sizeof(chunk->ste_arr[0]), GFP_KERNEL); if (!chunk->ste_arr) return -ENOMEM; - chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries * + chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * DR_STE_SIZE_REDUCED, GFP_KERNEL); if (!chunk->hw_ste_arr) goto out_free_ste_arr; - chunk->miss_list = kvmalloc(bucket->num_of_entries * + chunk->miss_list = kvmalloc(chunk->num_of_entries * sizeof(chunk->miss_list[0]), GFP_KERNEL); if (!chunk->miss_list) goto out_free_hw_ste_arr; @@ -204,72 +162,6 @@ static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) return -ENOMEM; } -static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket) -{ - size_t mr_free_size, mr_req_size, mr_row_size; - struct mlx5dr_icm_pool *pool = bucket->pool; - struct mlx5dr_icm_mr *icm_mr = NULL; - struct mlx5dr_icm_chunk *chunk; - int i, err = 0; - - mr_req_size = bucket->num_of_entries * bucket->entry_size; - mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, - pool->icm_type); - mutex_lock(&pool->mr_mutex); - if (!list_empty(&pool->icm_mr_list)) { - icm_mr = list_last_entry(&pool->icm_mr_list, - struct mlx5dr_icm_mr, mr_list); - - if (icm_mr) - mr_free_size = icm_mr->dm.length - icm_mr->used_length; - } - - if 
(!icm_mr || mr_free_size < mr_row_size) { - icm_mr = dr_icm_pool_mr_create(pool); - if (!icm_mr) { - err = -ENOMEM; - goto out_err; - } - } - - /* Create memory aligned chunks */ - for (i = 0; i < mr_row_size / mr_req_size; i++) { - chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL); - if (!chunk) { - err = -ENOMEM; - goto out_err; - } - - chunk->bucket = bucket; - chunk->rkey = icm_mr->mkey.key; - /* mr start addr is zero based */ - chunk->mr_addr = icm_mr->used_length; - chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length; - icm_mr->used_length += mr_req_size; - chunk->num_of_entries = bucket->num_of_entries; - chunk->byte_size = chunk->num_of_entries * bucket->entry_size; - - if (pool->icm_type == DR_ICM_TYPE_STE) { - err = dr_icm_chunk_ste_init(chunk); - if (err) - goto out_free_chunk; - } - - INIT_LIST_HEAD(&chunk->chunk_list); - list_add(&chunk->chunk_list, &bucket->free_list); - bucket->free_list_count++; - bucket->total_chunks++; - } - mutex_unlock(&pool->mr_mutex); - return 0; - -out_free_chunk: - kvfree(chunk); -out_err: - mutex_unlock(&pool->mr_mutex); - return err; -} - static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) { kvfree(chunk->miss_list); @@ -277,166 +169,199 @@ static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) kvfree(chunk->ste_arr); } -static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk) +static enum mlx5dr_icm_type +get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk) +{ + return chunk->buddy_mem->pool->icm_type; +} + +static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk, + struct mlx5dr_icm_buddy_mem *buddy) { - struct mlx5dr_icm_bucket *bucket = chunk->bucket; + enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk); + buddy->used_memory -= chunk->byte_size; list_del(&chunk->chunk_list); - bucket->total_chunks--; - if (bucket->pool->icm_type == DR_ICM_TYPE_STE) + if (icm_type == DR_ICM_TYPE_STE) dr_icm_chunk_ste_cleanup(chunk); kvfree(chunk); } -static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *bucket, - enum mlx5dr_icm_chunk_size chunk_size) +static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) { - if (pool->icm_type == DR_ICM_TYPE_STE) - bucket->entry_size = DR_STE_SIZE; - else - bucket->entry_size = DR_MODIFY_ACTION_SIZE; - - bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); - bucket->pool = pool; - mutex_init(&bucket->mutex); - INIT_LIST_HEAD(&bucket->free_list); - INIT_LIST_HEAD(&bucket->used_list); - INIT_LIST_HEAD(&bucket->hot_list); - INIT_LIST_HEAD(&bucket->sync_list); + struct mlx5dr_icm_buddy_mem *buddy; + struct mlx5dr_icm_mr *icm_mr; + + icm_mr = dr_icm_pool_mr_create(pool); + if (!icm_mr) + return -ENOMEM; + + buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL); + if (!buddy) + goto free_mr; + + if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz)) + goto err_free_buddy; + + buddy->icm_mr = icm_mr; + buddy->pool = pool; + + /* add it to the -start- of the list in order to search in it first */ + list_add(&buddy->list_node, &pool->buddy_mem_list); + + return 0; + +err_free_buddy: + kvfree(buddy); +free_mr: + dr_icm_pool_mr_destroy(icm_mr); + return -ENOMEM; } -static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket) +static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy) { struct mlx5dr_icm_chunk *chunk, *next; - mutex_destroy(&bucket->mutex); - list_splice_tail_init(&bucket->sync_list, &bucket->free_list); - list_splice_tail_init(&bucket->hot_list, &bucket->free_list); + 
list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list) + dr_icm_chunk_destroy(chunk, buddy); + + list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list) + dr_icm_chunk_destroy(chunk, buddy); - list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list) - dr_icm_chunk_destroy(chunk); + dr_icm_pool_mr_destroy(buddy->icm_mr); - WARN_ON(bucket->total_chunks != 0); + mlx5dr_buddy_cleanup(buddy); - /* Cleanup of unreturned chunks */ - list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list) - dr_icm_chunk_destroy(chunk); + kvfree(buddy); } -static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool) +static struct mlx5dr_icm_chunk * +dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size, + struct mlx5dr_icm_buddy_mem *buddy_mem_pool, + unsigned int seg) { - u64 hot_size = 0; - int chunk_order; + struct mlx5dr_icm_chunk *chunk; + int offset; - for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++) - hot_size += pool->buckets[chunk_order].hot_list_count * - mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type); + chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return NULL; - return hot_size; -} + offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg; + + chunk->rkey = buddy_mem_pool->icm_mr->mkey.key; + chunk->mr_addr = offset; + chunk->icm_addr = + (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset; + chunk->num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); + chunk->byte_size = + mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); + chunk->seg = seg; + + if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { + mlx5dr_err(pool->dmn, + "Failed to init ste arrays (order: %d)\n", + chunk_size); + goto out_free_chunk; + } -static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *bucket) -{ - u64 bytes_for_sync; + buddy_mem_pool->used_memory += chunk->byte_size; + chunk->buddy_mem = buddy_mem_pool; + INIT_LIST_HEAD(&chunk->chunk_list); - bytes_for_sync = dr_icm_hot_mem_size(pool); - if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count) - return false; + /* chunk now is part of the used_list */ + list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); - return true; -} + return chunk; -static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket) -{ - list_splice_tail_init(&bucket->hot_list, &bucket->sync_list); - bucket->sync_list_count += bucket->hot_list_count; - bucket->hot_list_count = 0; +out_free_chunk: + kvfree(chunk); + return NULL; } -static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket) +static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) { - list_splice_tail_init(&bucket->sync_list, &bucket->free_list); - bucket->free_list_count += bucket->sync_list_count; - bucket->sync_list_count = 0; -} + if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) + return true; -static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket) -{ - list_splice_tail_init(&bucket->sync_list, &bucket->hot_list); - bucket->hot_list_count += bucket->sync_list_count; - bucket->sync_list_count = 0; + return false; } -static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *cb, - bool buckets[DR_CHUNK_SIZE_MAX]) +static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) { - struct mlx5dr_icm_bucket *bucket; - int i; - - for (i = 0; i < 
pool->num_of_buckets; i++) { - bucket = &pool->buckets[i]; - if (bucket == cb) { - dr_icm_chill_bucket_start(bucket); - continue; - } + struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy; + int err; - /* Freeing the mutex is done at the end of that process, after - * sync_ste was executed at dr_icm_chill_buckets_end func. - */ - if (mutex_trylock(&bucket->mutex)) { - dr_icm_chill_bucket_start(bucket); - buckets[i] = true; - } + err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); + if (err) { + mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err); + return err; } -} -static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *cb, - bool buckets[DR_CHUNK_SIZE_MAX]) -{ - struct mlx5dr_icm_bucket *bucket; - int i; - - for (i = 0; i < pool->num_of_buckets; i++) { - bucket = &pool->buckets[i]; - if (bucket == cb) { - dr_icm_chill_bucket_end(bucket); - continue; - } + list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) { + struct mlx5dr_icm_chunk *chunk, *tmp_chunk; - if (!buckets[i]) - continue; + list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) { + mlx5dr_buddy_free_mem(buddy, chunk->seg, + ilog2(chunk->num_of_entries)); + pool->hot_memory_size -= chunk->byte_size; + dr_icm_chunk_destroy(chunk, buddy); + } - dr_icm_chill_bucket_end(bucket); - mutex_unlock(&bucket->mutex); + if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_buddy_destroy(buddy); } + + return 0; } -static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *cb, - bool buckets[DR_CHUNK_SIZE_MAX]) +static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size, + struct mlx5dr_icm_buddy_mem **buddy, + unsigned int *seg) { - struct mlx5dr_icm_bucket *bucket; - int i; - - for (i = 0; i < pool->num_of_buckets; i++) { - bucket = &pool->buckets[i]; - if (bucket == cb) { - dr_icm_chill_bucket_abort(bucket); - continue; - } + struct mlx5dr_icm_buddy_mem *buddy_mem_pool; + bool new_mem = false; + int err; - if (!buckets[i]) - continue; +alloc_buddy_mem: + /* find the next free place from the buddy list */ + list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) { + err = mlx5dr_buddy_alloc_mem(buddy_mem_pool, + chunk_size, seg); + if (!err) + goto found; + + if (WARN_ON(new_mem)) { + /* We have new memory pool, first in the list */ + mlx5dr_err(pool->dmn, + "No memory for order: %d\n", + chunk_size); + goto out; + } + } - dr_icm_chill_bucket_abort(bucket); - mutex_unlock(&bucket->mutex); + /* no more available allocators in that pool, create new */ + err = dr_icm_buddy_create(pool); + if (err) { + mlx5dr_err(pool->dmn, + "Failed creating buddy for order %d\n", + chunk_size); + goto out; } + + /* mark we have new memory, first in list */ + new_mem = true; + goto alloc_buddy_mem; + +found: + *buddy = buddy_mem_pool; +out: + return err; } /* Allocate an ICM chunk, each chunk holds a piece of ICM memory and @@ -446,68 +371,48 @@ struct mlx5dr_icm_chunk * mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size) { - struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */ - bool buckets[DR_CHUNK_SIZE_MAX] = {}; - struct mlx5dr_icm_bucket *bucket; - int err; + struct mlx5dr_icm_chunk *chunk = NULL; + struct mlx5dr_icm_buddy_mem *buddy; + unsigned int seg; + int ret; if (chunk_size > pool->max_log_chunk_sz) return NULL; - bucket = &pool->buckets[chunk_size]; - - mutex_lock(&bucket->mutex); - - /* 
Take chunk from pool if available, otherwise allocate new chunks */ - if (list_empty(&bucket->free_list)) { - if (dr_icm_reuse_hot_entries(pool, bucket)) { - dr_icm_chill_buckets_start(pool, bucket, buckets); - err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); - if (err) { - dr_icm_chill_buckets_abort(pool, bucket, buckets); - mlx5dr_err(pool->dmn, "Sync_steering failed\n"); - chunk = NULL; - goto out; - } - dr_icm_chill_buckets_end(pool, bucket, buckets); - } else { - dr_icm_chunks_create(bucket); - } - } + mutex_lock(&pool->mutex); + /* find mem, get back the relevant buddy pool and seg in that mem */ + ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg); + if (ret) + goto out; - if (!list_empty(&bucket->free_list)) { - chunk = list_last_entry(&bucket->free_list, - struct mlx5dr_icm_chunk, - chunk_list); - if (chunk) { - list_del_init(&chunk->chunk_list); - list_add_tail(&chunk->chunk_list, &bucket->used_list); - bucket->free_list_count--; - bucket->used_list_count++; - } - } + chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg); + if (!chunk) + goto out_err; + + goto out; + +out_err: + mlx5dr_buddy_free_mem(buddy, seg, chunk_size); out: - mutex_unlock(&bucket->mutex); + mutex_unlock(&pool->mutex); return chunk; } void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk) { - struct mlx5dr_icm_bucket *bucket = chunk->bucket; + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + struct mlx5dr_icm_pool *pool = buddy->pool; - if (bucket->pool->icm_type == DR_ICM_TYPE_STE) { - memset(chunk->ste_arr, 0, - bucket->num_of_entries * sizeof(chunk->ste_arr[0])); - memset(chunk->hw_ste_arr, 0, - bucket->num_of_entries * DR_STE_SIZE_REDUCED); - } + /* move the memory to the waiting list AKA "hot" */ + mutex_lock(&pool->mutex); + list_move_tail(&chunk->chunk_list, &buddy->hot_list); + pool->hot_memory_size += chunk->byte_size; + + /* Check if we have chunks that are waiting for sync-ste */ + if (dr_icm_pool_is_sync_required(pool)) + dr_icm_pool_sync_all_buddy_pools(pool); - mutex_lock(&bucket->mutex); - list_del_init(&chunk->chunk_list); - list_add_tail(&chunk->chunk_list, &bucket->hot_list); - bucket->hot_list_count++; - bucket->used_list_count--; - mutex_unlock(&bucket->mutex); + mutex_unlock(&pool->mutex); } struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, @@ -515,7 +420,6 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, { enum mlx5dr_icm_chunk_size max_log_chunk_sz; struct mlx5dr_icm_pool *pool; - int i; if (icm_type == DR_ICM_TYPE_STE) max_log_chunk_sz = dmn->info.max_log_sw_icm_sz; @@ -526,43 +430,24 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, if (!pool) return NULL; - pool->buckets = kcalloc(max_log_chunk_sz + 1, - sizeof(pool->buckets[0]), - GFP_KERNEL); - if (!pool->buckets) - goto free_pool; - pool->dmn = dmn; pool->icm_type = icm_type; pool->max_log_chunk_sz = max_log_chunk_sz; - pool->num_of_buckets = max_log_chunk_sz + 1; - INIT_LIST_HEAD(&pool->icm_mr_list); - for (i = 0; i < pool->num_of_buckets; i++) - dr_icm_bucket_init(pool, &pool->buckets[i], i); + INIT_LIST_HEAD(&pool->buddy_mem_list); - mutex_init(&pool->mr_mutex); + mutex_init(&pool->mutex); return pool; - -free_pool: - kvfree(pool); - return NULL; } void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool) { - struct mlx5dr_icm_mr *icm_mr, *next; - int i; - - mutex_destroy(&pool->mr_mutex); - - list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list) - dr_icm_pool_mr_destroy(icm_mr); + struct 
mlx5dr_icm_buddy_mem *buddy, *tmp_buddy; - for (i = 0; i < pool->num_of_buckets; i++) - dr_icm_bucket_cleanup(&pool->buckets[i]); + list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) + dr_icm_buddy_destroy(buddy); - kfree(pool->buckets); + mutex_destroy(&pool->mutex); kvfree(pool); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 7df883686d46b..cb5202e17856a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -85,7 +85,7 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec) (_misc2)._inner_outer##_first_mpls_s_bos || \ (_misc2)._inner_outer##_first_mpls_ttl) -static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc) +static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc) { return (misc->gre_key_h || misc->gre_key_l || misc->gre_protocol || misc->gre_c_present || @@ -98,12 +98,12 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc) (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \ (_misc2).outer_first_mpls_over_##gre_udp##_ttl) -#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \ +#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp)) static bool -dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) +dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) { return (misc3->outer_vxlan_gpe_vni || misc3->outer_vxlan_gpe_next_protocol || @@ -111,21 +111,20 @@ dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) } static bool -dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps) +dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & - MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; + return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; } static bool -dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask, - struct mlx5dr_domain *dmn) +dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) { - return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) && - dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps); + return dr_mask_is_vxlan_gpe_set(&mask->misc3) && + dr_matcher_supp_vxlan_gpe(&dmn->info.caps); } -static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc) +static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc) { return misc->geneve_vni || misc->geneve_oam || @@ -134,26 +133,46 @@ static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc) } static bool -dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps) +dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & - MLX5_FLEX_PARSER_GENEVE_ENABLED; + return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED; } static bool -dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask, - struct mlx5dr_domain *dmn) +dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) { - return dr_mask_is_misc_geneve_set(&mask->misc) && - dr_matcher_supp_flex_parser_geneve(&dmn->info.caps); + return dr_mask_is_tnl_geneve_set(&mask->misc) && + dr_matcher_supp_tnl_geneve(&dmn->info.caps); } -static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3) +static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) +{ + return 
caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED; +} + +static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED; +} + +static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3) { return (misc3->icmpv6_type || misc3->icmpv6_code || misc3->icmpv6_header_data); } +static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + if (DR_MASK_IS_ICMPV4_SET(&mask->misc3)) + return dr_matcher_supp_icmp_v4(&dmn->info.caps); + else if (dr_mask_is_icmpv6_set(&mask->misc3)) + return dr_matcher_supp_icmp_v6(&dmn->info.caps); + + return false; +} + static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2) { return misc2->metadata_reg_a; @@ -257,7 +276,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (dr_mask_is_smac_set(&mask.outer) && dr_mask_is_dmac_set(&mask.outer)) { - mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask, + mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask, inner, rx); } @@ -277,8 +296,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner, rx); if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer)) - mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask, + inner, rx); } else { if (dr_mask_is_ipv4_5_tuple_set(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, @@ -289,14 +308,12 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner, rx); } - if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn)) - mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++], - &mask, - inner, rx); - else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn)) - mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++], - &mask, - inner, rx); + if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn)) + mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask, + inner, rx); + else if (dr_mask_is_tnl_geneve(&mask, dmn)) + mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask, + inner, rx); if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); @@ -304,22 +321,18 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer)) mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); - if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2)) - mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, - inner, rx); + if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) + mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx); - if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) && - mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) || - (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) && - mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) { - ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++], - &mask, &dmn->info.caps, - inner, rx); + if (dr_mask_is_icmp(&mask, dmn)) { + ret = mlx5dr_ste_build_icmp(&sb[idx++], + &mask, &dmn->info.caps, + inner, rx); if (ret) return ret; } - if (dr_mask_is_gre_set(&mask.misc)) - mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx); + if (dr_mask_is_tnl_gre_set(&mask.misc)) + mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx); } /* Inner */ @@ -334,7 +347,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (dr_mask_is_smac_set(&mask.inner) && dr_mask_is_dmac_set(&mask.inner)) { - mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], + mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask, inner, rx); } @@ -354,8 
+367,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner, rx); if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner)) - mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask, + inner, rx); } else { if (dr_mask_is_ipv4_5_tuple_set(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, @@ -372,8 +385,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner)) mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); - if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2)) - mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx); + if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) + mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx); } /* Empty matcher, takes all */ if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index b01aaec75622f..d275823bff2ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -1090,7 +1090,7 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { @@ -1594,9 +1594,9 @@ static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask); @@ -1693,8 +1693,8 @@ static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, bool inner, bool rx) +void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, bool inner, bool rx) { dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask); @@ -1771,9 +1771,9 @@ static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask); @@ -1792,8 +1792,8 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask, struct mlx5dr_cmd_caps *caps, u8 *bit_mask) { + bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3); struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3; - bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask); u32 icmp_header_data_mask; u32 icmp_type_mask; u32 icmp_code_mask; @@ -1869,7 +1869,7 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value, u32 icmp_code; bool is_ipv4; - is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3); + is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3); if (is_ipv4) { icmp_header_data = misc_3->icmpv4_header_data; icmp_type = misc_3->icmpv4_type; @@ -1928,10 +1928,10 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value, return 0; } -int 
mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - bool inner, bool rx) +int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) { int ret; @@ -2069,9 +2069,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner, sb->bit_mask); @@ -2122,9 +2122,9 @@ dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask); sb->rx = rx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index f50f3b107aa31..3e423c8ed22fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -114,7 +114,7 @@ enum mlx5dr_ipv { struct mlx5dr_icm_pool; struct mlx5dr_icm_chunk; -struct mlx5dr_icm_bucket; +struct mlx5dr_icm_buddy_mem; struct mlx5dr_ste_htbl; struct mlx5dr_match_param; struct mlx5dr_cmd_caps; @@ -288,7 +288,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, struct mlx5dr_match_param *value, u8 *ste_arr); -void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder, +void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder, struct mlx5dr_match_param *mask, bool inner, bool rx); void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb, @@ -312,31 +312,31 @@ void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb, void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); +void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); +void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -int 
mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - bool inner, bool rx); -void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); -void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); +void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); @@ -588,9 +588,9 @@ struct mlx5dr_match_param { struct mlx5dr_match_misc3 misc3; }; -#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ - (_misc3)->icmpv4_code || \ - (_misc3)->icmpv4_header_data) +#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ + (_misc3)->icmpv4_code || \ + (_misc3)->icmpv4_header_data) struct mlx5dr_esw_caps { u64 drop_icm_address_rx; @@ -731,7 +731,6 @@ struct mlx5dr_action { struct mlx5dr_domain *dmn; struct mlx5dr_icm_chunk *chunk; u8 *data; - u32 data_size; u16 num_of_actions; u32 index; u8 allow_rx:1; @@ -804,7 +803,7 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste, struct mlx5dr_ste *ste); struct mlx5dr_icm_chunk { - struct mlx5dr_icm_bucket *bucket; + struct mlx5dr_icm_buddy_mem *buddy_mem; struct list_head chunk_list; u32 rkey; u32 num_of_entries; @@ -812,6 +811,11 @@ struct mlx5dr_icm_chunk { u64 icm_addr; u64 mr_addr; + /* indicates the index of this chunk in the whole memory, + * used for deleting the chunk from the buddy + */ + unsigned int seg; + /* Memory optimisation */ struct mlx5dr_ste *ste_arr; u8 *hw_ste_arr; @@ -840,23 +844,20 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn) mlx5dr_domain_nic_unlock(&dmn->info.rx); } -static inline int -mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps) -{ - return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED; -} - -static inline int -mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps) -{ - return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED; -} - int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, enum mlx5dr_ipv outer_ipv, enum mlx5dr_ipv inner_ipv); +static inline int +mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type) +{ + if (icm_type == DR_ICM_TYPE_STE) + return DR_STE_SIZE; + + return DR_MODIFY_ACTION_SIZE; +} + static inline u32 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size) { @@ -870,11 +871,7 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size, int num_of_entries; int entry_size; - if (icm_type == DR_ICM_TYPE_STE) - entry_size = DR_STE_SIZE; - else - entry_size = DR_MODIFY_ACTION_SIZE; - + entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type); num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); return entry_size * num_of_entries; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 7914fe3fc68d8..4177786b8eaf2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -127,4 +127,36 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner); } +/* buddy functions & 
structure */
+
+struct mlx5dr_icm_mr;
+
+struct mlx5dr_icm_buddy_mem {
+	unsigned long		**bitmap;
+	unsigned int		*num_free;
+	u32			max_order;
+	struct list_head	list_node;
+	struct mlx5dr_icm_mr	*icm_mr;
+	struct mlx5dr_icm_pool	*pool;
+
+	/* This is the list of used chunks. HW may be accessing this memory */
+	struct list_head	used_list;
+	u64			used_memory;
+
+	/* Hardware may be accessing this memory, but at some future,
+	 * undetermined time, it might cease to do so.
+	 * A sync_ste command sets these chunks free.
+	 */
+	struct list_head	hot_list;
+};
+
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+		      unsigned int max_order);
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy);
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+			   unsigned int order,
+			   unsigned int *segment);
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+			   unsigned int seg, unsigned int order);
+
 #endif /* _MLX5DR_H_ */
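The buddy allocator introduced in dr_buddy.c rests on two invariants: a set
bit in bitmap[order] marks a free block of (1 << order) entries, and
alloc/free walk the orders by splitting a larger free block downward or
merging free sibling blocks (seg ^ 1) upward. The user-space sketch below is
illustrative only, not part of the patch: buddy_alloc, buddy_free and
MAX_ORDER are hypothetical names, and single-word bitmaps stand in for the
driver's per-order bitmap arrays. It mirrors only the order/segment
arithmetic shown in mlx5dr_buddy_alloc_mem()/mlx5dr_buddy_free_mem(), not
the ICM bookkeeping around it.

#include <stdio.h>

#define MAX_ORDER 4	/* pool of 1 << MAX_ORDER base segments (kept small) */

/* bit set == block of (1 << order) segments starting at (seg << order) is free */
static unsigned long bitmap[MAX_ORDER + 1];

static int buddy_alloc(unsigned int order, unsigned int *segment)
{
	unsigned int o, seg = 0, m;

	/* find the first free block at the lowest sufficient order */
	for (o = order; o <= MAX_ORDER; o++)
		for (m = 1u << (MAX_ORDER - o), seg = 0; seg < m; seg++)
			if (bitmap[o] & (1ul << seg))
				goto found;
	return -1;
found:
	bitmap[o] &= ~(1ul << seg);
	while (o > order) {		/* split: publish the upper half below */
		o--;
		seg <<= 1;
		bitmap[o] |= 1ul << (seg ^ 1);
	}
	*segment = seg << order;	/* absolute base-segment index */
	return 0;
}

static void buddy_free(unsigned int seg, unsigned int order)
{
	seg >>= order;
	/* merge with the free sibling for as long as one exists */
	while (order < MAX_ORDER && (bitmap[order] & (1ul << (seg ^ 1)))) {
		bitmap[order] &= ~(1ul << (seg ^ 1));
		seg >>= 1;
		order++;
	}
	bitmap[order] |= 1ul << seg;
}

int main(void)
{
	unsigned int seg;

	bitmap[MAX_ORDER] = 1;	/* start with one free block of the top order */
	if (!buddy_alloc(1, &seg))
		printf("allocated 2 segments at %u\n", seg);	/* prints 0 */
	buddy_free(seg, 1);	/* sibling chain merges back up to MAX_ORDER */
	return 0;
}

In the driver, freeing is deferred through the per-buddy hot_list until a
sync_ste flush; once dr_icm_pool_sync_all_buddy_pools() runs, each hot chunk
ends in the same mlx5dr_buddy_free_mem() merge shown above.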