diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 351d385158e6..9d5a6f8d7438 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -610,130 +610,25 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
 	return 0;
 }
 
-static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
-					 int mtu)
-{
-	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
-}
-
-#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */
-
-static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
-				  u16 delay)
-{
-	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
-							    BITS_PER_BYTE));
-	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
-								   mtu);
-}
-
-/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 58752
-
-static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
-				     u16 delay, bool pfc, bool pause)
-{
-	if (pfc)
-		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
-	else if (pause)
-		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
-	else
-		return 0;
-}
-
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
-				 bool lossy)
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
 {
-	if (lossy)
-		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
-	else
-		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
-						    thres);
-}
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp_hdroom orig_hdroom;
+	struct mlxsw_sp_hdroom hdroom;
+	int err;
 
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
-				 u8 *prio_tc, bool pause_en,
-				 struct ieee_pfc *my_pfc)
-{
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
-	u16 delay = !!my_pfc ? my_pfc->delay : 0;
-	char pbmc_pl[MLXSW_REG_PBMC_LEN];
-	u32 taken_headroom_cells = 0;
-	u32 max_headroom_cells;
-	int i, j, err;
 
+	orig_hdroom = *mlxsw_sp_port->hdroom;
 
-	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
+	hdroom = orig_hdroom;
+	hdroom.mtu = mtu;
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
 
-	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
-	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-	if (err)
+	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+	if (err) {
+		netdev_err(dev, "Failed to configure port's headroom\n");
 		return err;
-
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		bool configure = false;
-		bool pfc = false;
-		u16 thres_cells;
-		u16 delay_cells;
-		u16 total_cells;
-		bool lossy;
-
-		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
-			if (prio_tc[j] == i) {
-				pfc = pfc_en & BIT(j);
-				configure = true;
-				break;
-			}
-		}
-
-		if (!configure)
-			continue;
-
-		lossy = !(pfc || pause_en);
-		thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
-		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
-		delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
-							pfc, pause_en);
-		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
-		total_cells = thres_cells + delay_cells;
-
-		taken_headroom_cells += total_cells;
-		if (taken_headroom_cells > max_headroom_cells)
-			return -ENOBUFS;
-
-		mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
-				     thres_cells, lossy);
 	}
 
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
-			       int mtu, bool pause_en)
-{
-	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
-	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
-	struct ieee_pfc *my_pfc;
-	u8 *prio_tc;
-
-	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
-	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
-
-	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
-					    pause_en, my_pfc);
-}
-
-static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
-{
-	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
-	int err;
-
-	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
-	if (err)
-		return err;
 	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
 	if (err)
 		goto err_port_mtu_set;
@@ -741,7 +636,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
 	return 0;
 
 err_port_mtu_set:
-	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
 	return err;
 }
 
@@ -1709,6 +1604,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
 err_port_tc_mc_mode:
 err_port_ets_init:
+	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
 err_port_buffers_init:
 err_port_admin_status_set:
 err_port_mtu_set:
@@ -1745,6 +1641,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
 	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2800,6 +2697,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
 	mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
 	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
+	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
 	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
 	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
 	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
@@ -2828,6 +2726,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
 	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
 	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
 	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
 	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
 	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
@@ -2854,6 +2753,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
 	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
 	mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
 	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
 	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
 	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
 	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a23b677d2144..247a6aebd402 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -125,6 +125,7 @@ struct mlxsw_sp_mr_tcam_ops;
 struct mlxsw_sp_acl_rulei_ops;
 struct mlxsw_sp_acl_tcam_ops;
 struct mlxsw_sp_nve_ops;
+struct mlxsw_sp_sb_ops;
 struct mlxsw_sp_sb_vals;
 struct mlxsw_sp_port_type_speed_ops;
 struct mlxsw_sp_ptp_state;
@@ -171,6 +172,7 @@ struct mlxsw_sp {
 	const struct mlxsw_sp_nve_ops **nve_ops_arr;
 	const struct mlxsw_sp_rif_ops **rif_ops_arr;
 	const struct mlxsw_sp_sb_vals *sb_vals;
+	const struct mlxsw_sp_sb_ops *sb_ops;
 	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
 	const struct mlxsw_sp_ptp_ops *ptp_ops;
 	const struct mlxsw_sp_span_ops *span_ops;
@@ -316,6 +318,7 @@ struct mlxsw_sp_port {
 	u8 split_base_local_port;
 	int max_mtu;
 	u32 max_speed;
+	struct mlxsw_sp_hdroom *hdroom;
 };
 
 struct mlxsw_sp_port_type_speed_ops {
@@ -412,34 +415,62 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
 	return NULL;
 }
 
-static inline u32
-mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
-				 u32 size_cells)
-{
-	/* Ports with eight lanes use two headroom buffers between which the
-	 * configured headroom size is split. Therefore, multiply the calculated
-	 * headroom size by two.
-	 */
-	return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
-}
-
 enum mlxsw_sp_flood_type {
 	MLXSW_SP_FLOOD_TYPE_UC,
 	MLXSW_SP_FLOOD_TYPE_BC,
 	MLXSW_SP_FLOOD_TYPE_MC,
 };
 
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
-			       int mtu, bool pause_en);
 int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
 				int prio, char *ppcnt_pl);
 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				   bool is_up);
 
 /* spectrum_buffers.c */
+struct mlxsw_sp_hdroom_prio {
+	/* Number of port buffer associated with this priority. This is the
+	 * actually configured value.
+	 */
+	u8 buf_idx;
+	/* Value of buf_idx deduced from the DCB ETS configuration. */
+	u8 ets_buf_idx;
+	bool lossy;
+};
+
+struct mlxsw_sp_hdroom_buf {
+	u32 thres_cells;
+	u32 size_cells;
+	bool lossy;
+};
+
+#define MLXSW_SP_PB_COUNT 10
+
+struct mlxsw_sp_hdroom {
+	struct {
+		struct mlxsw_sp_hdroom_prio prio[IEEE_8021Q_MAX_PRIORITIES];
+	} prios;
+	struct {
+		struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
+	} bufs;
+	struct {
+		/* Size actually configured for the internal buffer. Equal to
+		 * reserve when internal buffer is enabled.
+		 */
+		u32 size_cells;
+		/* Space reserved in the headroom for the internal buffer. Port
+		 * buffers are not allowed to grow into this space.
+		 */
+		u32 reserve_cells;
+		bool enable;
+	} int_buf;
+	int delay_bytes;
+	int mtu;
+};
+
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port);
 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
 			 unsigned int sb_index, u16 pool_index,
 			 struct devlink_sb_pool_info *pool_info);
@@ -475,11 +506,20 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
 				     u32 *p_cur, u32 *p_max);
 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct mlxsw_sp_hdroom *hdroom);
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+			      const struct mlxsw_sp_hdroom *hdroom);
 
 extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
 extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
 
+extern const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops;
+
 /* spectrum_switchdev.c */
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -517,9 +557,6 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
 			  bool dwrr, u8 dwrr_weight);
 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
 			      u8 switch_prio, u8 tclass);
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
-				 u8 *prio_tc, bool pause_en,
-				 struct ieee_pfc *my_pfc);
 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				  enum mlxsw_reg_qeec_hr hr, u8 index,
 				  u8 next_index, u32 maxrate, u8 burst_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 6f84557a5a6f..68286cd70c33 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -121,6 +121,10 @@ struct mlxsw_sp_sb_vals {
 	unsigned int cms_cpu_count;
 };
 
+struct mlxsw_sp_sb_ops {
+	u32 (*int_buf_size_get)(int mtu, u32 speed);
+};
+
 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
 {
 	return mlxsw_sp->sb->cell_size * cells;
@@ -131,9 +135,14 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
 {
 	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
 }
 
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
+static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+					    u32 size_cells)
 {
-	return mlxsw_sp->sb->max_headroom_cells;
+	/* Ports with eight lanes use two headroom buffers between which the
+	 * configured headroom size is split. Therefore, multiply the calculated
+	 * headroom size by two.
+	 */
+	return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
 }
 
 static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
@@ -291,55 +300,292 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				    (unsigned long) pm);
 }
 
-/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
-#define MLXSW_SP_PB_HEADROOM 25632
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
+{
+	int prio;
+
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
+}
+
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
+{
+	int prio;
+	int i;
+
+	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+		hdroom->bufs.buf[i].lossy = true;
+
+	for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
+		if (!hdroom->prios.prio[prio].lossy)
+			hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
+	}
+}
+
+static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
+{
+	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
+
+static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
+{
+	if (lossy)
+		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
+	else
+		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+						    thres);
+}
+
+static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
+					 const struct mlxsw_sp_hdroom *hdroom)
+{
+	u16 delay_cells;
+
+	delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);
+
+	/* In the worst case scenario the delay will be made up of packets that
+	 * are all of size CELL_SIZE + 1, which means each packet will require
+	 * almost twice its true size when buffered in the switch. We therefore
+	 * multiply this value by the "cell factor", which is close to 2.
+	 *
+	 * Another MTU is added in case the transmitting host already started
+	 * transmitting a maximum length frame when the PFC packet was received.
+	 */
+	return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
+}
+
+static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+	u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);
+
+	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
+static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
+{
+	int prio;
+
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+		if (hdroom->prios.prio[prio].buf_idx == buf)
+			return true;
+	}
+	return false;
+}
+
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+				      struct mlxsw_sp_hdroom *hdroom)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u16 reserve_cells;
+	int i;
+
+	/* Internal buffer. */
+	reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
+							 mlxsw_sp_port->max_mtu);
+	reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
+	hdroom->int_buf.reserve_cells = reserve_cells;
+
+	if (hdroom->int_buf.enable)
+		hdroom->int_buf.size_cells = reserve_cells;
+	else
+		hdroom->int_buf.size_cells = 0;
+
+	/* PG buffers. */
+	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
+		struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
+		u16 thres_cells;
+		u16 delay_cells;
+
+		if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
+			thres_cells = 0;
+			delay_cells = 0;
+		} else if (buf->lossy) {
+			thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+			delay_cells = 0;
+		} else {
+			thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+			delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
+		}
+
+		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
+		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
+
+		buf->thres_cells = thres_cells;
+		buf->size_cells = thres_cells + delay_cells;
+	}
+}
+
 #define MLXSW_SP_PB_UNUSED 8
 
-static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
+					     const struct mlxsw_sp_hdroom *hdroom, bool force)
 {
-	const u32 pbs[] = {
-		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
-		[9] = MLXSW_PORT_MAX_MTU,
-	};
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+	bool dirty;
+	int err;
 	int i;
 
-	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
-			    0xffff, 0xffff / 2);
-	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
-		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
+	dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
+	if (!dirty && !force)
+		return 0;
+
+	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
+	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+		const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
 
 		if (i == MLXSW_SP_PB_UNUSED)
 			continue;
-		size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
-		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
+
+		mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
 	}
-	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
-					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+
+	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+	if (err)
+		return err;
+
+	mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
+	return 0;
 }
 
-static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
+					     const struct mlxsw_sp_hdroom *hdroom, bool force)
 {
 	char pptb_pl[MLXSW_REG_PPTB_LEN];
-	int i;
+	bool dirty;
+	int prio;
+	int err;
+
+	dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
+	if (!dirty && !force)
+		return 0;
 
 	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
-	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
-			       pptb_pl);
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
+
+	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+	if (err)
+		return err;
+
+	mlxsw_sp_port->hdroom->prios = hdroom->prios;
+	return 0;
 }
 
-static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
+					     const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+	char sbib_pl[MLXSW_REG_SBIB_LEN];
+	bool dirty;
+	int err;
+
+	dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
+	if (!dirty && !force)
+		return 0;
+
+	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
+	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+	if (err)
+		return err;
+
+	mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
+	return 0;
+}
+
+static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
+				     const struct mlxsw_sp_hdroom *hdroom)
+{
+	u32 taken_headroom_cells = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
+		taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
+
+	taken_headroom_cells += hdroom->int_buf.reserve_cells;
+	return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
+}
+
+static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+				       const struct mlxsw_sp_hdroom *hdroom, bool force)
 {
+	struct mlxsw_sp_hdroom orig_hdroom;
+	struct mlxsw_sp_hdroom tmp_hdroom;
 	int err;
+	int i;
+
+	/* Port buffers need to be configured in three steps. First, all buffers
+	 * with non-zero size are configured. Then, prio-to-buffer map is
+	 * updated, allowing traffic to flow to the now non-zero buffers.
+	 * Finally, zero-sized buffers are configured, because now no traffic
+	 * should be directed to them anymore. This way, in a non-congested
+	 * system, no packet drops are introduced by the reconfiguration.
+	 */
+
+	orig_hdroom = *mlxsw_sp_port->hdroom;
+	tmp_hdroom = orig_hdroom;
+	for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+		if (hdroom->bufs.buf[i].size_cells)
+			tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
+	}
 
-	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+	if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
+	    !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
+		return -ENOBUFS;
+
+	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
 	if (err)
 		return err;
-	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+
+	err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
+	if (err)
+		goto err_configure_priomap;
+
+	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+	if (err)
+		goto err_configure_buffers;
+
+	err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
+	if (err)
+		goto err_configure_int_buf;
+
+	*mlxsw_sp_port->hdroom = *hdroom;
+	return 0;
+
+err_configure_int_buf:
+	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_buffers:
+	mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_priomap:
+	mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
+	return err;
+}
+
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+			      const struct mlxsw_sp_hdroom *hdroom)
+{
+	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_hdroom hdroom = {};
+	u32 size9;
+	int prio;
+
+	hdroom.mtu = mlxsw_sp_port->dev->mtu;
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		hdroom.prios.prio[prio].lossy = true;
+
+	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+	/* Buffer 9 is used for control traffic. */
+	size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+	hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
+
+	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
 }
 
 static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
@@ -916,6 +1162,46 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
 	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
 };
 
+static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
+{
+	return mtu * 5 / 2;
+}
+
+static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
+{
+	return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
+{
+	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+	return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
+{
+	int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+	return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
+	.int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
+	.int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
+	.int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
+};
+
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
 {
 	u32 max_headroom_size;
@@ -995,17 +1281,34 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
 	int err;
 
+	mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
+	if (!mlxsw_sp_port->hdroom)
+		return -ENOMEM;
+	mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;
+
 	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
 	if (err)
-		return err;
+		goto err_headroom_init;
 	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
 	if (err)
-		return err;
+		goto err_port_sb_cms_init;
 	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+	if (err)
+		goto err_port_sb_pms_init;
+	return 0;
 
+err_port_sb_pms_init:
+err_port_sb_cms_init:
+err_headroom_init:
+	kfree(mlxsw_sp_port->hdroom);
 	return err;
 }
 
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	kfree(mlxsw_sp_port->hdroom);
+}
+
 int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
 			 unsigned int sb_index, u16 pool_index,
 			 struct devlink_sb_pool_info *pool_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 0d3fb2e51ea5..d9a556dbe85e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -64,87 +64,28 @@ static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
 	return 0;
 }
 
-static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
-				     u8 *prio_tc)
-{
-	char pptb_pl[MLXSW_REG_PPTB_LEN];
-	int i;
-
-	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
-
-	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
-			       pptb_pl);
-}
-
-static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
-{
-	int i;
-
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		if (prio_tc[i] == pg)
-			return true;
-	return false;
-}
-
-static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
-				    u8 *old_prio_tc, u8 *new_prio_tc)
-{
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char pbmc_pl[MLXSW_REG_PBMC_LEN];
-	int err, i;
-
-	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
-	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-	if (err)
-		return err;
-
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		u8 pg = old_prio_tc[i];
-
-		if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
-			mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
-	}
-
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
 static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
 					  struct ieee_ets *ets)
 {
-	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
-	struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
 	struct net_device *dev = mlxsw_sp_port->dev;
+	struct mlxsw_sp_hdroom hdroom;
+	int prio;
 	int err;
 
-	/* Create the required PGs, but don't destroy existing ones, as
-	 * traffic is still directed to them.
-	 */
-	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-					   ets->prio_tc, pause_en,
-					   mlxsw_sp_port->dcb.pfc);
+	hdroom = *mlxsw_sp_port->hdroom;
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
+	mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom\n");
 		return err;
 	}
 
-	err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
-	if (err) {
-		netdev_err(dev, "Failed to set PG-priority mapping\n");
-		goto err_port_prio_pg_map;
-	}
-
-	err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
-				       ets->prio_tc);
-	if (err)
-		netdev_warn(dev, "Failed to remove unused PGs\n");
-
 	return 0;
-
-err_port_prio_pg_map:
-	mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
-	return err;
 }
 
 static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -605,6 +546,9 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+	struct mlxsw_sp_hdroom orig_hdroom;
+	struct mlxsw_sp_hdroom hdroom;
+	int prio;
 	int err;
 
 	if (pause_en && pfc->pfc_en) {
@@ -612,9 +556,21 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 		return -EINVAL;
 	}
 
-	err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-					   mlxsw_sp_port->dcb.ets->prio_tc,
-					   pause_en, pfc);
+	orig_hdroom = *mlxsw_sp_port->hdroom;
+
+	hdroom = orig_hdroom;
+	if (pfc->pfc_en)
+		hdroom.delay_bytes = DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE);
+	else
+		hdroom.delay_bytes = 0;
+
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		hdroom.prios.prio[prio].lossy = !(pfc->pfc_en & BIT(prio));
+
+	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom for PFC\n");
 		return err;
@@ -632,9 +588,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
 	return 0;
 
 err_port_pfc_set:
-	__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
-				     mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
-				     mlxsw_sp_port->dcb.pfc);
+	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 6ee0479b189f..6045d3df00ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -192,11 +192,19 @@ static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
 			       pfcc_pl);
 }
 
+/* Maximum delay buffer needed in case of PAUSE frames. Similar to PFC delay, but is
+ * measured in bytes. Assumes 100m cable and does not take into account MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY_BYTES 19476
+
 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
 					struct ethtool_pauseparam *pause)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	bool pause_en = pause->tx_pause || pause->rx_pause;
+	struct mlxsw_sp_hdroom orig_hdroom;
+	struct mlxsw_sp_hdroom hdroom;
+	int prio;
 	int err;
 
 	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
@@ -209,7 +217,21 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
 		return -EINVAL;
 	}
 
-	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+	orig_hdroom = *mlxsw_sp_port->hdroom;
+
+	hdroom = orig_hdroom;
+	if (pause_en)
+		hdroom.delay_bytes = MLXSW_SP_PAUSE_DELAY_BYTES;
+	else
+		hdroom.delay_bytes = 0;
+
+	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+		hdroom.prios.prio[prio].lossy = !pause_en;
+
+	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 	if (err) {
 		netdev_err(dev, "Failed to configure port's headroom\n");
 		return err;
@@ -227,8 +249,7 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
 	return 0;
 
 err_port_pause_configure:
-	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
-	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 38b3131c4027..c6c5826aba41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -968,35 +968,26 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 	return 0;
 }
 
-static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
-				      u32 speed)
+static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
 {
-	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
+	struct mlxsw_sp_hdroom hdroom;
 
-	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+	hdroom = *mlxsw_sp_port->hdroom;
+	hdroom.int_buf.enable = enable;
+	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
 }
 
 static int mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-	u32 buffsize;
-
-	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, mlxsw_sp_port->max_speed,
-					      mlxsw_sp_port->max_mtu);
-	buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
-	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+	return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
 }
 
-static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
-					      u8 local_port)
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	char sbib_pl[MLXSW_REG_SBIB_LEN];
-
-	mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+	mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
 }
 
 static struct mlxsw_sp_span_analyzed_port *
@@ -1145,18 +1136,15 @@ mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
 }
 
 static void
-mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
 				    struct mlxsw_sp_span_analyzed_port *
 				    analyzed_port)
 {
-	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
-
 	/* Remove egress mirror buffer now that port is no longer analyzed
 	 * at egress.
 	 */
 	if (!analyzed_port->ingress)
-		mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
-						  analyzed_port->local_port);
+		mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
 
 	list_del(&analyzed_port->list);
 	kfree(analyzed_port);
@@ -1207,7 +1195,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (!refcount_dec_and_test(&analyzed_port->ref_count))
 		goto out_unlock;
 
-	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
 
 out_unlock:
 	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1661,11 +1649,6 @@ static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
-static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
-{
-	return mtu * 5 / 2;
-}
-
 static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 					      u16 policer_id_base)
 {
@@ -1674,7 +1657,6 @@ static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 
 const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
 	.init = mlxsw_sp1_span_init,
-	.buffsize_get = mlxsw_sp1_span_buffsize_get,
 	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
 };
 
@@ -1699,18 +1681,6 @@ static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
 
 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
 
-static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
-{
-	return 3 * mtu + buffer_factor * speed / 1000;
-}
-
-static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
-{
-	int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
-	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
 static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 					      u16 policer_id_base)
 {
@@ -1727,19 +1697,10 @@ static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
 
 const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
 	.init = mlxsw_sp2_span_init,
-	.buffsize_get = mlxsw_sp2_span_buffsize_get,
 	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
 };
 
-static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
-{
-	int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
-	return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
 const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
 	.init = mlxsw_sp2_span_init,
-	.buffsize_get = mlxsw_sp3_span_buffsize_get,
 	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
 };
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 1c746dd3b1bd..d907718bc8c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -47,7 +47,6 @@ struct mlxsw_sp_span_entry_ops;
 
 struct mlxsw_sp_span_ops {
 	int (*init)(struct mlxsw_sp *mlxsw_sp);
-	u32 (*buffsize_get)(int mtu, u32 speed);
 	int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
 				   u16 policer_id_base);
 };