Skip to content

Commit

Permalink
IB/mlx5: Introduce ODP diagnostic counters
Browse files Browse the repository at this point in the history
Introduce ODP diagnostic counters and count the following
per MR within IB/mlx5 driver:
 1) Page faults:
	Total number of faulted pages.
 2) Page invalidations:
	Total number of pages invalidated by the OS during all
	invalidation events. The translations can no longer be
	valid due to either non-present pages or mapping changes.

Link: https://lore.kernel.org/r/20191016062308.11886-2-leon@kernel.org
Signed-off-by: Erez Alfasi <ereza@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
  • Loading branch information
erezamellanox authored and jgunthorpe committed Oct 22, 2019
1 parent 75e70ad commit a3de94e
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 0 deletions.
4 changes: 4 additions & 0 deletions drivers/infiniband/hw/mlx5/mlx5_ib.h
Original file line number Diff line number Diff line change
Expand Up @@ -585,6 +585,9 @@ struct mlx5_ib_dm {
IB_ACCESS_REMOTE_READ |\
IB_ZERO_BASED)

/*
 * Atomically add @value to one of @mr's ODP diagnostic counters.
 * @counter_name is spliced in as a member of mr->odp_stats (struct
 * ib_odp_counters), i.e. "faults" or "invalidations" — which is why
 * this must be a macro rather than an inline function.
 */
#define mlx5_update_odp_stats(mr, counter_name, value) \
atomic64_add(value, &((mr)->odp_stats.counter_name))

struct mlx5_ib_mr {
struct ib_mr ibmr;
void *descs;
Expand Down Expand Up @@ -622,6 +625,7 @@ struct mlx5_ib_mr {
wait_queue_head_t q_leaf_free;
struct mlx5_async_work cb_work;
atomic_t num_pending_prefetch;
struct ib_odp_counters odp_stats;
};

static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
Expand Down
15 changes: 15 additions & 0 deletions drivers/infiniband/hw/mlx5/odp.c
Original file line number Diff line number Diff line change
Expand Up @@ -224,6 +224,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
u64 invalidations = 0;
int in_block = 0;
u64 addr;

Expand Down Expand Up @@ -261,6 +262,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
blk_start_idx = idx;
in_block = 1;
}

/* Count page invalidations */
invalidations += idx - blk_start_idx + 1;
} else {
u64 umr_offset = idx & umr_block_mask;

Expand All @@ -279,6 +283,9 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&umem_odp->umem_mutex);

mlx5_update_odp_stats(mr, invalidations, invalidations);

/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
Expand All @@ -287,6 +294,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,

ib_umem_odp_unmap_dma_pages(umem_odp, start, end);


if (unlikely(!umem_odp->npages && mr->parent &&
!umem_odp->dying)) {
WRITE_ONCE(umem_odp->dying, 1);
Expand Down Expand Up @@ -801,6 +809,13 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
if (ret < 0)
goto srcu_unlock;

/*
* When prefetching a page, page fault is generated
* in order to bring the page to the main memory.
* In the current flow, page faults are being counted.
*/
mlx5_update_odp_stats(mr, faults, ret);

npages += ret;
ret = 0;
break;
Expand Down
5 changes: 5 additions & 0 deletions include/rdma/ib_verbs.h
Original file line number Diff line number Diff line change
Expand Up @@ -2220,6 +2220,11 @@ struct rdma_netdev_alloc_params {
struct net_device *netdev, void *param);
};

/*
 * Per-MR on-demand paging (ODP) diagnostic counters.
 * Updated atomically from the fault/invalidation paths
 * (see mlx5_update_odp_stats() in the mlx5 driver).
 */
struct ib_odp_counters {
/* total number of pages faulted in for this MR */
atomic64_t faults;
/* total number of pages invalidated by the OS during all
 * invalidation events for this MR
 */
atomic64_t invalidations;
};

struct ib_counters {
struct ib_device *device;
struct ib_uobject *uobject;
Expand Down

0 comments on commit a3de94e

Please sign in to comment.