vhost: improve RARP handling in dequeue paths
With the previous refactoring in place, we can now simplify
the RARP packet injection handling in both the sync and async
dequeue paths.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Chenbo Xia <chenbox@nvidia.com>
mcoquelin committed Jan 17, 2025
1 parent 5c4ad75 commit d92688b
Showing 1 changed file with 30 additions and 42 deletions.
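
In essence, both dequeue paths now count the injected RARP mbuf directly into nb_rx instead of shifting the pkts array and patching the return value after the unlock. A minimal sketch of the resulting flow (locking, queue checks, and the packed/split and legacy/compliant variants are elided; rarp_needed stands in for the broadcast_rarp compare-and-exchange):

	uint16_t nb_rx = 0;

	if (unlikely(rarp_needed)) {
		/*
		 * Inject the RARP packet at the head of "pkts" so that the
		 * switch's mac learning table gets updated first.
		 */
		pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
		if (pkts[nb_rx] == NULL)
			return nb_rx; /* the real code jumps to its error/unlock path here */
		nb_rx += 1;
	}

	/* The dequeue helper fills the array after any injected packet... */
	nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
			pkts + nb_rx, count - nb_rx);

	/*
	 * ...so a single stats update and the final return cover both the
	 * RARP packet and the regular burst; no post-unlock fixup is needed.
	 */
	vhost_queue_stats_update(dev, vq, pkts, nb_rx);
	return nb_rx;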
72 changes: 30 additions & 42 deletions lib/vhost/virtio_net.c
@@ -3593,7 +3593,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
 {
 	struct virtio_net *dev;
-	struct rte_mbuf *rarp_mbuf = NULL;
 	struct vhost_virtqueue *vq;
 	int16_t success = 1;
 	uint16_t nb_rx = 0;
@@ -3654,32 +3653,32 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&
 			rte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,
 			&success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {
-
-		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
-		if (rarp_mbuf == NULL) {
+		/*
+		 * Inject the RARP packet to the head of "pkts" array,
+		 * so that switch's mac learning table will get updated first.
+		 */
+		pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+		if (pkts[nb_rx] == NULL) {
 			VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
 			goto out;
 		}
-		/*
-		 * Inject it to the head of "pkts" array, so that switch's mac
-		 * learning table will get updated first.
-		 */
-		pkts[0] = rarp_mbuf;
-		vhost_queue_stats_update(dev, vq, pkts, 1);
-		pkts++;
-		count -= 1;
+		nb_rx += 1;
 	}
 
 	if (vq_is_packed(dev)) {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 		else
-			nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 	} else {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 		else
-			nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
+			nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx);
 	}
 
 	vhost_queue_stats_update(dev, vq, pkts, nb_rx);
@@ -3690,9 +3689,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
-	if (unlikely(rarp_mbuf != NULL))
-		nb_rx += 1;
-
 out_no_unlock:
 	return nb_rx;
 }
@@ -4197,7 +4193,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	int *nr_inflight, int16_t dma_id, uint16_t vchan_id)
 {
 	struct virtio_net *dev;
-	struct rte_mbuf *rarp_mbuf = NULL;
 	struct vhost_virtqueue *vq;
 	int16_t success = 1;
 	uint16_t nb_rx = 0;
@@ -4276,36 +4271,32 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rte_atomic_load_explicit(&dev->broadcast_rarp, rte_memory_order_acquire) &&
 			rte_atomic_compare_exchange_strong_explicit(&dev->broadcast_rarp,
 			&success, 0, rte_memory_order_release, rte_memory_order_relaxed))) {
-
-		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
-		if (rarp_mbuf == NULL) {
+		/*
+		 * Inject the RARP packet to the head of "pkts" array,
+		 * so that switch's mac learning table will get updated first.
+		 */
+		pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+		if (pkts[nb_rx] == NULL) {
 			VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
 			goto out;
 		}
-		/*
-		 * Inject it to the head of "pkts" array, so that switch's mac
-		 * learning table will get updated first.
-		 */
-		pkts[0] = rarp_mbuf;
-		vhost_queue_stats_update(dev, vq, pkts, 1);
-		pkts++;
-		count -= 1;
+		nb_rx += 1;
 	}
 
 	if (vq_is_packed(dev)) {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 		else
-			nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 	} else {
 		if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-			nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 		else
-			nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
-					pkts, count, dma_id, vchan_id);
+			nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
+					pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
 	}
 
 	*nr_inflight = vq->async->pkts_inflight_n;
@@ -4317,9 +4308,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 out_access_unlock:
 	rte_rwlock_read_unlock(&vq->access_lock);
 
-	if (unlikely(rarp_mbuf != NULL))
-		nb_rx += 1;
-
 out_no_unlock:
 	return nb_rx;
 }
