lib: use atomic thread fence recommended API
Use rte_atomic_thread_fence() instead of directly using the
__atomic_thread_fence() GCC built-in or the
__rte_atomic_thread_fence() internal function.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Tyler Retzlaff authored and tmonjalo committed Feb 18, 2024
1 parent 93998f3 commit 283d843
Showing 7 changed files with 14 additions and 14 deletions.
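For reference, a minimal sketch of the fence pattern these files now share. It is not part of this commit; the names prod_data, prod_flag, producer and consumer are illustrative only. The public rte_atomic_thread_fence() wrapper takes the same rte_memory_order_* constants used throughout the diff; the two spellings it replaces are noted in comments.

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_stdatomic.h>

static uint32_t prod_data;
static RTE_ATOMIC(uint32_t) prod_flag;

static void
producer(uint32_t value)
{
	prod_data = value;
	/* was: __atomic_thread_fence(__ATOMIC_RELEASE) or
	 * __rte_atomic_thread_fence(rte_memory_order_release)
	 */
	rte_atomic_thread_fence(rte_memory_order_release);
	rte_atomic_store_explicit(&prod_flag, 1, rte_memory_order_relaxed);
}

static uint32_t
consumer(void)
{
	while (rte_atomic_load_explicit(&prod_flag, rte_memory_order_relaxed) == 0)
		;
	/* pairs with the release fence in producer() */
	rte_atomic_thread_fence(rte_memory_order_acquire);
	return prod_data;
}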
2 changes: 1 addition & 1 deletion lib/distributor/rte_distributor.c
@@ -187,7 +187,7 @@ rte_distributor_return_pkt(struct rte_distributor *d,
}

/* Sync with distributor to acquire retptrs */
-__atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);
for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
/* Switch off the return bit first */
buf->retptr64[i] = 0;
2 changes: 1 addition & 1 deletion lib/eal/common/eal_common_trace.c
@@ -526,7 +526,7 @@ __rte_trace_point_register(rte_trace_point_t *handle, const char *name,

/* Add the trace point at tail */
STAILQ_INSERT_TAIL(&tp_list, tp, next);
-__atomic_thread_fence(rte_memory_order_release);
+rte_atomic_thread_fence(rte_memory_order_release);

/* All Good !!! */
return 0;
4 changes: 2 additions & 2 deletions lib/eal/include/rte_mcslock.h
@@ -83,7 +83,7 @@ rte_mcslock_lock(RTE_ATOMIC(rte_mcslock_t *) *msl, rte_mcslock_t *me)
* store to prev->next. Otherwise it will cause a deadlock. Need a
* store-load barrier.
*/
-__rte_atomic_thread_fence(rte_memory_order_acq_rel);
+rte_atomic_thread_fence(rte_memory_order_acq_rel);
/* If the lock has already been acquired, it first atomically
* places the node at the end of the queue and then proceeds
* to spin on me->locked until the previous lock holder resets
@@ -117,7 +117,7 @@ rte_mcslock_unlock(RTE_ATOMIC(rte_mcslock_t *) *msl, RTE_ATOMIC(rte_mcslock_t *)
* while-loop first. This has the potential to cause a
* deadlock. Need a load barrier.
*/
-__rte_atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);
/* More nodes added to the queue by other CPUs.
* Wait until the next pointer is set.
*/
10 changes: 5 additions & 5 deletions lib/hash/rte_cuckoo_hash.c
@@ -878,7 +878,7 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
/* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
-__atomic_thread_fence(rte_memory_order_release);
+rte_atomic_thread_fence(rte_memory_order_release);
}

/* Need to swap current/alt sig to allow later
@@ -910,7 +910,7 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
/* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
-__atomic_thread_fence(rte_memory_order_release);
+rte_atomic_thread_fence(rte_memory_order_release);
}

curr_bkt->sig_current[curr_slot] = sig;
@@ -1403,7 +1403,7 @@ __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
/* The loads of sig_current in search_one_bucket
* should not move below the load from tbl_chng_cnt.
*/
-__atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);
/* Re-read the table change counter to check if the
* table has changed during search. If yes, re-do
* the search.
@@ -1632,7 +1632,7 @@ __rte_hash_compact_ll(const struct rte_hash *h,
/* The store to sig_current should
* not move above the store to tbl_chng_cnt.
*/
-__atomic_thread_fence(rte_memory_order_release);
+rte_atomic_thread_fence(rte_memory_order_release);
}
last_bkt->sig_current[i] = NULL_SIGNATURE;
rte_atomic_store_explicit(&last_bkt->key_idx[i],
@@ -2223,7 +2223,7 @@ __bulk_lookup_lf(const struct rte_hash *h, const void **keys,
/* The loads of sig_current in compare_signatures
* should not move below the load from tbl_chng_cnt.
*/
-__atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);
/* Re-read the table change counter to check if the
* table has changed during search. If yes, re-do
* the search.
4 changes: 2 additions & 2 deletions lib/lpm/rte_lpm.c
@@ -1116,7 +1116,7 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
* Prevent the free of the tbl8 group from hoisting.
*/
i_lpm->lpm.tbl24[tbl24_index].valid = 0;
-__atomic_thread_fence(__ATOMIC_RELEASE);
+rte_atomic_thread_fence(rte_memory_order_release);
status = tbl8_free(i_lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
@@ -1132,7 +1132,7 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
*/
__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
-__atomic_thread_fence(__ATOMIC_RELEASE);
+rte_atomic_thread_fence(rte_memory_order_release);
status = tbl8_free(i_lpm, tbl8_group_start);
}
#undef group_idx
4 changes: 2 additions & 2 deletions lib/ring/rte_ring_c11_pvt.h
@@ -68,7 +68,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
n = max;

/* Ensure the head is read before tail */
-__atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);

/* load-acquire synchronize with store-release of ht->tail
* in update_tail.
@@ -145,7 +145,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
n = max;

/* Ensure the head is read before tail */
-__atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);

/* this load-acquire synchronize with store-release of ht->tail
* in update_tail.
2 changes: 1 addition & 1 deletion lib/stack/rte_stack_lf_c11.h
@@ -110,7 +110,7 @@ __rte_stack_lf_pop_elems(struct rte_stack_lf_list *list,
* elements are properly ordered with respect to the head
* pointer read.
*/
-__atomic_thread_fence(rte_memory_order_acquire);
+rte_atomic_thread_fence(rte_memory_order_acquire);

rte_prefetch0(old_head.top);

