Update rwlock #342

Merged
merged 2 commits on Sep 12, 2023
2 changes: 1 addition & 1 deletion examples/phil/phil.c
@@ -30,7 +30,7 @@ a number of seconds equal to the philosopher's order number around the table
The two solutions implemented here are 1) a ticket-based one, and 2) a
claim-based one. The ticket-based solution is fair in the sense that all
philosophers get to access the resource for the same amount of time in average
whether they think quick (or shallowy...) or long. The drawback is that the
whether they think quick (or shallow...) or long. The drawback is that the
faster thinkers get to wait more for accessing the resource. The claim-based
solution is addressing this by letting the faster thinkers access the resource
as long as it is not claimed by another philosopher (in which case the
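For context on the ticket-based solution described in the comment above, here is a minimal, self-contained sketch of a ticket lock (illustrative only; the names are hypothetical and this is not the phil.c implementation):

#include <stdint.h>

typedef struct {
    uint32_t next_ticket; /* ticket handed to the next arriving thread */
    uint32_t now_serving; /* ticket currently allowed to hold the resource */
} ticket_lock_t;

static inline void
ticket_lock(ticket_lock_t *tl)
{
    /* Arrival order decides service order, which is what makes the scheme fair. */
    uint32_t mine = __atomic_fetch_add(&tl->next_ticket, 1, __ATOMIC_RELAXED);

    while (__atomic_load_n(&tl->now_serving, __ATOMIC_ACQUIRE) != mine)
        ; /* spin; a real implementation would pause here */
}

static inline void
ticket_unlock(ticket_lock_t *tl)
{
    /* Hand the resource to the next ticket holder. */
    __atomic_fetch_add(&tl->now_serving, 1, __ATOMIC_RELEASE);
}

The claim-based variant described next in the comment trades this strict ordering for lower latency for the faster thinkers.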
6 changes: 3 additions & 3 deletions lib/core/hash/cne_fbk_hash.h
@@ -325,9 +325,9 @@ struct cne_fbk_hash_table *cne_fbk_hash_find_existing(const char *name);
* - E_CNE_NO_CONFIG - function could not get pointer to cne_config structure
* - E_CNE_SECONDARY - function was called from a secondary process instance
* - EINVAL - invalid parameter value passed to function
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
* - ENOSPC - the maximum amount of memory has already been allocated
* - EEXIST - a mempool with the same name already exists
* - ENOMEM - no appropriate memory area found in which to allocate memory
*/
struct cne_fbk_hash_table *cne_fbk_hash_create(const struct cne_fbk_hash_params *params);

6 changes: 3 additions & 3 deletions lib/core/hash/cne_hash.h
@@ -93,9 +93,9 @@ struct cne_hash;
* - E_CNE_SECONDARY - function was called from a secondary process instance
* - ENOENT - missing entry
* - EINVAL - invalid parameter passed to function
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
* - ENOSPC - the maximum amount of memory has already been allocated
* - EEXIST - a mempool with the same name already exists
* - ENOMEM - no appropriate memory area found in which to allocate memory
*/
struct cne_hash *cne_hash_create(const struct cne_hash_parameters *params);

6 changes: 3 additions & 3 deletions lib/core/mempool/mempool.h
@@ -112,9 +112,9 @@ typedef struct mempool_cfg {
* @return
* The pointer to the new allocated mempool, on success. NULL on error
* with errno set appropriately. Possible errno values include:
* - ENOSPC - the maximum number of memzones has already been allocated
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
* - ENOSPC - the maximum number of mempools has already been allocated
* - EEXIST - a mempool with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create mempool
*/
CNDP_API mempool_t *mempool_create(struct mempool_cfg *cinfo);

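Since mempool_create() signals failure by returning NULL and setting errno as documented above, a caller typically distinguishes the cases like this (a hedged sketch; the include path and the helper name are assumptions, and the mempool_cfg contents are not shown in this diff):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <mempool.h> /* assumed include path; provides mempool_t and mempool_create() */

static mempool_t *
create_pool_or_report(struct mempool_cfg *cfg)
{
    mempool_t *mp = mempool_create(cfg);

    if (mp == NULL) {
        /* errno distinguishes the failure cases documented above. */
        if (errno == ENOSPC)
            fprintf(stderr, "mempool limit already reached\n");
        else if (errno == EEXIST)
            fprintf(stderr, "a mempool with that name already exists\n");
        else if (errno == ENOMEM)
            fprintf(stderr, "no suitable memory area available\n");
        else
            fprintf(stderr, "mempool_create: %s\n", strerror(errno));
    }
    return mp;
}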
62 changes: 62 additions & 0 deletions lib/include/cne_lock_annotations.h
@@ -0,0 +1,62 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2022 Red Hat, Inc.
*/

#ifndef CNE_LOCK_ANNOTATIONS_H
#define CNE_LOCK_ANNOTATIONS_H

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CNE_ANNOTATE_LOCKS

#define __cne_lockable __attribute__((lockable))

#define __cne_guarded_by(...) __attribute__((guarded_by(__VA_ARGS__)))
#define __cne_guarded_var __attribute__((guarded_var))

#define __cne_exclusive_locks_required(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define __cne_exclusive_lock_function(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define __cne_exclusive_trylock_function(ret, ...) \
__attribute__((exclusive_trylock_function(ret, __VA_ARGS__)))
#define __cne_assert_exclusive_lock(...) __attribute__((assert_exclusive_lock(__VA_ARGS__)))

#define __cne_shared_locks_required(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define __cne_shared_lock_function(...) __attribute__((shared_lock_function(__VA_ARGS__)))
#define __cne_shared_trylock_function(ret, ...) \
__attribute__((shared_trylock_function(ret, __VA_ARGS__)))
#define __cne_assert_shared_lock(...) __attribute__((assert_shared_lock(__VA_ARGS__)))

#define __cne_unlock_function(...) __attribute__((unlock_function(__VA_ARGS__)))

#define __cne_no_thread_safety_analysis __attribute__((no_thread_safety_analysis))

#else /* ! CNE_ANNOTATE_LOCKS */

#define __cne_lockable

#define __cne_guarded_by(...)
#define __cne_guarded_var

#define __cne_exclusive_locks_required(...)
#define __cne_exclusive_lock_function(...)
#define __cne_exclusive_trylock_function(...)
#define __cne_assert_exclusive_lock(...)

#define __cne_shared_locks_required(...)
#define __cne_shared_lock_function(...)
#define __cne_shared_trylock_function(...)
#define __cne_assert_shared_lock(...)

#define __cne_unlock_function(...)

#define __cne_no_thread_safety_analysis

#endif /* CNE_ANNOTATE_LOCKS */

#ifdef __cplusplus
}
#endif

#endif /* CNE_LOCK_ANNOTATIONS_H */
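As a quick illustration of how these macros are meant to be consumed, the hedged sketch below annotates a hypothetical structure and a pair of accessors; none of it is part of this patch. With clang, -DCNE_ANNOTATE_LOCKS and -Wthread-safety the compiler checks the locking contract; without CNE_ANNOTATE_LOCKS the attributes expand to nothing:

#include <stdint.h>
#include <cne_rwlock.h> /* assumed include path; pulls in cne_lock_annotations.h */

struct counter {
    cne_rwlock_t lock;
    uint64_t value __cne_guarded_by(lock); /* only touch 'value' while holding 'lock' */
};

/* Caller must already hold the lock exclusively. */
static void
counter_bump(struct counter *c) __cne_exclusive_locks_required(c->lock)
{
    c->value++;
}

/* Caller must hold at least a read (shared) lock. */
static uint64_t
counter_peek(struct counter *c) __cne_shared_locks_required(c->lock)
{
    return c->value;
}

static void
counter_update(struct counter *c)
{
    cne_rwlock_write_lock(&c->lock);   /* annotated as an exclusive lock function */
    counter_bump(c);
    cne_rwlock_write_unlock(&c->lock); /* annotated as an unlock function */
}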
139 changes: 101 additions & 38 deletions lib/include/cne_rwlock.h
@@ -15,11 +15,17 @@
* one writer. All readers are blocked until the writer is finished
* writing.
*
* This version does not give preference to readers or writers
* and does not starve either readers or writers.
*
* See also:
* https://locklessinc.com/articles/locks/
*/

#include <cne_branch_prediction.h>
#include <cne_spinlock.h>
#include <cne_common.h>
#include <cne_lock_annotations.h>
#include <cne_pause.h>

#ifdef __cplusplus
@@ -29,10 +29,28 @@ extern "C" {
/**
* The cne_rwlock_t type.
*
* cnt is -1 when write lock is held, and > 0 when read locks are held.
* Readers increment the counter by CNE_RWLOCK_READ (4)
* Writers set the CNE_RWLOCK_WRITE bit when lock is held
* and set the CNE_RWLOCK_WAIT bit while waiting.
*
*  31                2 1 0
* +-------------------+-+-+
* |      readers      | | |
* +-------------------+-+-+
*                      ^ ^
*                      | |
* WRITE: lock held ----/ |
* WAIT: writer pending --/
*/
typedef struct {
volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */

#define CNE_RWLOCK_WAIT 0x1 /* Writer is waiting */
#define CNE_RWLOCK_WRITE 0x2 /* Writer has the lock */
#define CNE_RWLOCK_MASK (CNE_RWLOCK_WAIT | CNE_RWLOCK_WRITE)
/* Writer is waiting or has lock */
#define CNE_RWLOCK_READ 0x4 /* Reader increment */

typedef struct __cne_lockable {
int32_t cnt;
} cne_rwlock_t;
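A worked example of this encoding, not part of the header: with two readers inside the lock while one writer waits, the counter holds 2 * CNE_RWLOCK_READ + CNE_RWLOCK_WAIT = 0x9, which the small check below spells out using the macros defined just above:

#include <assert.h>
#include <stdint.h>

static void
rwlock_encoding_example(void)
{
    int32_t cnt = 2 * CNE_RWLOCK_READ + CNE_RWLOCK_WAIT; /* two readers, writer pending: 0x9 */

    assert((cnt & CNE_RWLOCK_MASK) == CNE_RWLOCK_WAIT);  /* writer is waiting, not holding */
    assert((cnt >> 2) == 2);                             /* reader count lives in bits 2..31 */
}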

/**
@@ -63,24 +87,29 @@ cne_rwlock_init(cne_rwlock_t *rwl)
*/
static inline void
cne_rwlock_read_lock(cne_rwlock_t *rwl)
__cne_shared_lock_function(rwl) __cne_no_thread_safety_analysis
{
int32_t x;
int success = 0;

while (success == 0) {
x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
/* write lock is held */
if (x < 0) {
while (1) {
/* Wait while writer is present or pending */
while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & CNE_RWLOCK_MASK)
cne_pause();
continue;
}
success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED);

/* Try to get read lock */
x = __atomic_fetch_add(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_ACQUIRE) + CNE_RWLOCK_READ;

/* If no writer, then acquire was successful */
if (likely(!(x & CNE_RWLOCK_MASK)))
return;

/* Lost race with writer, backout the change. */
__atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELAXED);
}
}

/**
* try to take a read lock.
* Try to take a read lock.
*
* @param rwl
* A pointer to a rwlock structure.
@@ -91,19 +120,25 @@ cne_rwlock_read_lock(cne_rwlock_t *rwl)
*/
static inline int
cne_rwlock_read_trylock(cne_rwlock_t *rwl)
__cne_shared_trylock_function(0, rwl) __cne_no_thread_safety_analysis
{
int32_t x;
int success = 0;

while (success == 0) {
x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
/* write lock is held */
if (x < 0)
return -EBUSY;
success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED);
}
x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);

/* fail if write lock is held or writer is pending */
if (x & CNE_RWLOCK_MASK)
return -EBUSY;

/* Try to get read lock */
x = __atomic_fetch_add(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_ACQUIRE) + CNE_RWLOCK_READ;

/* Back out if writer raced in */
if (unlikely(x & CNE_RWLOCK_MASK)) {
__atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELEASE);

return -EBUSY;
}
return 0;
}

@@ -114,13 +149,13 @@ cne_rwlock_read_trylock(cne_rwlock_t *rwl)
* A pointer to the rwlock structure.
*/
static inline void
cne_rwlock_read_unlock(cne_rwlock_t *rwl)
cne_rwlock_read_unlock(cne_rwlock_t *rwl) __cne_unlock_function(rwl) __cne_no_thread_safety_analysis
{
__atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE);
__atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELEASE);
}

/**
* try to take a write lock.
* Try to take a write lock.
*
* @param rwl
* A pointer to a rwlock structure.
@@ -131,15 +166,16 @@ cne_rwlock_read_unlock(cne_rwlock_t *rwl)
*/
static inline int
cne_rwlock_write_trylock(cne_rwlock_t *rwl)
__cne_exclusive_trylock_function(0, rwl) __cne_no_thread_safety_analysis
{
int32_t x;

x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
if (x != 0 ||
__atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0)
if (x < CNE_RWLOCK_WRITE && __atomic_compare_exchange_n(&rwl->cnt, &x, x + CNE_RWLOCK_WRITE, 1,
__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
return 0;
else
return -EBUSY;

return 0;
}

/**
Expand All @@ -150,19 +186,28 @@ cne_rwlock_write_trylock(cne_rwlock_t *rwl)
*/
static inline void
cne_rwlock_write_lock(cne_rwlock_t *rwl)
__cne_exclusive_lock_function(rwl) __cne_no_thread_safety_analysis
{
int32_t x;
int success = 0;

while (success == 0) {
while (1) {
x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
/* a lock is held */
if (x != 0) {
cne_pause();
continue;

/* No readers or writers? */
if (likely(x < CNE_RWLOCK_WRITE)) {
/* Turn off CNE_RWLOCK_WAIT, turn on CNE_RWLOCK_WRITE */
if (__atomic_compare_exchange_n(&rwl->cnt, &x, CNE_RWLOCK_WRITE, 1, __ATOMIC_ACQUIRE,
__ATOMIC_RELAXED))
return;
}
success =
__atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);

/* Turn on writer wait bit */
if (!(x & CNE_RWLOCK_WAIT))
__atomic_fetch_or(&rwl->cnt, CNE_RWLOCK_WAIT, __ATOMIC_RELAXED);

/* Wait until no readers before trying again */
while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > CNE_RWLOCK_WAIT)
cne_pause();
}
}

@@ -174,8 +219,26 @@ cne_rwlock_write_lock(cne_rwlock_t *rwl)
*/
static inline void
cne_rwlock_write_unlock(cne_rwlock_t *rwl)
__cne_unlock_function(rwl) __cne_no_thread_safety_analysis
{
__atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE);
__atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_WRITE, __ATOMIC_RELEASE);
}

/**
* Test if the write lock is taken.
*
* @param rwl
* A pointer to a rwlock structure.
* @return
* 1 if the write lock is currently taken; 0 otherwise.
*/
static inline int
cne_rwlock_write_is_locked(cne_rwlock_t *rwl)
{
if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & CNE_RWLOCK_WRITE)
return 1;

return 0;
}

/**
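To close out the rwlock changes, here is a hedged usage sketch of the reworked API (the shared table below and its update function are hypothetical, not part of this patch; cne_rwlock_init() must have been called on the lock once before either path runs):

#include <cne_rwlock.h> /* assumed include path */

static cne_rwlock_t tbl_lock;
static int tbl_entries; /* hypothetical shared state protected by tbl_lock */

static int
reader_path(void)
{
    int n;

    cne_rwlock_read_lock(&tbl_lock);  /* many readers may hold this at once */
    n = tbl_entries;
    cne_rwlock_read_unlock(&tbl_lock);

    return n;
}

static void
writer_path(int n)
{
    /* Try the non-blocking variant first, then fall back to the blocking one. */
    if (cne_rwlock_write_trylock(&tbl_lock) != 0)
        cne_rwlock_write_lock(&tbl_lock);

    tbl_entries = n;
    cne_rwlock_write_unlock(&tbl_lock);
}

The new read path acquires with a single fetch-add and backs the increment out if a writer slipped in, instead of looping on a compare-and-swap, and the separate CNE_RWLOCK_WAIT bit is what lets a pending writer stop a stream of new readers from starving it.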
22 changes: 2 additions & 20 deletions lib/include/cne_tailq.h
@@ -77,10 +77,8 @@ struct cne_tailq_elem {
* first parameter passed to TAILQ_HEAD macro)
*
* @return
* The return value, typecast to the appropriate
* structure pointer type.
* NULL on error, since the tailq_head is the first
* element in the cne_tailq_head structure.
* The return value, typecast to the appropriate structure pointer type. NULL on error,
* since the tailq_head is the first element in the cne_tailq_head structure.
*/
#define CNE_TAILQ_LOOKUP(name, struct_name) CNE_TAILQ_CAST(cne_tailq_lookup(name), struct_name)

@@ -123,22 +121,6 @@ CNDP_API void cne_dump_tailq(void);
*/
CNDP_API struct cne_tailq_head *cne_tailq_lookup(const char *name);

/**
* Register a tail queue.
*
* Register a tail queue from shared memory.
* This function is mainly used by some, which is used to
* register tailq from the different cndp libraries. Since this macro is a
* constructor.
*
* @param t
* The tailq element which contains the name of the tailq you want to
* create (/retrieve when in secondary process).
* @return
* 0 on success or -1 in case of an error.
*/
CNDP_API int cne_eal_tailq_register(struct cne_tailq_elem *t);

/**
* This macro permits both remove and free var within the loop safely.
*/
1 change: 1 addition & 0 deletions lib/include/meson.build
@@ -15,6 +15,7 @@ headers = files(
'cne_inet4.h',
'cne_inet6.h',
'cne_isa.h',
'cne_lock_annotations.h',
'cne_lport.h',
'cne_mutex_helper.h',
'cne_pause.h',
2 changes: 1 addition & 1 deletion lib/usr/clib/graph/graph_private.h
@@ -266,7 +266,7 @@ int graph_fp_mem_create(struct graph *graph);
*
* @return
* - 0: Success.
* - <0: Graph memzone related error.
* - <0: Graph memory related error.
*/
int graph_fp_mem_destroy(struct graph *graph);

Expand Down