Skip to content

Commit

Permalink
update rwlock to be fair
Browse files Browse the repository at this point in the history
Update rwlock code from dpdk to be fair when locking/unlocking for all
threads.

Signed-off-by: Keith Wiles <[email protected]>
  • Loading branch information
KeithWiles committed Sep 12, 2023
1 parent 88abd89 commit 46e7e9d
Show file tree
Hide file tree
Showing 3 changed files with 164 additions and 38 deletions.
62 changes: 62 additions & 0 deletions lib/include/cne_lock_annotations.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2022 Red Hat, Inc.
 */

#ifndef CNE_LOCK_ANNOTATIONS_H
#define CNE_LOCK_ANNOTATIONS_H

/**
 * @file
 * Lock annotation wrappers.
 *
 * These macros wrap compiler thread-safety-analysis attributes (the
 * attribute names match Clang's -Wthread-safety analysis).  When
 * CNE_ANNOTATE_LOCKS is defined, each macro expands to the matching
 * __attribute__((...)); otherwise every macro expands to nothing, so
 * annotated code builds unchanged on compilers without the attributes.
 */

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CNE_ANNOTATE_LOCKS

/* Marks a type as a lock (capability) the analysis can track. */
#define __cne_lockable __attribute__((lockable))

/* Data that may only be accessed while holding the named lock(s). */
#define __cne_guarded_by(...) __attribute__((guarded_by(__VA_ARGS__)))
#define __cne_guarded_var __attribute__((guarded_var))

/* Exclusive (writer-side) annotations: caller must hold, function
 * acquires, function try-acquires (returning 'ret' on success), or
 * function asserts it already holds the lock exclusively. */
#define __cne_exclusive_locks_required(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
#define __cne_exclusive_lock_function(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
#define __cne_exclusive_trylock_function(ret, ...) \
__attribute__((exclusive_trylock_function(ret, __VA_ARGS__)))
#define __cne_assert_exclusive_lock(...) __attribute__((assert_exclusive_lock(__VA_ARGS__)))

/* Shared (reader-side) annotations, mirroring the exclusive set. */
#define __cne_shared_locks_required(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define __cne_shared_lock_function(...) __attribute__((shared_lock_function(__VA_ARGS__)))
#define __cne_shared_trylock_function(ret, ...) \
__attribute__((shared_trylock_function(ret, __VA_ARGS__)))
#define __cne_assert_shared_lock(...) __attribute__((assert_shared_lock(__VA_ARGS__)))

/* Function releases the lock before returning. */
#define __cne_unlock_function(...) __attribute__((unlock_function(__VA_ARGS__)))

/* Opt a function out of the analysis (used by lock implementations
 * themselves, whose internals confuse the checker). */
#define __cne_no_thread_safety_analysis __attribute__((no_thread_safety_analysis))

#else /* ! CNE_ANNOTATE_LOCKS */

/* Analysis disabled: every annotation compiles away to nothing. */
#define __cne_lockable

#define __cne_guarded_by(...)
#define __cne_guarded_var

#define __cne_exclusive_locks_required(...)
#define __cne_exclusive_lock_function(...)
#define __cne_exclusive_trylock_function(...)
#define __cne_assert_exclusive_lock(...)

#define __cne_shared_locks_required(...)
#define __cne_shared_lock_function(...)
#define __cne_shared_trylock_function(...)
#define __cne_assert_shared_lock(...)

#define __cne_unlock_function(...)

#define __cne_no_thread_safety_analysis

#endif /* CNE_ANNOTATE_LOCKS */

#ifdef __cplusplus
}
#endif

#endif /* CNE_LOCK_ANNOTATIONS_H */
139 changes: 101 additions & 38 deletions lib/include/cne_rwlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,17 @@
* one writer. All readers are blocked until the writer is finished
* writing.
*
* This version does not give preference to readers or writers
* and does not starve either readers or writers.
*
* See also:
* https://locklessinc.com/articles/locks/
*/

#include <cne_branch_prediction.h>
#include <cne_spinlock.h>
#include <cne_common.h>
#include <cne_lock_annotations.h>
#include <cne_pause.h>

#ifdef __cplusplus
Expand All @@ -29,10 +35,28 @@ extern "C" {
/**
 * The cne_rwlock_t type.
 *
 * Readers increment the counter by CNE_RWLOCK_READ (4).
 * Writers set the CNE_RWLOCK_WRITE bit when the lock is held
 * and set the CNE_RWLOCK_WAIT bit while waiting, which blocks
 * new readers from starving a pending writer.
 *
 *  31                 2 1 0
 * +-------------------+-+-+
 * |     readers       | | |
 * +-------------------+-+-+
 *                      ^ ^
 *                      | |
 * WRITE: lock held ----/ |
 * WAIT: writer pending --/
 */

#define CNE_RWLOCK_WAIT 0x1  /* Writer is waiting */
#define CNE_RWLOCK_WRITE 0x2 /* Writer has the lock */
#define CNE_RWLOCK_MASK (CNE_RWLOCK_WAIT | CNE_RWLOCK_WRITE)
/* Writer is waiting or has lock */
#define CNE_RWLOCK_READ 0x4 /* Reader increment */

typedef struct __cne_lockable {
    int32_t cnt; /**< Reader count in bits 2..31; WAIT/WRITE flags in bits 0..1. */
} cne_rwlock_t;

/**
Expand Down Expand Up @@ -63,24 +87,29 @@ cne_rwlock_init(cne_rwlock_t *rwl)
*/
static inline void
cne_rwlock_read_lock(cne_rwlock_t *rwl)
    __cne_shared_lock_function(rwl) __cne_no_thread_safety_analysis
{
    int32_t x;

    while (1) {
        /* Wait while a writer holds the lock or is pending; deferring to a
         * pending writer is what keeps the lock fair (no writer starvation). */
        while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & CNE_RWLOCK_MASK)
            cne_pause();

        /* Optimistically register as a reader. */
        x = __atomic_fetch_add(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_ACQUIRE) + CNE_RWLOCK_READ;

        /* If no writer bits are set, the read lock was acquired. */
        if (likely(!(x & CNE_RWLOCK_MASK)))
            return;

        /* Lost the race with a writer; back out the reader count and retry. */
        __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELAXED);
    }
}

/**
* try to take a read lock.
* Try to take a read lock.
*
* @param rwl
* A pointer to a rwlock structure.
Expand All @@ -91,19 +120,25 @@ cne_rwlock_read_lock(cne_rwlock_t *rwl)
*/
static inline int
cne_rwlock_read_trylock(cne_rwlock_t *rwl)
    __cne_shared_trylock_function(0, rwl) __cne_no_thread_safety_analysis
{
    int32_t x;

    x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);

    /* Fail fast if the write lock is held or a writer is pending. */
    if (x & CNE_RWLOCK_MASK)
        return -EBUSY;

    /* Optimistically register as a reader. */
    x = __atomic_fetch_add(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_ACQUIRE) + CNE_RWLOCK_READ;

    /* A writer raced in between the load and the add; back out and fail. */
    if (unlikely(x & CNE_RWLOCK_MASK)) {
        __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELEASE);

        return -EBUSY;
    }
    return 0;
}

Expand All @@ -114,13 +149,13 @@ cne_rwlock_read_trylock(cne_rwlock_t *rwl)
* A pointer to the rwlock structure.
*/
static inline void
cne_rwlock_read_unlock(cne_rwlock_t *rwl) __cne_unlock_function(rwl) __cne_no_thread_safety_analysis
{
    /* Drop this reader's count; RELEASE orders the critical section before it. */
    __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELEASE);
}

/**
* try to take a write lock.
* Try to take a write lock.
*
* @param rwl
* A pointer to a rwlock structure.
Expand All @@ -131,15 +166,16 @@ cne_rwlock_read_unlock(cne_rwlock_t *rwl)
*/
static inline int
cne_rwlock_write_trylock(cne_rwlock_t *rwl)
    __cne_exclusive_trylock_function(0, rwl) __cne_no_thread_safety_analysis
{
    int32_t x;

    x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);
    /* x < CNE_RWLOCK_WRITE means no readers and no writer (only the WAIT
     * bit may be set); a single CAS then claims the write lock. */
    if (x < CNE_RWLOCK_WRITE && __atomic_compare_exchange_n(&rwl->cnt, &x, x + CNE_RWLOCK_WRITE, 1,
                                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        return 0;
    else
        return -EBUSY;
}

/**
Expand All @@ -150,19 +186,28 @@ cne_rwlock_write_trylock(cne_rwlock_t *rwl)
*/
static inline void
cne_rwlock_write_lock(cne_rwlock_t *rwl)
    __cne_exclusive_lock_function(rwl) __cne_no_thread_safety_analysis
{
    int32_t x;

    while (1) {
        x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);

        /* No readers and no writer (at most the WAIT bit is set)? */
        if (likely(x < CNE_RWLOCK_WRITE)) {
            /* Claim the lock: clears CNE_RWLOCK_WAIT, sets CNE_RWLOCK_WRITE. */
            if (__atomic_compare_exchange_n(&rwl->cnt, &x, CNE_RWLOCK_WRITE, 1, __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED))
                return;
        }

        /* Publish that a writer is waiting so new readers hold off. */
        if (!(x & CNE_RWLOCK_WAIT))
            __atomic_fetch_or(&rwl->cnt, CNE_RWLOCK_WAIT, __ATOMIC_RELAXED);

        /* Spin until all readers drain (only the WAIT bit may remain). */
        while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > CNE_RWLOCK_WAIT)
            cne_pause();
    }
}

Expand All @@ -174,8 +219,26 @@ cne_rwlock_write_lock(cne_rwlock_t *rwl)
*/
static inline void
cne_rwlock_write_unlock(cne_rwlock_t *rwl)
    __cne_unlock_function(rwl) __cne_no_thread_safety_analysis
{
    /* Clear only the WRITE bit (not a plain store of 0) so a concurrently
     * set CNE_RWLOCK_WAIT bit from another pending writer is preserved. */
    __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_WRITE, __ATOMIC_RELEASE);
}

/**
* Test if the write lock is taken.
*
* @param rwl
* A pointer to a rwlock structure.
* @return
* 1 if the write lock is currently taken; 0 otherwise.
*/
static inline int
cne_rwlock_write_is_locked(cne_rwlock_t *rwl)
{
    /* Sample the lock word and report whether a writer currently holds it. */
    int32_t v = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED);

    return (v & CNE_RWLOCK_WRITE) ? 1 : 0;
}

/**
Expand Down
1 change: 1 addition & 0 deletions lib/include/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ headers = files(
'cne_inet4.h',
'cne_inet6.h',
'cne_isa.h',
'cne_lock_annotations.h',
'cne_lport.h',
'cne_mutex_helper.h',
'cne_pause.h',
Expand Down

0 comments on commit 46e7e9d

Please sign in to comment.