diff --git a/BitFaster.Caching.Benchmarks/LockBench.cs b/BitFaster.Caching.Benchmarks/LockBench.cs
new file mode 100644
index 00000000..3443fa9a
--- /dev/null
+++ b/BitFaster.Caching.Benchmarks/LockBench.cs
@@ -0,0 +1,61 @@
+
+using System.Threading;
+using Benchly;
+using BenchmarkDotNet.Attributes;
+using BenchmarkDotNet.Jobs;
+
+namespace BitFaster.Caching.Benchmarks
+{
+    [SimpleJob(RuntimeMoniker.Net90)]
+    [MemoryDiagnoser(displayGenColumns: false)]
+    [HideColumns("Job", "Median", "RatioSD", "Alloc Ratio")]
+    [ColumnChart(Title ="Try enter ({JOB})")]
+    public class LockBench
+    {
+        private int _value;
+        private readonly object monitorLock = new object();
+#if NET9_0_OR_GREATER
+        private readonly Lock threadingLock = new Lock();
+#endif
+
+        [Benchmark(Baseline = true)]
+        public void UseMonitor()
+        {
+            bool lockTaken = false;
+            Monitor.TryEnter(monitorLock, ref lockTaken);
+
+            if (lockTaken)
+            {
+                try
+                {
+                    _value++;
+                }
+                finally
+                {
+                    if (lockTaken)
+                    {
+                        Monitor.Exit(monitorLock);
+                    }
+                }
+            }
+        }
+
+        [Benchmark()]
+        public void UseLock()
+        {
+#if NET9_0_OR_GREATER
+            if (threadingLock.TryEnter())
+            {
+                try
+                {
+                    _value++;
+                }
+                finally
+                {
+                    threadingLock.Exit();
+                }
+            }
+#endif
+        }
+    }
+}
diff --git a/BitFaster.Caching/BitFaster.Caching.csproj b/BitFaster.Caching/BitFaster.Caching.csproj
index 55f0e8dc..3d7b1e68 100644
--- a/BitFaster.Caching/BitFaster.Caching.csproj
+++ b/BitFaster.Caching/BitFaster.Caching.csproj
@@ -1,8 +1,8 @@
 <Project Sdk="Microsoft.NET.Sdk">
 
   <PropertyGroup>
-    <TargetFrameworks>netstandard2.0;netcoreapp3.1;net6.0</TargetFrameworks>
-    <LangVersion>11.0</LangVersion>
+    <TargetFrameworks>netstandard2.0;netcoreapp3.1;net6.0;net9.0</TargetFrameworks>
+    <LangVersion>13.0</LangVersion>
     <Authors>Alex Peck</Authors>
     <Product>BitFaster.Caching</Product>
diff --git a/BitFaster.Caching/Lfu/ConcurrentLfuCore.cs b/BitFaster.Caching/Lfu/ConcurrentLfuCore.cs
index 67f6dbc1..22c10c1a 100644
--- a/BitFaster.Caching/Lfu/ConcurrentLfuCore.cs
+++ b/BitFaster.Caching/Lfu/ConcurrentLfuCore.cs
@@ -64,8 +64,12 @@ internal struct ConcurrentLfuCore<K, V, N, P, T> : IBoundedPolicy
         private readonly LfuCapacityPartition capacity;
 
         internal readonly DrainStatus drainStatus = new();
 
-        private readonly object maintenanceLock = new();
+#if NET9_0_OR_GREATER
+        private readonly Lock maintenanceLock = new();
+#else
+        private readonly object maintenanceLock = new();
+#endif
 
         private readonly IScheduler scheduler;
         private readonly Action drainBuffers;
@@ -481,12 +485,15 @@ private void TryScheduleDrain()
                 return;
             }
 
+#if NET9_0_OR_GREATER
+            if (maintenanceLock.TryEnter())
+#else
             bool lockTaken = false;
-            try
+            Monitor.TryEnter(maintenanceLock, ref lockTaken);
+            if (lockTaken)
+#endif
             {
-                Monitor.TryEnter(maintenanceLock, ref lockTaken);
-
-                if (lockTaken)
+                try
                 {
                     int status = this.drainStatus.NonVolatileRead();
 
@@ -498,12 +505,16 @@ private void TryScheduleDrain()
                     this.drainStatus.VolatileWrite(DrainStatus.ProcessingToIdle);
                     scheduler.Run(this.drainBuffers);
                 }
-            }
-            finally
-            {
-                if (lockTaken)
-                {
-                    Monitor.Exit(maintenanceLock);
+                finally
+                {
+#if NET9_0_OR_GREATER
+                    maintenanceLock.Exit();
+#else
+                    if (lockTaken)
+                    {
+                        Monitor.Exit(maintenanceLock);
+                    }
+#endif
                 }
             }
         }
diff --git a/BitFaster.Caching/Lru/ConcurrentLruCore.cs b/BitFaster.Caching/Lru/ConcurrentLruCore.cs
index 6e66987c..7719d3ce 100644
--- a/BitFaster.Caching/Lru/ConcurrentLruCore.cs
+++ b/BitFaster.Caching/Lru/ConcurrentLruCore.cs
@@ -892,6 +892,151 @@ private static Optional<ICacheEvents<K, V>> CreateEvents(ConcurrentLruCore<K, V, I, P, T> lru)
             }
         }
 
+#if NET9_0_OR_GREATER
+
+        [MethodImpl(MethodImplOptions.AggressiveInlining)]
+        private static bool IsCompatibleKey<TAlternateKey>(ConcurrentDictionary<K, I> d)
+            where TAlternateKey : notnull, allows ref struct
+        {
+            return d.Comparer is IAlternateEqualityComparer<TAlternateKey, K>;
+        }
+
+        [MethodImpl(MethodImplOptions.AggressiveInlining)]
+        private static IAlternateEqualityComparer<TAlternateKey, K> GetAlternateComparer<TAlternateKey>(ConcurrentDictionary<K, I> d)
+            where TAlternateKey : notnull, allows ref struct
+        {
+            Debug.Assert(IsCompatibleKey<TAlternateKey>(d));
+            return Unsafe.As<IAlternateEqualityComparer<TAlternateKey, K>>(d.Comparer!);
+        }
+
+        public IAlternateCache<TAlternateKey> GetAlternateCache<TAlternateKey>() where TAlternateKey : notnull, allows ref struct
+        {
+            if (!IsCompatibleKey<TAlternateKey>(this.dictionary))
+            {
+                Throw.IncompatibleComparer();
+            }
+
+            return new AlternateCache<TAlternateKey>(this);
+        }
+
+        public bool TryGetAlternateCache<TAlternateKey>([MaybeNullWhen(false)] out IAlternateCache<TAlternateKey> lookup) where TAlternateKey : notnull, allows ref struct
+        {
+            if (IsCompatibleKey<TAlternateKey>(this.dictionary))
+            {
+                lookup = new AlternateCache<TAlternateKey>(this);
+                return true;
+            }
+
+            lookup = default;
+            return false;
+        }
+
+        // Rough idea of alternate cache interface
+        // Note: we need a sync and async variant, plumbed into ICache and IAsyncCache.
+        public interface IAlternateCache<TAlternateKey> where TAlternateKey : notnull, allows ref struct
+        {
+            bool TryGet(TAlternateKey key, [MaybeNullWhen(false)] out V value);
+
+            bool TryRemove(TAlternateKey key, [MaybeNullWhen(false)] out K actualKey, [MaybeNullWhen(false)] out V value);
+
+            V GetOrAdd(TAlternateKey altKey, Func<TAlternateKey, V> valueFactory);
+
+            V GetOrAdd<TArg>(TAlternateKey altKey, Func<TAlternateKey, TArg, V> valueFactory, TArg factoryArgument);
+
+            // TryUpdate
+            // AddOrUpdate
+        }
+
+        internal readonly struct AlternateCache<TAlternateKey> : IAlternateCache<TAlternateKey> where TAlternateKey : notnull, allows ref struct
+        {
+            /// <summary>Initialize the instance. The dictionary must have already been verified to have a compatible comparer.</summary>
+            internal AlternateCache(ConcurrentLruCore<K, V, I, P, T> lru)
+            {
+                Debug.Assert(lru is not null);
+                Debug.Assert(IsCompatibleKey<TAlternateKey>(lru.dictionary));
+                Lru = lru;
+            }
+
+            internal ConcurrentLruCore<K, V, I, P, T> Lru { get; }
+
+            public bool TryGet(TAlternateKey key, [MaybeNullWhen(false)] out V value)
+            {
+                var alternate = this.Lru.dictionary.GetAlternateLookup<TAlternateKey>();
+
+                if (alternate.TryGetValue(key, out var item))
+                {
+                    return Lru.GetOrDiscard(item, out value);
+                }
+
+                value = default;
+                Lru.telemetryPolicy.IncrementMiss();
+                return false;
+            }
+
+            public bool TryRemove(TAlternateKey key, [MaybeNullWhen(false)] out K actualKey, [MaybeNullWhen(false)] out V value)
+            {
+                var alternate = this.Lru.dictionary.GetAlternateLookup<TAlternateKey>();
+
+                if (alternate.TryGetValue(key, out var item))
+                {
+                    Lru.OnRemove(item.Key, item, ItemRemovedReason.Removed);
+                    actualKey = item.Key;
+                    value = item.Value;
+                    return true;
+                }
+
+                actualKey = default;
+                value = default;
+                return false;
+            }
+
+            public V GetOrAdd(TAlternateKey altKey, Func<TAlternateKey, V> valueFactory)
+            {
+                var alternate = this.Lru.dictionary.GetAlternateLookup<TAlternateKey>();
+
+                while (true)
+                {
+                    if (alternate.TryGetValue(altKey, out var item))
+                    {
+                        return item.Value;
+                    }
+
+                    // We cannot avoid allocating the key since it is required for item policy etc. Thus fall back to Lru for add.
+                    // The value factory may be called concurrently for the same key, but the first write to the dictionary wins.
+                    K key = GetAlternateComparer<TAlternateKey>(this.Lru.dictionary).Create(altKey);
+                    V value = valueFactory(altKey);
+                    if (Lru.TryAdd(key, value))
+                    {
+                        return value;
+                    }
+                }
+            }
+
+            public V GetOrAdd<TArg>(TAlternateKey altKey, Func<TAlternateKey, TArg, V> valueFactory, TArg factoryArgument)
+            {
+                var alternate = this.Lru.dictionary.GetAlternateLookup<TAlternateKey>();
+
+                while (true)
+                {
+                    if (alternate.TryGetValue(altKey, out var item))
+                    {
+                        return item.Value;
+                    }
+
+                    // We cannot avoid allocating the key since it is required for item policy etc. Thus fall back to Lru for add.
+                    // The value factory may be called concurrently for the same key, but the first write to the dictionary wins.
+                    K key = GetAlternateComparer<TAlternateKey>(this.Lru.dictionary).Create(altKey);
+                    V value = valueFactory(altKey, factoryArgument);
+                    if (Lru.TryAdd(key, value))
+                    {
+                        return value;
+                    }
+                }
+            }
+        }
+
+#endif
+
         // To get JIT optimizations, policies must be structs.
         // If the structs are returned directly via properties, they will be copied. Since
         // telemetryPolicy is a mutable struct, copy is bad. One workaround is to store the
diff --git a/BitFaster.Caching/Throw.cs b/BitFaster.Caching/Throw.cs
index 9870e697..b33bf705 100644
--- a/BitFaster.Caching/Throw.cs
+++ b/BitFaster.Caching/Throw.cs
@@ -26,6 +26,9 @@ internal static class Throw
         [DoesNotReturn]
        public static void Disposed<T>() => throw CreateObjectDisposedException<T>();
 
+        [DoesNotReturn]
+        public static void IncompatibleComparer() => throw new InvalidOperationException("Incompatible comparer");
+
         [MethodImpl(MethodImplOptions.NoInlining)]
         private static ArgumentNullException CreateArgumentNullException(ExceptionArgument arg) => new ArgumentNullException(GetArgumentString(arg));
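
As a rough usage sketch of the alternate-key surface proposed in ConcurrentLruCore.cs above: the snippet below assumes GetAlternateCache<TAlternateKey>() is eventually exposed on the public ConcurrentLru<K, V> (in this diff it exists only on the internal core), that the (concurrencyLevel, capacity, comparer) constructor is used, and that ParseValue and AlternateKeyExample are hypothetical names. On .NET 9, StringComparer.Ordinal implements IAlternateEqualityComparer<ReadOnlySpan<char>, string>, which is exactly what IsCompatibleKey/GetAlternateComparer check for.

    // Illustration only; GetAlternateCache on ConcurrentLru is an assumed public wrapper here.
    using System;
    using BitFaster.Caching.Lru;

    public static class AlternateKeyExample
    {
        // Hypothetical value factory operating directly on the span key.
        private static int ParseValue(ReadOnlySpan<char> text) => text.Length;

        public static void Run()
        {
            // StringComparer.Ordinal supports ReadOnlySpan<char> alternate keys on .NET 9.
            var lru = new ConcurrentLru<string, int>(Environment.ProcessorCount, 128, StringComparer.Ordinal);

            // Assumed wrapper over the internal GetAlternateCache<TAlternateKey>().
            var spanCache = lru.GetAlternateCache<ReadOnlySpan<char>>();

            ReadOnlySpan<char> key = "hello world".AsSpan(0, 5);

            // Miss path: the comparer's Create(altKey) materializes the string key once,
            // then the TryAdd loop either wins or retries against a racing writer.
            int added = spanCache.GetOrAdd(key, static k => ParseValue(k));

            // Hit path: no string allocation; the span probes the dictionary directly.
            spanCache.TryGet(key, out int hit);

            Console.WriteLine($"{added} {hit}");
        }
    }

The intent of the design is that only the miss path pays for key materialization; hits and removals stay allocation-free because ConcurrentDictionary.GetAlternateLookup probes with the span key itself.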