Yet another SeqLock implementation
⚓ Rust 📅 2025-09-10 👤 surdeus 👁️ 7

The "special" feature of this seqlock implementation, compared to the seqlock crate for example, is that it wraps a Mutex but does not require the whole mutex-protected type to be Copy. Instead, the user passes a closure with higher-kinded generic arguments that returns a Copy subset of the struct.
I think this is sound, but I'd like a second opinion.
// This version uses the unstable feature `mutex_data_ptr` for simplicity,
// but the same logic can be used in stable Rust through parking_lot.
#![feature(mutex_data_ptr)]
use std::cell::UnsafeCell;
use std::marker::PhantomPinned;
use std::mem::MaybeUninit;
use std::ops::Deref;
use std::sync::{Mutex, MutexGuard};
use std::sync::atomic::{Ordering, AtomicU32};
/// A value exposed to the outside world through a seqlock. Because of
/// this, direct mutation is impossible even through a
/// `&mut SeqLockData<T>`; every write must be funneled through the
/// containing [`WithSeqLock`].
// NOTE: PhantomPinned opts the type out of Unpin, which (per the author)
// avoids retags on writes.
pub struct SeqLockData<T: Copy>(UnsafeCell<MaybeUninit<T>>, PhantomPinned);

// SAFETY: NOTE(review) — relies on the seqlock protocol in `WithSeqLock`:
// writers are serialized by the mutex and readers retry on any overlap,
// so cross-thread access to the cell is never interpreted while torn.
unsafe impl<T: Copy> Send for SeqLockData<T> {}
unsafe impl<T: Copy> Sync for SeqLockData<T> {}

impl<T: Copy> SeqLockData<T> {
    /// Wraps `data` so it can later be published through a seqlock.
    pub const fn new(data: T) -> Self {
        let cell = UnsafeCell::new(MaybeUninit::new(data));
        Self(cell, PhantomPinned)
    }
}

impl<T: Copy> From<T> for SeqLockData<T> {
    fn from(data: T) -> Self {
        SeqLockData::new(data)
    }
}

impl<T: Copy> Deref for SeqLockData<T> {
    type Target = T;

    /// Plain (non-seqlocked) read access to the wrapped value.
    fn deref(&self) -> &T {
        // SAFETY: the cell is always initialized by `new()`. Writes only
        // happen through `WithSeqLock::write()`, which requires exclusive
        // access to the containing value, so no write can overlap with
        // this shared borrow.
        unsafe { (*self.0.get()).assume_init_ref() }
    }
}
/// A wrapper around [`Mutex`] that additionally exposes one or more of
/// the protected fields through a seqlock, so they can be read without
/// taking the lock.
///
/// Safety rests on two constraints. First, writing a `SeqLockData`
/// requires going through this struct, which in turn demands a
/// `MutexGuard`; a `SeqLockData` that is *not* wrapped in a
/// `WithSeqLock` therefore cannot be modified, and returning one from
/// the closure cannot race. The one exception is `static mut`, which is
/// already unsafe.
///
/// Second, the higher-kinded closure types on [`WithSeqLock::read()`]
/// and [`WithSeqLock::write()`] prevent extracting data (e.g. via a
/// guard) from a `SeqLockData` unrelated to the closure's argument,
/// since the returned value would need to live for `'static`. Again,
/// only `static`s escape this, and those are either unmodifiable or
/// unsafe as above.
pub struct WithSeqLock<Outer> {
    /// The lock-protected value; some of its fields may be `SeqLockData`s.
    outer: Mutex<Outer>,
    /// Seqlock generation counter; odd while a write is in flight.
    count: AtomicU32,
}
impl<Outer> WithSeqLock<Outer> {
    /// Creates a new seqlock-augmented mutex around `data`.
    pub const fn new(data: Outer) -> Self {
        WithSeqLock {
            count: AtomicU32::new(0),
            outer: Mutex::new(data),
        }
    }

    /// Reads the `SeqLockData` selected by `f` without taking the mutex,
    /// retrying until a consistent (untorn) snapshot is observed.
    ///
    /// The higher-kinded bound on `F` ties the returned reference's
    /// lifetime to the borrow of `Outer`, so `f` cannot return a
    /// `SeqLockData` unrelated to this lock (modulo `static`s; see the
    /// type-level documentation).
    pub fn read<T: Copy, F: for<'a> Fn(&'a Outer) -> &'a SeqLockData<T>>(&self, f: F) -> T {
        // SAFETY: the data is wrapped with MaybeUninit<>, and it is not
        // interpreted before ascertaining that it's consistent
        let outer: &Outer = unsafe { &*self.outer.data_ptr() };
        let inner = f(outer).0.get();
        loop {
            let ver = self.read_begin();
            // SAFETY: as above
            unsafe {
                // The bytes may be torn by a concurrent writer; the value
                // is only `assume_init`ed after `read_retry` confirms no
                // write overlapped the volatile read.
                let result = std::ptr::read_volatile(inner);
                if !self.read_retry(ver) {
                    break result.assume_init()
                }
            }
        }
    }

    /// Writes `value` into the `SeqLockData` selected by `f`, bumping the
    /// sequence counter around the store so concurrent readers retry.
    ///
    /// # Panics
    ///
    /// Panics if `g` does not guard *this* lock's data. Without this
    /// check a guard borrowed from a *different* `Mutex<Outer>` would let
    /// the write race with lock-free readers of that other value, because
    /// only `self.count` — not the other lock's counter — is bumped here.
    pub fn write<T: Copy, F: for<'a> FnOnce(&'a mut Outer) -> &'a mut SeqLockData<T>>(&self, g: &mut MutexGuard<Outer>, value: T, f: F) {
        let guarded: *mut Outer = &mut **g;
        assert!(
            std::ptr::eq(guarded, self.outer.data_ptr()),
            "MutexGuard does not belong to this WithSeqLock"
        );
        let inner = f(&mut **g).0.get();
        self.write_begin();
        // SAFETY: the data is never written concurrently, thanks to `g`
        unsafe { std::ptr::write_volatile(inner as *mut T, value); }
        self.write_end();
    }

    /// Returns the current sequence number with bit 0 masked off, for
    /// later comparison in `read_retry`. Masking makes an in-progress
    /// write (odd counter) always fail that comparison.
    fn read_begin(&self) -> u32 {
        self.count.load(Ordering::Acquire) & !1
    }

    /// Returns true if a write began or completed since `ver` was taken,
    /// i.e. the snapshot read in between must be discarded.
    fn read_retry(&self, ver: u32) -> bool {
        // Acquire fence: keeps the preceding volatile data read from
        // being reordered past the counter re-check below.
        std::sync::atomic::fence(Ordering::Acquire);
        // ordering provided by the preceding fence
        self.count.load(Ordering::Relaxed) != ver
    }

    /// Marks a write as in progress (counter becomes odd). The load+store
    /// pair is not an atomic RMW, which is fine because writers are
    /// serialized by the mutex.
    fn write_begin(&self) {
        // ordering provided by the subsequent fence
        self.count.store(self.count.load(Ordering::Relaxed) + 1, Ordering::Relaxed);
        std::sync::atomic::fence(Ordering::Release);
    }

    /// Marks the write as finished (counter even again); the Release
    /// store publishes the written data to retrying readers.
    fn write_end(&self) {
        self.count.store(self.count.load(Ordering::Relaxed) + 1, Ordering::Release);
    }
}
impl<Outer> Deref for WithSeqLock<Outer> {
    type Target = Mutex<Outer>;

    /// Exposes the inner [`Mutex`] directly, e.g. for `lock()`.
    fn deref(&self) -> &Self::Target {
        &self.outer
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal outer struct exposing one seqlocked field.
    struct S {
        datum: SeqLockData<u32>,
    }

    #[test]
    fn test_seqlock() {
        let lock = WithSeqLock::new(S { datum: 42.into() });
        // Lock-free read outside the mutex.
        assert_eq!(lock.read(|s| &s.datum), 42);
        {
            let mut guard = lock.lock().unwrap();
            // Transparent (Deref-based) read while holding the mutex.
            assert_eq!(*guard.datum, 42);
            lock.write(&mut guard, 123, |s| &mut s.datum);
        }
        // The write must be visible to subsequent lock-free reads.
        assert_eq!(lock.read(|s| &s.datum), 123);
    }

    #[test]
    fn test_seqlock_race() {
        let lock = WithSeqLock::new(S { datum: 42.into() });
        let snapshot = lock.read_begin();
        // No writer yet: the snapshot is still valid.
        assert!(!lock.read_retry(snapshot));
        lock.write_begin();
        // Write in flight: readers must retry.
        assert!(lock.read_retry(snapshot));
        lock.write_end();
        // Write finished, but the counter moved on: still retry.
        assert!(lock.read_retry(snapshot));
    }

    #[test]
    fn test_seqlock_race2() {
        let lock = WithSeqLock::new(S { datum: 42.into() });
        lock.write_begin();
        // read_begin masks off the "write in progress" bit, so the later
        // counter comparison can never succeed while a write is live.
        let snapshot = lock.read_begin();
        assert!(lock.read_retry(snapshot));
        lock.write_end();
        assert!(lock.read_retry(snapshot));
    }

    #[test]
    #[allow(static_mut_refs)]
    fn test_seqlock_wtf_but_sound() {
        static mut SOMETHING_ELSE: SeqLockData<u32> = SeqLockData::new(77);
        let lock = WithSeqLock::new(S { datum: 42.into() });
        // The closure may ignore its argument and return a `static mut`;
        // the type-level docs argue why this remains sound.
        assert_eq!(lock.read(|_| unsafe { &SOMETHING_ELSE }), 77);
    }
}
6 posts - 4 participants
🏷️ Rust_feed