
util: Rename SpinLock functions
marv7000 committed Nov 20, 2024
1 parent 1010a04 commit 770cc6f
Showing 21 changed files with 101 additions and 98 deletions.
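In summary, the commit renames the SpinLock API: spin_acquire → spin_try_lock, spin_acquire_force → spin_lock, spin_free → spin_unlock, and the scoped helper macro spin_lock → spin_lock_scope.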
12 changes: 6 additions & 6 deletions include/menix/util/spin.h
@@ -17,22 +17,22 @@ typedef struct
 	0 \
 	}
 
-#define spin_lock(lock, scope) \
-	spin_acquire_force(lock); \
+#define spin_lock_scope(lock, scope) \
+	spin_lock(lock); \
 	do \
 		scope while (0); \
-	spin_free(lock);
+	spin_unlock(lock);
 
 // Toggles if spinlocks do anything or not. Used for single processor machines/during setup.
 void spin_use(bool on);
 
 // Attempt to acquire the lock.
 // Returns true if successful.
-bool spin_acquire(SpinLock* lock);
+bool spin_try_lock(SpinLock* lock);
 
 // Attempt to acquire the lock.
 // If unsuccessful, attempts again.
-void spin_acquire_force(SpinLock* lock);
+void spin_lock(SpinLock* lock);
 
 // Frees the lock if it was previously locked.
-void spin_free(SpinLock* lock);
+void spin_unlock(SpinLock* lock);
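The diff above touches only declarations; the implementations live elsewhere in the tree. As a minimal sketch of what the renamed functions could look like when built on C11 atomics (the SpinLock field, the spin_new() body, the memory orderings, and the omission of spin_use are assumptions for illustration, not menix's actual implementation):

#include <stdatomic.h>
#include <stdbool.h>

// Hypothetical layout; the real struct body and spin_new() are collapsed out of this diff.
typedef struct
{
	atomic_bool locked;
} SpinLock;

#define spin_new() {0}

// Attempt to acquire the lock.
// Returns true if successful.
bool spin_try_lock(SpinLock* lock)
{
	bool expected = false;
	return atomic_compare_exchange_strong_explicit(&lock->locked, &expected, true,
												   memory_order_acquire, memory_order_relaxed);
}

// Attempt to acquire the lock.
// If unsuccessful, attempts again.
void spin_lock(SpinLock* lock)
{
	while (!spin_try_lock(lock))
		;
}

// Frees the lock if it was previously locked.
void spin_unlock(SpinLock* lock)
{
	atomic_store_explicit(&lock->locked, false, memory_order_release);
}

A hypothetical caller of the renamed spin_lock_scope macro, using the definition from the header above:

static SpinLock counter_lock = spin_new();
static int counter = 0;

void counter_increment(void)
{
	// Expands to: spin_lock(&counter_lock); do { counter++; } while (0); spin_unlock(&counter_lock);
	spin_lock_scope(&counter_lock, { counter++; });
}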
6 changes: 3 additions & 3 deletions kernel/arch/riscv64/system/arch.c
@@ -13,20 +13,20 @@ static SpinLock cpu_lock = spin_new();
 void arch_init_cpu(Cpu* cpu, Cpu* boot)
 {
 	// Make sure no other memory accesses happen before the CPUs are initialized.
-	spin_acquire_force(&cpu_lock);
+	spin_lock(&cpu_lock);
 
 	// TODO: CPU init.
 
 	if (cpu->id != boot->id)
 	{
 		boot_info->cpu_active += 1;
-		spin_free(&cpu_lock);
+		spin_unlock(&cpu_lock);
 		asm_interrupt_disable();
 		while (1)
 			asm volatile("wfi");
 	}
 	boot_info->cpu_active += 1;
-	spin_free(&cpu_lock);
+	spin_unlock(&cpu_lock);
 }
 
 void arch_early_init(BootInfo* info)
32 changes: 16 additions & 16 deletions kernel/arch/x86_64/memory/vm.c
@@ -118,7 +118,7 @@ static usize* vm_x86_get_pte(PageMap* page_map, VirtAddr virt_addr, bool allocat
 		cur_head = vm_x86_traverse(cur_head, index, allocate);
 		if (cur_head == NULL)
 		{
-			spin_free(&page_map->lock);
+			spin_unlock(&page_map->lock);
 			return NULL;
 		}
 	}
@@ -148,14 +148,14 @@ void vm_page_map_destroy(PageMap* map)
 
 PageMap* vm_page_map_fork(PageMap* source)
 {
-	spin_acquire_force(&source->lock);
+	spin_lock(&source->lock);
 	PageMap* result = vm_page_map_new(source->size);
 
 	if (result == NULL)
 		goto fail;
 
 fail:
-	spin_free(&source->lock);
+	spin_unlock(&source->lock);
 	if (result != NULL)
 		vm_page_map_destroy(result);
 	return result;
@@ -166,7 +166,7 @@ bool vm_x86_remap(PageMap* page_map, VirtAddr virt_addr, usize flags)
 	kassert(page_map != NULL, "No page map was provided! Unable to remap page 0x%p to 0x%p!", pm_get_phys_base(),
 			virt_addr);
 
-	spin_acquire_force(&page_map->lock);
+	spin_lock(&page_map->lock);
 	u64* cur_head = page_map->head;
 	usize index = 0;
 
@@ -189,22 +189,22 @@ bool vm_x86_remap(PageMap* page_map, VirtAddr virt_addr, usize flags)
 		cur_head = vm_x86_traverse(cur_head, index, false);
 		if (cur_head == NULL)
 		{
-			spin_free(&page_map->lock);
+			spin_unlock(&page_map->lock);
 			return false;
 		}
 	}
 
 	if ((cur_head[index] & PT_PRESENT) == 0)
 	{
-		spin_free(&page_map->lock);
+		spin_unlock(&page_map->lock);
 		return false;
 	}
 
 	// Clear old flags.
 	cur_head[index] &= PT_ADDR_MASK;
 	// Set new ones.
 	cur_head[index] |= (flags & ~(PT_ADDR_MASK));
-	spin_free(&page_map->lock);
+	spin_unlock(&page_map->lock);
 
 	return true;
 }
@@ -214,7 +214,7 @@ bool vm_unmap(PageMap* page_map, VirtAddr virt_addr)
 	kassert(page_map != NULL, "No page map was provided! Unable to remap page 0x%p to 0x%p!", pm_get_phys_base(),
 			virt_addr);
 
-	spin_acquire_force(&page_map->lock);
+	spin_lock(&page_map->lock);
 	const usize virt_val = (usize)virt_addr;
 	u64* cur_head = page_map->head;
 	usize index = 0;
@@ -233,31 +233,31 @@ bool vm_unmap(PageMap* page_map, VirtAddr virt_addr)
 		cur_head = vm_x86_traverse(cur_head, index, false);
 		if (cur_head == NULL)
 		{
-			spin_free(&page_map->lock);
+			spin_unlock(&page_map->lock);
 			return false;
 		}
 	}
 
 	if ((cur_head[index] & PT_PRESENT) == 0)
 	{
-		spin_free(&page_map->lock);
+		spin_unlock(&page_map->lock);
 		return false;
 	}
 
 	// Clear everything.
 	cur_head[index] = 0;
 	vm_flush_tlb(virt_addr);
-	spin_free(&page_map->lock);
+	spin_unlock(&page_map->lock);
 
 	return true;
 }
 
 PhysAddr vm_virt_to_phys(PageMap* page_map, VirtAddr address)
 {
 	kassert(page_map != NULL, "page_map may not be null!");
-	spin_acquire_force(&page_map->lock);
+	spin_lock(&page_map->lock);
 	usize* pte = vm_x86_get_pte(page_map, address, false);
-	spin_free(&page_map->lock);
+	spin_unlock(&page_map->lock);
 
 	// If the page is not present or the entry doesn't exist, we can't return a physical address.
 	if (pte == NULL || (*pte & PT_PRESENT) == false)
@@ -300,7 +300,7 @@ bool vm_map(PageMap* page_map, PhysAddr phys_addr, VirtAddr virt_addr, VMProt pr
 	kassert(page_map != NULL, "No page map was provided! Unable to map page 0x%p to 0x%p!", phys_addr, virt_addr);
 	kassert(phys_addr % arch_page_size == 0, "Physical address is not page aligned! Value: 0x%p", phys_addr);
 
-	spin_acquire_force(&page_map->lock);
+	spin_lock(&page_map->lock);
 
 	usize x86_flags = vm_flags_to_x86(prot, flags);
 	u64* cur_head = page_map->head;
@@ -329,13 +329,13 @@ bool vm_map(PageMap* page_map, PhysAddr phys_addr, VirtAddr virt_addr, VMProt pr
 
 		if (cur_head == NULL)
 		{
-			spin_free(&page_map->lock);
+			spin_unlock(&page_map->lock);
 			return false;
 		}
 	}
 
 	cur_head[index] = (phys_addr & PT_ADDR_MASK) | (x86_flags & ~(PT_ADDR_MASK));
-	spin_free(&page_map->lock);
+	spin_unlock(&page_map->lock);
 
 	return true;
 }
6 changes: 3 additions & 3 deletions kernel/arch/x86_64/system/arch.c
@@ -23,7 +23,7 @@ extern bool can_smap
 void arch_init_cpu(Cpu* cpu, Cpu* boot)
 {
 	// Make sure no other memory accesses happen before the CPUs are initialized.
-	spin_acquire_force(&cpu_lock);
+	spin_lock(&cpu_lock);
 
 	// Allocate stack.
 	cpu->tss.rsp0 = pm_alloc(CONFIG_user_stack_size / vm_get_page_size(VMLevel_0)) + (u64)pm_get_phys_base();
@@ -128,15 +128,15 @@ void arch_init_cpu(Cpu* cpu, Cpu* boot)
 	if (cpu->id != boot->id)
 	{
 		// TODO: Init local APIC.
-		spin_free(&cpu_lock);
+		spin_unlock(&cpu_lock);
 
 		// Stop all other cores.
 		asm_interrupt_disable();
 		while (true)
 			asm_halt();
 	}
 
-	spin_free(&cpu_lock);
+	spin_unlock(&cpu_lock);
 }
 
 void arch_early_init(BootInfo* info)
4 changes: 2 additions & 2 deletions kernel/arch/x86_64/system/gdt.c
@@ -73,9 +73,9 @@ static SpinLock gdt_lock = spin_new();
 
 void gdt_load_tss(usize addr)
 {
-	spin_acquire_force(&gdt_lock);
+	spin_lock(&gdt_lock);
 	GDT_ENCODE_LONG(gdt_table.tss, addr, sizeof(TaskStateSegment),
 					GDTA_PRESENT | GDTA_PRIV_LVL(0) | GDTA_EXECUTABLE | GDTA_ACCESSED, 0);
 	tss_reload();
-	spin_free(&gdt_lock);
+	spin_unlock(&gdt_lock);
 }
12 changes: 6 additions & 6 deletions kernel/fs/devtmpfs.c
@@ -54,7 +54,7 @@ static isize zero_write(Handle* self, FileDescriptor* fd, const void* buffer, us
 
 static isize devtmpfs_handle_read(struct Handle* self, FileDescriptor* fd, void* buffer, usize amount, off_t offset)
 {
-	spin_acquire_force(&self->lock);
+	spin_lock(&self->lock);
 
 	TmpHandle* const handle = (TmpHandle*)self;
 	isize total_read = amount;
@@ -67,14 +67,14 @@ static isize devtmpfs_handle_read(struct Handle* self, FileDescriptor* fd, void*
 	// Copy all data to the buffer.
 	memcpy(buffer, handle->buffer, total_read);
 
-	spin_free(&self->lock);
+	spin_unlock(&self->lock);
 	return total_read;
 }
 
 static isize devtmpfs_handle_write(struct Handle* self, FileDescriptor* fd, const void* buffer, usize amount,
 								   off_t offset)
 {
-	spin_acquire_force(&self->lock);
+	spin_lock(&self->lock);
 
 	TmpHandle* const handle = (TmpHandle*)self;
 
@@ -108,7 +108,7 @@ static isize devtmpfs_handle_write(struct Handle* self, FileDescriptor* fd, cons
 	written = amount;
 
 fail:
-	spin_free(&self->lock);
+	spin_unlock(&self->lock);
 	return written;
 }
 
@@ -272,11 +272,11 @@ bool devtmpfs_add_device(Handle* device, const char* name)
 	device->stat.st_ino = inode_counter++;
 	device->stat.st_nlink = 1;
 
-	spin_acquire_force(&vfs_lock);
+	spin_lock(&vfs_lock);
 	hashmap_insert(&devtmpfs_root->children, name, strlen(name), node);
 	char path[256];
 	vfs_get_path(node, path, 256);
-	spin_free(&vfs_lock);
+	spin_unlock(&vfs_lock);
 
 	return true;
 }
4 changes: 2 additions & 2 deletions kernel/fs/fd.c
@@ -13,7 +13,7 @@ FileDescriptor* fd_from_num(Process* proc, int fd)
 	if (proc == NULL)
 		proc = arch_current_cpu()->thread->parent;
 
-	spin_acquire_force(&proc->fd_lock);
+	spin_lock(&proc->fd_lock);
 
 	// Check if fd is inside bounds.
 	if (fd < 0 || fd >= OPEN_MAX)
@@ -33,6 +33,6 @@ FileDescriptor* fd_from_num(Process* proc, int fd)
 	result->num_refs++;
 
 leave:
-	spin_free(&proc->fd_lock);
+	spin_unlock(&proc->fd_lock);
 	return result;
 }
4 changes: 2 additions & 2 deletions kernel/fs/handle.c
@@ -52,8 +52,8 @@ static SpinLock device_counter_lock = spin_new();
 
 usize handle_new_device()
 {
-	spin_acquire_force(&device_counter_lock);
+	spin_lock(&device_counter_lock);
 	usize dev = device_counter++;
-	spin_free(&device_counter_lock);
+	spin_unlock(&device_counter_lock);
 	return dev;
 }
8 changes: 4 additions & 4 deletions kernel/fs/tmpfs.c
@@ -19,7 +19,7 @@ static ino_t inode_counter = 0;
 
 static isize tmpfs_handle_read(struct Handle* self, FileDescriptor* fd, void* buffer, usize amount, off_t offset)
 {
-	spin_acquire_force(&self->lock);
+	spin_lock(&self->lock);
 
 	TmpHandle* const handle = (TmpHandle*)self;
 	isize total_read = amount;
@@ -32,13 +32,13 @@ static isize tmpfs_handle_read(struct Handle* self, FileDescriptor* fd, void* bu
 	// Copy all data to the buffer.
 	memcpy(buffer, handle->buffer + offset, total_read);
 
-	spin_free(&self->lock);
+	spin_unlock(&self->lock);
 	return total_read;
 }
 
 static isize tmpfs_handle_write(struct Handle* self, FileDescriptor* fd, const void* buffer, usize amount, off_t offset)
 {
-	spin_acquire_force(&self->lock);
+	spin_lock(&self->lock);
 
 	TmpHandle* const handle = (TmpHandle*)self;
 
@@ -74,7 +74,7 @@ static isize tmpfs_handle_write(struct Handle* self, FileDescriptor* fd, const v
 	written = amount;
 
 fail:
-	spin_free(&self->lock);
+	spin_unlock(&self->lock);
 	return written;
 }