From 33009a6f6b7d272886fd5556a4a39174fb6e341c Mon Sep 17 00:00:00 2001
From: Robot
Date: Mon, 12 Aug 2024 15:28:09 +0200
Subject: [PATCH] Scheduler re-write to support multiple CPUs (#78)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Rebase sched onto main

* a

* Fixé scheduler 2.0

* Update config script, make scheduler own the stacks

* Check for FileNotFound exception
---
 Makefile                                    |   2 +-
 files/CMakeLists.txt                        |   2 +-
 kernel/Makefile                             |   1 -
 kernel/cpu/riscv/include/cpu/isr.h          |  10 +-
 kernel/cpu/riscv/include/cpu/isr_ctx.h      |   4 +
 kernel/cpu/riscv/src/backtrace.c            |   8 +-
 kernel/cpu/riscv/src/entrypoint.S           |  13 -
 kernel/cpu/riscv/src/memprotect/riscv_pmp.c |  11 +-
 kernel/cpu/riscv/src/scheduler.c            |  68 +-
 kernel/include/badgelib/mutex.h             |  15 +
 kernel/include/cpulocal.h                   |  25 +
 kernel/include/process/internal.h           |   6 +-
 kernel/include/process/types.h              |  36 +-
 kernel/include/scheduler/cpu.h              |   4 +-
 kernel/include/scheduler/scheduler.h        | 186 ++--
 kernel/include/scheduler/types.h            | 108 ++-
 kernel/port/esp32c6/src/port.c              |   6 +
 kernel/port/esp32p4/port.mk                 |   2 +-
 kernel/port/esp32p4/src/port.c              |   9 +
 kernel/port/generic/gdbinit                 |  12 +-
 kernel/port/generic/src/port.c              |   5 +
 kernel/src/badgelib/mutex.c                 |  82 +-
 kernel/src/housekeeping.c                   |  12 +-
 kernel/src/main.c                           |  18 +-
 kernel/src/process/process.c                |  77 +-
 kernel/src/process/sighandler.c             |   4 +-
 kernel/src/scheduler/scheduler.c            | 899 +++++++++-----------
 tools/config.py                             | 378 +++++---
 28 files changed, 1041 insertions(+), 962 deletions(-)
 create mode 100644 kernel/include/cpulocal.h

diff --git a/Makefile b/Makefile
index 5656234..a7d2be5 100644
--- a/Makefile
+++ b/Makefile
@@ -25,7 +25,7 @@ why2025_defconfig:
 
 .PHONY: unmatched_defconfig
 unmatched_defconfig:
-	./tools/config.py --target generic --use-default --vec-spec none
+	./tools/config.py --target generic --use-default --vec_spec none
 
 .PHONY: build
 build:
diff --git a/files/CMakeLists.txt b/files/CMakeLists.txt
index a2c3718..320e0a5 100644
--- a/files/CMakeLists.txt
+++ b/files/CMakeLists.txt
@@ -36,7 +36,7 @@ set(badge_libs crt badgelib)
 macro(badgeros_executable exec installdir)
     add_executable(${exec})
     target_compile_options(${exec} PRIVATE ${badge_cflags} -ffunction-sections)
-    target_link_options(${exec} PRIVATE ${badge_cflags} -Wl,--gc-sections -nostartfiles)
+    target_link_options(${exec} PRIVATE ${badge_cflags} -pie -Wl,--gc-sections -nostartfiles)
    target_include_directories(${exec} PRIVATE ${badge_include})
     target_link_libraries(${exec} PRIVATE ${badge_libs})
     install(TARGETS ${exec} RUNTIME DESTINATION ${installdir})
diff --git a/kernel/Makefile b/kernel/Makefile
index 0fe1a30..e99dcb6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -5,7 +5,6 @@ CONFIG_PATH ?= ../.config/config.mk
 include $(CONFIG_PATH)
 
 MAKEFLAGS += --silent
-IDF_PATH ?= $(shell pwd)/../esp-idf
 SHELL := /usr/bin/env bash
 OUTPUT ?= $(shell pwd)/firmware
 BUILDDIR ?= build
diff --git a/kernel/cpu/riscv/include/cpu/isr.h b/kernel/cpu/riscv/include/cpu/isr.h
index 76953e7..1c6fd40 100644
--- a/kernel/cpu/riscv/include/cpu/isr.h
+++ b/kernel/cpu/riscv/include/cpu/isr.h
@@ -50,9 +50,15 @@ static inline bool isr_global_disable() {
 static inline void isr_global_enable() {
     asm volatile("csrs " CSR_STATUS_STR ", %0" ::"r"((1U << CSR_STATUS_IE_BIT)));
 }
-// Explicit context switch from M-mode.
+// Explicit context switch from kernel.
 // Interrupts must be disabled on entry and will be re-enabled on exit.
 // If the context switch target is not set, this is a NOP.
-extern void isr_context_switch(); +extern void isr_context_switch(); +// Pause the CPU briefly. +static inline void isr_pause() { + // RISC-V Zihintpause instruction. + // This is a fence with PRED=W and SUCC=none. + asm(".word 0x0100000f"); +} #endif diff --git a/kernel/cpu/riscv/include/cpu/isr_ctx.h b/kernel/cpu/riscv/include/cpu/isr_ctx.h index 2ed9eeb..4d88c0b 100644 --- a/kernel/cpu/riscv/include/cpu/isr_ctx.h +++ b/kernel/cpu/riscv/include/cpu/isr_ctx.h @@ -6,6 +6,8 @@ #include "cpu/regs.h" #ifndef __ASSEMBLER__ +#include "cpulocal.h" +#include "log.h" #include "memprotect.h" #include @@ -76,6 +78,8 @@ STRUCT_FIELD_WORD(isr_ctx_t, flags, 42) STRUCT_FIELD_STRUCT(isr_ctx_t, isr_noexc_cb_t, noexc_cb, 43) // Cookie for custom trap handler. STRUCT_FIELD_PTR(isr_ctx_t, void, noexc_cookie, 44) +// Pointer to CPU-local struct. +STRUCT_FIELD_PTR(isr_ctx_t, cpulocal_t, cpulocal, 45) STRUCT_END(isr_ctx_t) // `isr_ctx_t` flag: Is a kernel thread. diff --git a/kernel/cpu/riscv/src/backtrace.c b/kernel/cpu/riscv/src/backtrace.c index 837c839..62ff509 100644 --- a/kernel/cpu/riscv/src/backtrace.c +++ b/kernel/cpu/riscv/src/backtrace.c @@ -41,13 +41,13 @@ void backtrace() NAKED; #if __riscv_xlen == 64 void backtrace() { asm volatile("addi sp, sp, -16"); - asm volatile("sw ra, 8(sp)"); - asm volatile("sw s0, 0(sp)"); + asm volatile("sd ra, 8(sp)"); + asm volatile("sd s0, 0(sp)"); asm volatile("addi s0, sp, 16"); asm volatile("mv a0, s0"); asm volatile("jal backtrace_from_ptr"); - asm volatile("lw ra, 8(sp)"); - asm volatile("lw s0, 0(sp)"); + asm volatile("ld ra, 8(sp)"); + asm volatile("ld s0, 0(sp)"); asm volatile("addi sp, sp, 16"); asm volatile("ret"); } diff --git a/kernel/cpu/riscv/src/entrypoint.S b/kernel/cpu/riscv/src/entrypoint.S index 8d85cdd..d9fc99e 100644 --- a/kernel/cpu/riscv/src/entrypoint.S +++ b/kernel/cpu/riscv/src/entrypoint.S @@ -18,18 +18,6 @@ - # Reserve stack. - .section ".bss" - .align 4 - .global __stack_top - .global __stack_bottom - .global __stack_size - .equ __stack_size, 8192 - .lcomm __stack_bottom, __stack_size - .equ __stack_top, __stack_bottom + __stack_size - - - # Entrypoint from the bootloader. .text .align 2 @@ -42,7 +30,6 @@ _start: la gp, __global_pointer$ .option pop mv tp, x0 - la sp, __stack_top # Zero out .bss section. #ifndef CONFIG_TARGET_generic diff --git a/kernel/cpu/riscv/src/memprotect/riscv_pmp.c b/kernel/cpu/riscv/src/memprotect/riscv_pmp.c index 4b23d62..d40a2c2 100644 --- a/kernel/cpu/riscv/src/memprotect/riscv_pmp.c +++ b/kernel/cpu/riscv/src/memprotect/riscv_pmp.c @@ -8,6 +8,7 @@ #include "log.h" #include "memprotect.h" #include "port/hardware_allocation.h" +#include "scheduler/types.h" // PMP granularity. static size_t grain; @@ -532,6 +533,14 @@ void riscv_pmp_memprotect_swap(riscv_pmp_ctx_t *ctx) { void memprotect_swap_from_isr() { isr_ctx_t *ctx = isr_ctx_get(); if (!(ctx->flags & ISR_CTX_FLAG_KERNEL)) { + if (!ctx->mpu_ctx) { + logkf_from_isr(LOG_FATAL, "User ISR context 0x%{size;x} has no MPU context", ctx); + sched_thread_t *thread = ctx->thread; + if (thread) { + logkf_from_isr(LOG_DEBUG, "Thread #%{d} '%{cs}'", thread->id, thread->name); + } + panic_abort(); + } assert_dev_drop(ctx->mpu_ctx); riscv_pmp_memprotect_swap(ctx->mpu_ctx); } @@ -539,5 +548,5 @@ void memprotect_swap_from_isr() { // Swap in memory protections for a given context. 
void memprotect_swap(mpu_ctx_t *mpu) { - (void)mpu; + riscv_pmp_memprotect_swap(mpu); } diff --git a/kernel/cpu/riscv/src/scheduler.c b/kernel/cpu/riscv/src/scheduler.c index 895ebd2..1f1bf87 100644 --- a/kernel/cpu/riscv/src/scheduler.c +++ b/kernel/cpu/riscv/src/scheduler.c @@ -12,6 +12,9 @@ #include "process/types.h" #include "scheduler/cpu.h" #include "scheduler/isr.h" +#if MEMMAP_VMEM +#include "cpu/mmu.h" +#endif @@ -47,10 +50,10 @@ void sched_raise_from_isr(sched_thread_t *thread, bool syscall, void *entry_poin // Requests the scheduler to prepare a switch from kernel to userland for a user thread. // Resumes the userland thread where it left off. void sched_lower_from_isr() { - sched_thread_t *thread = sched_get_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread_unsafe(); process_t *process = thread->process; assert_dev_drop(!(thread->flags & THREAD_KERNEL) && (thread->flags & THREAD_PRIVILEGED)); - thread->flags &= ~THREAD_PRIVILEGED; + atomic_fetch_and(&thread->flags, ~THREAD_PRIVILEGED); // Set context switch target to user thread. isr_ctx_switch_set(&thread->user_isr_ctx); @@ -58,7 +61,6 @@ void sched_lower_from_isr() { if (atomic_load(&process->flags) & PROC_EXITING) { // Request a context switch to a different thread. - thread->flags &= ~THREAD_RUNNING; sched_request_switch_from_isr(); } } @@ -66,14 +68,14 @@ void sched_lower_from_isr() { // Check whether the current thread is in a signal handler. // Returns signal number, or 0 if not in a signal handler. bool sched_is_sighandler() { - sched_thread_t *thread = sched_get_current_thread(); - return thread->flags & THREAD_SIGHANDLER; + sched_thread_t *thread = sched_current_thread(); + return atomic_load(&thread->flags) & THREAD_SIGHANDLER; } // Enters a signal handler in the current thread. // Returns false if there isn't enough resources to do so. bool sched_signal_enter(size_t handler_vaddr, size_t return_vaddr, int signum) { - sched_thread_t *thread = sched_get_current_thread(); + sched_thread_t *thread = sched_current_thread(); // Ensure the user has enough stack. size_t usp = thread->user_isr_ctx.regs.sp; @@ -85,7 +87,9 @@ bool sched_signal_enter(size_t handler_vaddr, size_t return_vaddr, int signum) { thread->user_isr_ctx.regs.sp -= usize; // Save context to user's stack. - // TODO: Enable SUM bit for S-mode kernel. +#if MEMMAP_VMEM + mmu_enable_sum(); +#endif size_t *stackptr = (size_t *)thread->user_isr_ctx.regs.sp; stackptr[0] = thread->user_isr_ctx.regs.t0; stackptr[1] = thread->user_isr_ctx.regs.t1; @@ -105,7 +109,9 @@ bool sched_signal_enter(size_t handler_vaddr, size_t return_vaddr, int signum) { stackptr[17] = thread->user_isr_ctx.regs.pc; stackptr[18] = thread->user_isr_ctx.regs.s0; stackptr[19] = thread->user_isr_ctx.regs.ra; - // TODO: Disable SUM bit for S-mode kernel. +#if MEMMAP_VMEM + mmu_disable_sum(); +#endif // Set up registers for entering signal handler. thread->user_isr_ctx.regs.s0 = thread->user_isr_ctx.regs.sp + usize; @@ -114,15 +120,17 @@ bool sched_signal_enter(size_t handler_vaddr, size_t return_vaddr, int signum) { thread->user_isr_ctx.regs.a0 = signum; // Successfully entered signal handler. - thread->flags |= THREAD_SIGHANDLER; + atomic_fetch_or(&thread->flags, THREAD_SIGHANDLER); return true; } // Exits a signal handler in the current thread. // Returns false if the process cannot be resumed. 
bool sched_signal_exit() { - sched_thread_t *thread = sched_get_current_thread_unsafe(); - thread->flags &= ~THREAD_SIGHANDLER; + sched_thread_t *thread = sched_current_thread_unsafe(); + if (!(atomic_fetch_and(&thread->flags, ~THREAD_SIGHANDLER) & THREAD_SIGHANDLER)) { + return false; + } // Ensure the user still has the stack. size_t usp = thread->user_isr_ctx.regs.sp; @@ -132,8 +140,10 @@ bool sched_signal_exit() { return false; } - // Restore user's state. - // TODO: Enable SUM bit for S-mode kernel. +// Restore user's state. +#if MEMMAP_VMEM + mmu_enable_sum(); +#endif size_t *stackptr = (size_t *)thread->user_isr_ctx.regs.sp; thread->user_isr_ctx.regs.t0 = stackptr[0]; thread->user_isr_ctx.regs.t1 = stackptr[1]; @@ -153,7 +163,9 @@ bool sched_signal_exit() { thread->user_isr_ctx.regs.pc = stackptr[17]; thread->user_isr_ctx.regs.s0 = stackptr[18]; thread->user_isr_ctx.regs.ra = stackptr[19]; - // TODO: Disable SUM bit for S-mode kernel. +#if MEMMAP_VMEM + mmu_disable_sum(); +#endif // Restore user's stack pointer. thread->user_isr_ctx.regs.sp += usize; @@ -163,35 +175,43 @@ bool sched_signal_exit() { } // Return to exit the thread. -static void sched_exit_self() { +static void sched_exit_self(int code) { #ifndef NDEBUG - sched_thread_t *const this_thread = sched_get_current_thread(); - logkf(LOG_INFO, "Kernel thread '%{cs}' returned", sched_get_name(this_thread)); + sched_thread_t *const thread = sched_current_thread(); + logkf(LOG_DEBUG, "Kernel thread '%{cs}' returned %{d}", thread->name, code); #endif - sched_exit(0); + thread_exit(code); } // Prepares a context to be invoked as a kernel thread. -void sched_prepare_kernel_entry(sched_thread_t *thread, sched_entry_point_t entry_point, void *arg) { +void sched_prepare_kernel_entry(sched_thread_t *thread, void *entry_point, void *arg) { // Initialize registers. mem_set(&thread->kernel_isr_ctx.regs, 0, sizeof(thread->kernel_isr_ctx.regs)); thread->kernel_isr_ctx.regs.pc = (size_t)entry_point; thread->kernel_isr_ctx.regs.sp = thread->kernel_stack_top; thread->kernel_isr_ctx.regs.a0 = (size_t)arg; thread->kernel_isr_ctx.regs.ra = (size_t)sched_exit_self; - asm("mv %0, gp" : "=r"(thread->kernel_isr_ctx.regs.gp)); +#if __riscv_xlen == 64 + asm("sd gp, %0" ::"m"(thread->kernel_isr_ctx.regs.gp)); +#else + asm("sw gp, %0" ::"m"(thread->kernel_isr_ctx.regs.gp)); +#endif } // Prepares a pair of contexts to be invoked as a userland thread. // Kernel-side in these threads is always started by an ISR and the entry point is given at that time. -void sched_prepare_user_entry(sched_thread_t *thread, sched_entry_point_t entry_point, void *arg) { +void sched_prepare_user_entry(sched_thread_t *thread, size_t entry_point, size_t arg) { // Initialize kernel registers. mem_set(&thread->kernel_isr_ctx.regs, 0, sizeof(thread->kernel_isr_ctx.regs)); thread->kernel_isr_ctx.regs.sp = thread->kernel_stack_top; - asm("mv %0, gp" : "=r"(thread->kernel_isr_ctx.regs.gp)); +#if __riscv_xlen == 64 + asm("sd gp, %0" ::"m"(thread->kernel_isr_ctx.regs.gp)); +#else + asm("sw gp, %0" ::"m"(thread->kernel_isr_ctx.regs.gp)); +#endif // Initialize userland registers. 
     mem_set(&thread->user_isr_ctx.regs, 0, sizeof(thread->user_isr_ctx.regs));
-    thread->user_isr_ctx.regs.pc = (size_t)entry_point;
-    thread->user_isr_ctx.regs.a0 = (size_t)arg;
+    thread->user_isr_ctx.regs.pc = entry_point;
+    thread->user_isr_ctx.regs.a0 = arg;
 }
diff --git a/kernel/include/badgelib/mutex.h b/kernel/include/badgelib/mutex.h
index 7564c21..b8b71dc 100644
--- a/kernel/include/badgelib/mutex.h
+++ b/kernel/include/badgelib/mutex.h
@@ -40,6 +40,14 @@ bool mutex_acquire(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us);
 // Release `mutex`, if it was initially acquired by this thread.
 // Returns true if the mutex was successfully released.
 bool mutex_release(badge_err_t *ec, mutex_t *mutex);
+// Try to acquire `mutex` within `max_wait_us` microseconds.
+// If `max_wait_us` is too long or negative, do not use the timeout.
+// Returns true if the mutex was successfully acquired.
+bool mutex_acquire_from_isr(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us);
+// Release `mutex`, if it was initially acquired by this thread.
+// Returns true if the mutex was successfully released.
+bool mutex_release_from_isr(badge_err_t *ec, mutex_t *mutex);
+
 // Try to acquire a share in `mutex` within `max_wait_us` microseconds.
 // If `max_wait_us` is too long or negative, do not use the timeout.
 // Returns true if the share was successfully acquired.
@@ -47,3 +55,10 @@ bool mutex_acquire_shared(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wa
 // Release `mutex`, if it was initially acquired by this thread.
 // Returns true if the mutex was successfully released.
 bool mutex_release_shared(badge_err_t *ec, mutex_t *mutex);
+// Try to acquire a share in `mutex` within `max_wait_us` microseconds.
+// If `max_wait_us` is too long or negative, do not use the timeout.
+// Returns true if the share was successfully acquired.
+bool mutex_acquire_shared_from_isr(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us);
+// Release `mutex`, if it was initially acquired by this thread.
+// Returns true if the mutex was successfully released.
+bool mutex_release_shared_from_isr(badge_err_t *ec, mutex_t *mutex);
diff --git a/kernel/include/cpulocal.h b/kernel/include/cpulocal.h
new file mode 100644
index 0000000..5e524dd
--- /dev/null
+++ b/kernel/include/cpulocal.h
@@ -0,0 +1,25 @@
+
+// SPDX-License-Identifier: MIT
+
+#pragma once
+
+#include "scheduler/scheduler.h"
+
+#include 
+
+
+
+// CPU-local data.
+typedef struct {
+    // Current CPU ID.
+    size_t            cpuid;
+    // ISR stack top.
+    size_t            isr_stack_top;
+    // ISR stack bottom.
+    size_t            isr_stack_bottom;
+    // CPU-local scheduler data.
+    sched_cpulocal_t *sched;
+} cpulocal_t;
+
+// Per-CPU CPU-local data.
+extern cpulocal_t *cpulocal;
diff --git a/kernel/include/process/internal.h b/kernel/include/process/internal.h
index e925f2d..0a461da 100644
--- a/kernel/include/process/internal.h
+++ b/kernel/include/process/internal.h
@@ -17,7 +17,7 @@ void proc_exit_self(int code);
 process_t *proc_get_unsafe(pid_t pid);
 
 // Suspend all threads for a process except the current.
-void proc_suspend(process_t *process, sched_thread_t *current);
+void proc_suspend(process_t *process, tid_t current);
 // Resume all threads for a process.
 void proc_resume(process_t *process);
 // Release all process runtime resources (threads, memory, files, etc.).
@@ -38,9 +38,7 @@ void proc_start_raw(badge_err_t *ec, process_t *process);
 
 // Create a new thread in a process.
 // Returns created thread handle.
-sched_thread_t *proc_create_thread_raw_unsafe( - badge_err_t *ec, process_t *process, sched_entry_point_t entry_point, void *arg, sched_prio_t priority -); +tid_t proc_create_thread_raw(badge_err_t *ec, process_t *process, size_t entry_point, size_t arg, int priority); // Delete a thread in a process. void proc_delete_thread_raw_unsafe(badge_err_t *ec, process_t *process, sched_thread_t *thread); // Allocate more memory to a process. diff --git a/kernel/include/process/types.h b/kernel/include/process/types.h index 9cc9876..e1cc80f 100644 --- a/kernel/include/process/types.h +++ b/kernel/include/process/types.h @@ -71,40 +71,40 @@ typedef int pid_t; // A process and all of its resources. typedef struct process_t { // Node for child process list. - dlist_node_t node; + dlist_node_t node; // Parent process, NULL for process 1. - process_t *parent; + process_t *parent; // Process binary. - char const *binary; + char const *binary; // Number of arguments. - int argc; + int argc; // Value of arguments. - char **argv; + char **argv; // Size required to store all of argv. - size_t argv_size; + size_t argv_size; // Number of file descriptors. - size_t fds_len; + size_t fds_len; // File descriptors. - proc_fd_t *fds; + proc_fd_t *fds; // Number of threads. - size_t threads_len; + size_t threads_len; // Thread handles. - sched_thread_t **threads; + tid_t *threads; // Process ID. - pid_t pid; + pid_t pid; // Memory map information. - proc_memmap_t memmap; + proc_memmap_t memmap; // Resource mutex used for multithreading processes. - mutex_t mtx; + mutex_t mtx; // Process status flags. - atomic_int flags; + atomic_int flags; // Pending signals list. - dlist_t sigpending; + dlist_t sigpending; // Child process list. - dlist_t children; + dlist_t children; // Signal handler virtual addresses. // First index is for signal handler returns. - size_t sighandlers[SIG_COUNT]; + size_t sighandlers[SIG_COUNT]; // Exit code if applicable. - int state_code; + int state_code; } process_t; diff --git a/kernel/include/scheduler/cpu.h b/kernel/include/scheduler/cpu.h index a81f6fa..9a9988d 100644 --- a/kernel/include/scheduler/cpu.h +++ b/kernel/include/scheduler/cpu.h @@ -34,8 +34,8 @@ bool sched_signal_enter(size_t handler_vaddr, size_t return_vaddr, int signum); bool sched_signal_exit(); // Prepares a context to be invoked as a kernel thread. -void sched_prepare_kernel_entry(sched_thread_t *thread, sched_entry_point_t entry_point, void *arg); +void sched_prepare_kernel_entry(sched_thread_t *thread, void *entry_point, void *arg); // Prepares a pair of contexts to be invoked as a userland thread. // Kernel-side in these threads is always started by an ISR and the entry point is given at that time. -void sched_prepare_user_entry(sched_thread_t *thread, sched_entry_point_t entry_point, void *arg); +void sched_prepare_user_entry(sched_thread_t *thread, size_t entry_point, size_t arg); diff --git a/kernel/include/scheduler/scheduler.h b/kernel/include/scheduler/scheduler.h index 4696919..ab8ea80 100644 --- a/kernel/include/scheduler/scheduler.h +++ b/kernel/include/scheduler/scheduler.h @@ -10,156 +10,66 @@ #include #include -typedef struct process_t process_t; - +// Processs struct. +typedef struct process_t process_t; // Globally unique thread ID. -typedef int tid_t; - +typedef int tid_t; +// Thread struct. typedef struct sched_thread_t sched_thread_t; +// Kernel thread entrypoint. +typedef int (*sched_entry_t)(void *arg); +// CPU-local scheduler data. 
+typedef struct sched_cpulocal_t sched_cpulocal_t;
 
-typedef void (*sched_entry_point_t)(void *arg);
+// will be scheduled with smaller time slices than normal
+#define SCHED_PRIO_LOW    0
+// default value
+#define SCHED_PRIO_NORMAL 10
+// will be scheduled with bigger time slices than normal
+#define SCHED_PRIO_HIGH   20
 
-typedef enum {
-    // will be scheduled with smaller time slices than normal
-    SCHED_PRIO_LOW = 0,
-    // default value
-    SCHED_PRIO_NORMAL = 10,
-    // will be scheduled with bigger time slices than normal
-    SCHED_PRIO_HIGH = 20,
-} sched_prio_t;
 
-// Initializes the scheduler and setups up the system to be ready to
-// create threads and execute them.
-void sched_init();
-// Create a thread for the current code path and start the schedulder on this core.
+// Global scheduler initialization.
+void sched_init();
+// Start executing the scheduler on this CPU.
 void sched_exec() NORETURN;
+// Exit the scheduler and subsequently shut down the CPU.
+void sched_exit(int cpu);
 
-// Creates a new suspended userland thread.
-//
-// Userland threads have no initial stack pointer set, this must be done by
-// `entry_point` in the userland application itself.
-//
-// - `process` is the process that is associated with this thread.
-// - `entry_point` is the function the thread will execute.
-// - `arg` is passed to `entry_point` upon start.
-// - `priority` defines how much time the thread gets in regards to all other
-// threads. Higher priority threads will have
-//   a higher time contigent as others.
-//
-// Returns a handle to the thread or NULL if the thread could not be created.
-//
-// Potential errors:
-// - `ECAUSE_NOMEM` is issued when the thread could not be allocated.
-sched_thread_t *sched_create_userland_thread(
-    badge_err_t *ec,
-    process_t *process,
-    sched_entry_point_t entry_point,
-    void *arg,
-    void *stack_bottom,
-    size_t stack_size,
-    sched_prio_t priority
+// Create a new suspended userland thread.
+// If `kernel_stack_bottom` is NULL, the scheduler will allocate a stack.
+tid_t thread_new_user(
+    badge_err_t *ec, char const *name, process_t *process, size_t user_entrypoint, size_t user_arg, int priority
 );
-
-// Creates a new suspended kernel thread.
-//
-// - `process` is the process that is associated with this thread.
-// - `entry_point` is the function the thread will execute.
-// - `arg` is passed to `entry_point` upon start.
-// - `stack_bottom` is a pointer to the stack space for this thread. Pointer
-// must be aligned to `STACK_ALIGNMENT` and
-//   must not be `NULL`.
-// - `stack_size` is the number of bytes available from `stack_bottom` on. Must
-// be a multiple of `STACK_ALIGNMENT`.
-// - `priority` defines how much time the thread gets in regards to all other
-// threads. Higher priority threads will have
-//   a higher time contigent as others.
-//
-// Returns a handle to the thread or NULL if the thread could not be created.
-//
-// Potential errors:
-// - `ECAUSE_NOMEM` is issued when the thread could not be allocated.
-sched_thread_t *sched_create_kernel_thread(
-    badge_err_t *ec,
-    sched_entry_point_t entry_point,
-    void *arg,
-    void *stack_bottom,
-    size_t stack_size,
-    sched_prio_t priority
-);
-
-// Kills the given thread and releases all scheduler resources allocated by the
-// operating system associated with this thread.
-//
-// NOTE: If `thread` is the current thread, this function is equivalent of
-// detaching the thread and then calling `sched_exit(0)`.
-// -// NOTE: This does only destroys scheduler-related resources, but not other -// kernel resources. -void sched_destroy_thread(badge_err_t *ec, sched_thread_t *thread); - -// Detaches the thread. This means that when the thread stops by returning from -// `entry_point`, the thread is automatically destroyed. -void sched_detach_thread(badge_err_t *ec, sched_thread_t *thread); - -// Halts the thread and prevents it from being scheduled again. -// This effect can be undone with `sched_resume_thread`. -// -// If the thread is already suspended, nothing will happen. -// -// If `thread` is NULL, the current thread will be suspended and `sched_yield()` -// is invoked implicitly. It will return with success after the thread will be -// resumed again. -// -// Potential errors: -// - `ECAUSE_ILLEGAL` is issued when the thread has finished. -void sched_suspend_thread(badge_err_t *ec, sched_thread_t *thread); - +// Create new suspended kernel thread. +// If `stack_bottom` is NULL, the scheduler will allocate a stack. +tid_t thread_new_kernel(badge_err_t *ec, char const *name, sched_entry_t entry_point, void *arg, int priority); +// Do not wait for thread to be joined; clean up immediately. +void thread_detach(badge_err_t *ec, tid_t thread); + +// Pauses execution of the thread. +void thread_suspend(badge_err_t *ec, tid_t thread); // Resumes a previously suspended thread or starts it. -// After that, the thread will be scheduled in a regular manner again. -// -// If the thread is already running, nothing will happen. -// -// Potential errors: -// - `ECAUSE_ILLEGAL` is issued when the thread has finished. -void sched_resume_thread(badge_err_t *ec, sched_thread_t *thread); - +void thread_resume(badge_err_t *ec, tid_t thread); // Resumes a previously suspended thread or starts it. -// Will move the thread to the start of the wait queue and will make it the -// next thread executed. -// -// NOTE: When `thread` is the current thread, this function will do nothing. -// -// Potential errors: -// - `ECAUSE_ILLEGAL` is issued when the thread has finished. -void sched_resume_thread_next(badge_err_t *ec, sched_thread_t *thread); - +// Immediately schedules the thread instead of putting it in the queue first. +void thread_resume_now(badge_err_t *ec, tid_t thread); // Returns whether a thread is running; it is neither suspended nor has it exited. -bool sched_thread_is_running(badge_err_t *ec, sched_thread_t *thread); - -// Returns the currently active thread or NULL if the scheduler isn't running. -sched_thread_t *sched_get_current_thread(void); - -// Returns the associated process for a given thread. -process_t *sched_get_associated_process(sched_thread_t const *thread); +bool thread_is_running(badge_err_t *ec, tid_t thread); -// thread self-control interface: +// Returns the current thread ID. +tid_t sched_current_tid(); +// Returns the current thread struct. +sched_thread_t *sched_current_thread(); +// Returns the associated thread struct. +sched_thread_t *sched_get_thread(tid_t thread); -// Announces that all work is done for now and the scheduler can now -// schedule other threads. -// -// NOTE: It's illegal to invoke this function outside a thread context! +// Explicitly yield to the scheduler; the scheduler may run other threads without waiting for preemption. +// Use this function to reduce the CPU time used by a thread. void sched_yield(void); - -// Exits the current thread and notifies the scheduler that it must not be -// scheduled anymore, as all work is done. 
-// -// NOTE: It's illegal to invoke this function outside a thread context! -void sched_exit(uint32_t exit_code) NORETURN; - - -// Debug: Set thread name shown in logs. -void sched_set_name(badge_err_t *ec, sched_thread_t *thread, char const *name); - -// Debug: Get thread name shown in logs. -char const *sched_get_name(sched_thread_t *thread); +// Exits the current thread. +// If the thread is detached, resources will be cleaned up. +void thread_exit(int code) NORETURN; +// Wait for another thread to exit. +void thread_join(tid_t thread); diff --git a/kernel/include/scheduler/types.h b/kernel/include/scheduler/types.h index b581477..3336ee7 100644 --- a/kernel/include/scheduler/types.h +++ b/kernel/include/scheduler/types.h @@ -10,69 +10,91 @@ #include "process/process.h" #include "scheduler.h" +#include #include #include #include -enum { - // The minimum time a thread will run. `SCHED_PRIO_LOW` maps to this. - SCHEDULER_MIN_TASK_TIME_US = 5000, // 5ms - // The time quota increment per increased priority. - SCHEDULER_TIME_QUOTA_INCR_US = 500, // 0.5ms * priority - // Quota for the idle task. This can be pretty high as the idle task - // will only run when nothing else runs. - // 1 second is a good measure, idle task will always be interrupted by other - // means. - SCHEDULER_IDLE_TASK_QUOTA_US = 1000000, - // Defines how many threads are available in the kernel. - // TODO: Replace this constant with a dynamically configurable allocator! - SCHEDULER_MAX_THREADS = 16, - // Debug: Maximum length of a thread's name. - SCHED_THREAD_NAME_LEN = 32, -}; +// The minimum time a thread will run. `SCHED_PRIO_LOW` maps to this. +#define SCHEDULER_MIN_TASK_TIME_US 5000 // 5ms +// The time quota increment per increased priority. +#define SCHEDULER_TIME_QUOTA_INCR_US 500 // 0.5ms * priority -enum { - // The thread is currently in the scheduling queues - THREAD_RUNNING = (1 << 0), - // The thread has finished and is waiting for destruction - THREAD_COMPLETED = (1 << 1), - // The thread is detached and will self-destroy after exit - THREAD_DETACHED = (1 << 2), - // The thread is a kernel thread. - THREAD_KERNEL = (1 << 3), - // The thread is a kernel thread or a user thread running in kernel mode. - THREAD_PRIVILEGED = (1 << 4), - // The user thread is running a signal handler. - THREAD_SIGHANDLER = (1 << 5), -}; + +// The thread is currently in the scheduling queues. +#define THREAD_RUNNING (1 << 0) +// The thread has finished and is waiting for destruction. +#define THREAD_EXITING (1 << 1) +// The thread is detached or has been joined. +#define THREAD_DETACHED (1 << 2) +// The thread is a kernel thread. +#define THREAD_KERNEL (1 << 3) +// The thread is a kernel thread or a user thread running in kernel mode. +#define THREAD_PRIVILEGED (1 << 4) +// The user thread is running a signal handler. +#define THREAD_SIGHANDLER (1 << 5) +// The thread should be added to the front of the queue. +#define THREAD_STARTNOW (1 << 6) +// The thread should be suspended. +#define THREAD_SUSPENDING (1 << 7) +// The thread has exited and is awaiting join. +#define THREAD_EXITED (1 << 8) + +// The scheduler is running on this CPU. +#define SCHED_RUNNING (1 << 0) +// The scheduler is pending exit on this CPU. +#define SCHED_EXITING (1 << 1) + +// Thread struct. struct sched_thread_t { + // Thread queue link. + dlist_node_t node; + // Process to which this thread belongs. - process_t *process; + process_t *process; // Lowest address of the kernel stack. 
- size_t kernel_stack_bottom; + size_t kernel_stack_bottom; // Highest address of the kernel stack. - size_t kernel_stack_top; + size_t kernel_stack_top; // Priority of this thread. - sched_prio_t priority; + int priority; - // dynamic info: - uint32_t flags; - dlist_node_t schedule_node; - uint32_t exit_code; + // Thread flags. + atomic_int flags; + // Exit code from `thread_exit` + int exit_code; // ISR context for threads running in kernel mode. isr_ctx_t kernel_isr_ctx; // ISR context for userland thread running in user mode. isr_ctx_t user_isr_ctx; -#ifndef NDEBUG - // Name for debug printing. - char name[SCHED_THREAD_NAME_LEN]; -#endif + // Thread ID. + tid_t id; + // Thread name. + char *name; +}; + +// CPU-local scheduler data. +struct sched_cpulocal_t { + // Scheduler start/stop mutex. + mutex_t run_mtx; + // Incoming threads list mutex. + mutex_t incoming_mtx; + // Threads pending handover to this CPU. + dlist_t incoming; + // CPU-local thread queue. + dlist_t queue; + // CPU-local scheduler state flags. + atomic_int flags; + // CPU load estimate in 0.01% increments. + atomic_int load; + // Idle thread. + sched_thread_t idle_thread; }; // Returns the current thread without using a critical section. -sched_thread_t *sched_get_current_thread_unsafe(); +sched_thread_t *sched_current_thread_unsafe(); diff --git a/kernel/port/esp32c6/src/port.c b/kernel/port/esp32c6/src/port.c index 46f36f8..e8ca77f 100644 --- a/kernel/port/esp32c6/src/port.c +++ b/kernel/port/esp32c6/src/port.c @@ -3,7 +3,9 @@ #include "port/port.h" +#include "cpulocal.h" #include "interrupt.h" +#include "isr_ctx.h" #include "port/clkconfig.h" #include "port/hardware_allocation.h" #include "port/pmu_init.h" @@ -15,9 +17,13 @@ #include +cpulocal_t port_cpu_local; + + // Early hardware initialization. void port_early_init() { + isr_ctx_get()->cpulocal = &port_cpu_local; // Initialise PMU. pmu_init(); // Power up UART. diff --git a/kernel/port/esp32p4/port.mk b/kernel/port/esp32p4/port.mk index c44e49d..530dcc8 100644 --- a/kernel/port/esp32p4/port.mk +++ b/kernel/port/esp32p4/port.mk @@ -7,7 +7,7 @@ openocd: .PHONY: flash flash: build - ../.venv/bin/python -m esptool -b 921600 --port "$(PORT)" \ + ../.venv/bin/python -m esptool -b 921600 --port "$(PORT)" --no-stub \ write_flash --flash_mode dio --flash_freq 80m --flash_size 2MB \ 0x2000 port/esp32p4/bootloader.bin \ 0x10000 "$(OUTPUT)/badger-os.bin" \ diff --git a/kernel/port/esp32p4/src/port.c b/kernel/port/esp32p4/src/port.c index 4710df4..48bb885 100644 --- a/kernel/port/esp32p4/src/port.c +++ b/kernel/port/esp32p4/src/port.c @@ -3,7 +3,9 @@ #include "port/port.h" +#include "cpulocal.h" #include "interrupt.h" +#include "isr_ctx.h" #include "log.h" #include "port/pmu_init.h" #include "rom/cache.h" @@ -11,10 +13,17 @@ #include "soc/interrupts.h" #include "soc/uart_struct.h" +// CPU0 local data. +cpulocal_t port_cpu0_local; +// CPU1 local data. +cpulocal_t port_cpu1_local; + // Early hardware initialization. void port_early_init() { + // Set CPU-local data pointer. + isr_ctx_get()->cpulocal = &port_cpu0_local; // Initialize PMU. 
pmu_init(); } diff --git a/kernel/port/generic/gdbinit b/kernel/port/generic/gdbinit index 002792b..7aa3c11 100644 --- a/kernel/port/generic/gdbinit +++ b/kernel/port/generic/gdbinit @@ -1,3 +1,11 @@ file firmware/badger-os.elf -b basic_runtime_init -target remote :1234 \ No newline at end of file +target remote :1234 + +define reset + mon system_reset + maintenance flush register-cache + thb basic_runtime_init + c +end + +reset diff --git a/kernel/port/generic/src/port.c b/kernel/port/generic/src/port.c index 52ad1d1..1759555 100644 --- a/kernel/port/generic/src/port.c +++ b/kernel/port/generic/src/port.c @@ -6,6 +6,7 @@ #include "assertions.h" #include "cpu/mmu.h" #include "cpu/panic.h" +#include "isr_ctx.h" #include "limine.h" #include "memprotect.h" #include "port/hardware_allocation.h" @@ -50,9 +51,13 @@ __attribute__((section(".requests_end"))) LIMINE_REQUESTS_END_MARKER; // Memory map entry selected to be early alloc pool. static size_t early_alloc_index; +// CPU0 local data. +cpulocal_t port_cpu0_local; + // Early hardware initialization. void port_early_init() { rawprint("\033[0m\033[2J"); + isr_ctx_get()->cpulocal = &port_cpu0_local; // Verify needed requests have been answered. if (!mm_req.response) { diff --git a/kernel/src/badgelib/mutex.c b/kernel/src/badgelib/mutex.c index afa46df..3c0e86f 100644 --- a/kernel/src/badgelib/mutex.c +++ b/kernel/src/badgelib/mutex.c @@ -4,6 +4,7 @@ #include "mutex.h" #include "assertions.h" +#include "cpu/isr.h" #include "log.h" #include "scheduler/scheduler.h" #include "time.h" @@ -14,12 +15,15 @@ // Atomically await the expected value and swap in the new value. -static bool - await_swap_atomic_int(atomic_int *var, timestamp_us_t timeout, int expected, int new_value, memory_order order) { +static inline bool await_swap_atomic_int( + atomic_int *var, timestamp_us_t timeout, int expected, int new_value, memory_order order, bool from_isr +) { do { int old_value = expected; if (atomic_compare_exchange_weak_explicit(var, &old_value, new_value, order, memory_order_relaxed)) { return true; + } else if (from_isr) { + isr_pause(); } else { sched_yield(); } @@ -28,13 +32,16 @@ static bool } // Atomically check the value does not exceed a threshold and add 1. -static bool thresh_add_atomic_int(atomic_int *var, timestamp_us_t timeout, int threshold, memory_order order) { +static inline bool + thresh_add_atomic_int(atomic_int *var, timestamp_us_t timeout, int threshold, memory_order order, bool from_isr) { do { int old_value = atomic_load(var); int new_value = old_value + 1; if (!(old_value >= threshold || new_value >= threshold) && atomic_compare_exchange_weak_explicit(var, &old_value, new_value, order, memory_order_relaxed)) { return true; + } else if (from_isr) { + isr_pause(); } else { sched_yield(); } @@ -43,13 +50,16 @@ static bool thresh_add_atomic_int(atomic_int *var, timestamp_us_t timeout, int t } // Atomically check the value doesn't equal either illegal values and subtract 1. 
-static bool unequal_sub_atomic_int(atomic_int *var, int unequal0, int unequal1, memory_order order) { +static inline bool + unequal_sub_atomic_int(atomic_int *var, int unequal0, int unequal1, memory_order order, bool from_isr) { while (1) { int old_value = atomic_load(var); int new_value = old_value - 1; if (!(old_value == unequal0 || old_value == unequal1) && atomic_compare_exchange_weak_explicit(var, &old_value, new_value, order, memory_order_relaxed)) { return true; + } else if (from_isr) { + isr_pause(); } else { sched_yield(); } @@ -94,9 +104,10 @@ void mutex_destroy(badge_err_t *ec, mutex_t *mutex) { atomic_thread_fence(memory_order_release); } + // Try to acquire `mutex` within `timeout` microseconds. // Returns true if the mutex was successully acquired. -bool mutex_acquire(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeout) { +static bool mutex_acquire_impl(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeout, bool from_isr) { if (atomic_load_explicit(&mutex->magic, memory_order_acquire) != MUTEX_MAGIC) { badge_err_set(ec, ELOC_UNKNOWN, ECAUSE_ILLEGAL); return false; @@ -109,7 +120,7 @@ bool mutex_acquire(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeout) { timeout += now; } // Await the shared portion to reach 0 and then lock. - if (await_swap_atomic_int(&mutex->shares, timeout, 0, EXCLUSIVE_MAGIC, memory_order_acquire)) { + if (await_swap_atomic_int(&mutex->shares, timeout, 0, EXCLUSIVE_MAGIC, memory_order_acquire, from_isr)) { // If that succeeds, the mutex was acquired. badge_err_set_ok(ec); return true; @@ -122,13 +133,13 @@ bool mutex_acquire(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeout) { // Release `mutex`, if it was initially acquired by this thread. // Returns true if the mutex was successfully released. -bool mutex_release(badge_err_t *ec, mutex_t *mutex) { +static bool mutex_release_impl(badge_err_t *ec, mutex_t *mutex, bool from_isr) { if (atomic_load_explicit(&mutex->magic, memory_order_acquire) != MUTEX_MAGIC) { badge_err_set(ec, ELOC_UNKNOWN, ECAUSE_ILLEGAL); return false; } assert_dev_drop(atomic_load(&mutex->shares) >= EXCLUSIVE_MAGIC); - if (await_swap_atomic_int(&mutex->shares, TIMESTAMP_US_MAX, EXCLUSIVE_MAGIC, 0, memory_order_release)) { + if (await_swap_atomic_int(&mutex->shares, TIMESTAMP_US_MAX, EXCLUSIVE_MAGIC, 0, memory_order_release, from_isr)) { // Successful release. badge_err_set_ok(ec); return true; @@ -141,7 +152,7 @@ bool mutex_release(badge_err_t *ec, mutex_t *mutex) { // Try to acquire a share in `mutex` within `timeout` microseconds. // Returns true if the share was successfully acquired. -bool mutex_acquire_shared(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeout) { +static bool mutex_acquire_shared_impl(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeout, bool from_isr) { if (atomic_load_explicit(&mutex->magic, memory_order_acquire) != MUTEX_MAGIC) { badge_err_set(ec, ELOC_UNKNOWN, ECAUSE_ILLEGAL); return false; @@ -158,7 +169,7 @@ bool mutex_acquire_shared(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeou timeout += now; } // Take a share. - if (thresh_add_atomic_int(&mutex->shares, timeout, EXCLUSIVE_MAGIC, memory_order_acquire)) { + if (thresh_add_atomic_int(&mutex->shares, timeout, EXCLUSIVE_MAGIC, memory_order_acquire, from_isr)) { // If that succeeds, the mutex was successfully acquired. badge_err_set_ok(ec); return true; @@ -171,13 +182,13 @@ bool mutex_acquire_shared(badge_err_t *ec, mutex_t *mutex, timestamp_us_t timeou // Release `mutex`, if it was initially acquired by this thread. 
 // Returns true if the mutex was successfully released.
-bool mutex_release_shared(badge_err_t *ec, mutex_t *mutex) {
+static bool mutex_release_shared_impl(badge_err_t *ec, mutex_t *mutex, bool from_isr) {
     if (atomic_load_explicit(&mutex->magic, memory_order_acquire) != MUTEX_MAGIC) {
         badge_err_set(ec, ELOC_UNKNOWN, ECAUSE_ILLEGAL);
         return false;
     }
     assert_dev_drop(atomic_load(&mutex->shares) < EXCLUSIVE_MAGIC);
-    if (!unequal_sub_atomic_int(&mutex->shares, 0, EXCLUSIVE_MAGIC, memory_order_release)) {
+    if (!unequal_sub_atomic_int(&mutex->shares, 0, EXCLUSIVE_MAGIC, memory_order_release, from_isr)) {
         // Prevent the counter from underflowing.
         badge_err_set(ec, ELOC_UNKNOWN, ECAUSE_ILLEGAL);
         return false;
@@ -187,3 +198,50 @@
 }
+
+
+// Try to acquire `mutex` within `max_wait_us` microseconds.
+// If `max_wait_us` is too long or negative, do not use the timeout.
+// Returns true if the mutex was successfully acquired.
+bool mutex_acquire(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us) {
+    return mutex_acquire_impl(ec, mutex, max_wait_us, false);
+}
+// Release `mutex`, if it was initially acquired by this thread.
+// Returns true if the mutex was successfully released.
+bool mutex_release(badge_err_t *ec, mutex_t *mutex) {
+    return mutex_release_impl(ec, mutex, false);
+}
+// Try to acquire `mutex` within `max_wait_us` microseconds.
+// If `max_wait_us` is too long or negative, do not use the timeout.
+// Returns true if the mutex was successfully acquired.
+bool mutex_acquire_from_isr(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us) {
+    return mutex_acquire_impl(ec, mutex, max_wait_us, true);
+}
+// Release `mutex`, if it was initially acquired by this thread.
+// Returns true if the mutex was successfully released.
+bool mutex_release_from_isr(badge_err_t *ec, mutex_t *mutex) {
+    return mutex_release_impl(ec, mutex, true);
+}
+
+// Try to acquire a share in `mutex` within `max_wait_us` microseconds.
+// If `max_wait_us` is too long or negative, do not use the timeout.
+// Returns true if the share was successfully acquired.
+bool mutex_acquire_shared(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us) {
+    return mutex_acquire_shared_impl(ec, mutex, max_wait_us, false);
+}
+// Release `mutex`, if it was initially acquired by this thread.
+// Returns true if the mutex was successfully released.
+bool mutex_release_shared(badge_err_t *ec, mutex_t *mutex) {
+    return mutex_release_shared_impl(ec, mutex, false);
+}
+// Try to acquire a share in `mutex` within `max_wait_us` microseconds.
+// If `max_wait_us` is too long or negative, do not use the timeout.
+// Returns true if the share was successfully acquired.
+bool mutex_acquire_shared_from_isr(badge_err_t *ec, mutex_t *mutex, timestamp_us_t max_wait_us) {
+    return mutex_acquire_shared_impl(ec, mutex, max_wait_us, true);
+}
+// Release `mutex`, if it was initially acquired by this thread.
+// Returns true if the mutex was successfully released.
+bool mutex_release_shared_from_isr(badge_err_t *ec, mutex_t *mutex) {
+    return mutex_release_shared_impl(ec, mutex, true);
+}
diff --git a/kernel/src/housekeeping.c b/kernel/src/housekeeping.c
index a7e5c72..c5ab14c 100644
--- a/kernel/src/housekeeping.c
+++ b/kernel/src/housekeeping.c
@@ -48,15 +48,13 @@ int hk_task_time_cmp(void const *a, void const *b) {
 
 
-// Stack for the housekeeping thread.
-static uint8_t hk_stack[8192] ALIGNED_TO(16);
 // The housekeeping thread handle.
-static sched_thread_t *hk_thread; +static tid_t hk_thread; // Task mutex. -static mutex_t hk_mtx = MUTEX_T_INIT; +static mutex_t hk_mtx = MUTEX_T_INIT; // Runs housekeeping tasks. -void hk_thread_func(void *ignored) { +int hk_thread_func(void *ignored) { (void)ignored; while (1) { @@ -89,9 +87,9 @@ void hk_thread_func(void *ignored) { // Initialize the housekeeping system. void hk_init() { badge_err_t ec; - hk_thread = sched_create_kernel_thread(&ec, hk_thread_func, NULL, hk_stack, sizeof(hk_stack), SCHED_PRIO_NORMAL); + hk_thread = thread_new_kernel(&ec, "housekeeping", hk_thread_func, NULL, SCHED_PRIO_NORMAL); badge_err_assert_always(&ec); - sched_resume_thread(&ec, hk_thread); + thread_resume(&ec, hk_thread); badge_err_assert_always(&ec); } diff --git a/kernel/src/main.c b/kernel/src/main.c index e8fa16c..826c847 100644 --- a/kernel/src/main.c +++ b/kernel/src/main.c @@ -21,9 +21,6 @@ -// The initial kernel stack. -extern char stack_bottom[] asm("__stack_bottom"); -extern char stack_top[] asm("__stack_top"); // When set, a shutdown is initiated. // 0: Do nothing. // 1: Shut down (default). @@ -100,21 +97,16 @@ void basic_runtime_init() { // Kernel memory allocator initialization. kernel_heap_init(); - // Scheduler initialization. + // Global scheduler initialization. sched_init(); + // Housekeeping thread initialization. hk_init(); // Add the remainder of the kernel lifetime as a new thread. - sched_thread_t *thread = sched_create_kernel_thread( - &ec, - kernel_lifetime_func, - NULL, - stack_bottom, - stack_top - stack_bottom, - SCHED_PRIO_NORMAL - ); + tid_t thread = thread_new_kernel(&ec, "main", (void *)kernel_lifetime_func, NULL, SCHED_PRIO_NORMAL); badge_err_assert_always(&ec); - sched_resume_thread(&ec, thread); + thread_resume(&ec, thread); + thread_resume(&ec, thread); badge_err_assert_always(&ec); // Start the scheduler and enter the next phase in the kernel's lifetime. diff --git a/kernel/src/process/process.c b/kernel/src/process/process.c index 79b8ab0..e715ef6 100644 --- a/kernel/src/process/process.c +++ b/kernel/src/process/process.c @@ -88,7 +88,7 @@ static void clean_up_from_housekeeping(int taskno, void *arg) { // Kill a process from one of its own threads. void proc_exit_self(int code) { // Mark this process as exiting. - sched_thread_t *thread = sched_get_current_thread(); + sched_thread_t *thread = sched_current_thread(); process_t *process = thread->process; mutex_acquire(NULL, &process->mtx, TIMESTAMP_US_MAX); atomic_fetch_or(&process->flags, PROC_EXITING); @@ -216,7 +216,7 @@ uint32_t proc_getflags_raw(process_t *process) { // Get a handle to the current process, if any. process_t *proc_current() { - return sched_get_current_thread()->process; + return sched_current_thread()->process; } // Get the PID of the current process, if any. @@ -296,13 +296,7 @@ void proc_start_raw(badge_err_t *ec, process_t *process) { } // Create the process' main thread. 
- sched_thread_t *thread = proc_create_thread_raw_unsafe( - ec, - process, - (sched_entry_point_t)kbelf_dyn_entrypoint(dyn), - NULL, - SCHED_PRIO_NORMAL - ); + tid_t thread = proc_create_thread_raw(ec, process, (size_t)kbelf_dyn_entrypoint(dyn), 0, SCHED_PRIO_NORMAL); if (!thread) { kbelf_dyn_unload(dyn); kbelf_dyn_destroy(dyn); @@ -311,7 +305,7 @@ void proc_start_raw(badge_err_t *ec, process_t *process) { } port_fencei(); atomic_store(&process->flags, PROC_RUNNING); - sched_resume_thread(ec, thread); + thread_resume(ec, thread); mutex_release(NULL, &process->mtx); kbelf_dyn_destroy(dyn); logkf(LOG_INFO, "Process %{d} started", process->pid); @@ -320,40 +314,31 @@ void proc_start_raw(badge_err_t *ec, process_t *process) { // Create a new thread in a process. // Returns created thread handle. -sched_thread_t *proc_create_thread_raw_unsafe( - badge_err_t *ec, process_t *process, sched_entry_point_t entry_point, void *arg, sched_prio_t priority -) { +tid_t proc_create_thread_raw(badge_err_t *ec, process_t *process, size_t entry_point, size_t arg, int priority) { // Create an entry for a new thread. - void *mem = realloc(process->threads, sizeof(sched_thread_t *) * (process->threads_len + 1)); + void *mem = realloc(process->threads, sizeof(tid_t) * (process->threads_len + 1)); if (!mem) { badge_err_set(ec, ELOC_PROCESS, ECAUSE_NOMEM); - return NULL; + return 0; } process->threads = mem; - // TODO: Use a proper allocator for the kernel stack? - size_t const kstack_size = 8192; - void *kstack = malloc(kstack_size); - if (!kstack) { - badge_err_set(ec, ELOC_PROCESS, ECAUSE_NOMEM); - return NULL; - } - // Create a thread. - sched_thread_t *thread = sched_create_userland_thread(ec, process, entry_point, arg, kstack, kstack_size, priority); - if (!thread) { - free(kstack); - return NULL; + tid_t tid = thread_new_user(ec, NULL, process, entry_point, arg, priority); + if (!tid) { + return 0; } + sched_thread_t *thread = sched_get_thread(tid); + thread->user_isr_ctx.mpu_ctx = &process->memmap.mpu_ctx; thread->kernel_isr_ctx.mpu_ctx = &process->memmap.mpu_ctx; // Add the thread to the list. - array_insert(process->threads, sizeof(sched_thread_t *), process->threads_len, &thread, process->threads_len); + array_insert(process->threads, sizeof(tid_t), process->threads_len, &tid, process->threads_len); process->threads_len++; // logkf(LOG_DEBUG, "Creating user thread, PC: 0x%{size;x}", entry_point); - return thread; + return tid; } @@ -439,11 +424,11 @@ void proc_raise_signal_raw(badge_err_t *ec, process_t *process, int signum) { // Suspend all threads for a process except the current. 
-void proc_suspend(process_t *process, sched_thread_t *current) { +void proc_suspend(process_t *process, tid_t current) { mutex_acquire(NULL, &process->mtx, TIMESTAMP_US_MAX); for (size_t i = 0; i < process->threads_len; i++) { if (process->threads[i] != current) { - sched_suspend_thread(NULL, process->threads[i]); + thread_suspend(NULL, process->threads[i]); } } mutex_release(NULL, &process->mtx); @@ -453,7 +438,7 @@ void proc_suspend(process_t *process, sched_thread_t *current) { void proc_resume(process_t *process) { mutex_acquire(NULL, &process->mtx, TIMESTAMP_US_MAX); for (size_t i = 0; i < process->threads_len; i++) { - sched_resume_thread(NULL, process->threads[i]); + thread_resume(NULL, process->threads[i]); } mutex_release(NULL, &process->mtx); } @@ -463,7 +448,7 @@ void proc_resume(process_t *process) { void proc_delete_runtime_raw(process_t *process) { // This may not be run from one of the process' threads because it kills all of them. for (size_t i = 0; i < process->threads_len; i++) { - assert_dev_drop(sched_get_current_thread() != process->threads[i]); + assert_dev_drop(sched_current_tid() != process->threads[i]); } if (process->pid == 1 && !allow_proc1_death()) { @@ -486,18 +471,14 @@ void proc_delete_runtime_raw(process_t *process) { // Wait for the scheduler to suspend all the threads. for (size_t i = 0; i < process->threads_len; i++) { - sched_resume_thread(NULL, process->threads[i]); - } - bool waiting = true; - while (waiting) { - waiting = false; - for (size_t i = 0; i < process->threads_len; i++) { - if (sched_thread_is_running(NULL, process->threads[i])) { - waiting = true; - } - } - sched_yield(); + thread_resume(NULL, process->threads[i]); + } + // Destroy all threads. + for (size_t i = 0; i < process->threads_len; i++) { + thread_join(process->threads[i]); } + process->threads_len = 0; + free(process->threads); // Adopt all children to init. if (process->pid != 1) { @@ -506,14 +487,6 @@ void proc_delete_runtime_raw(process_t *process) { dlist_concat(&init->children, &process->children); } - // Destroy all threads. - for (size_t i = 0; i < process->threads_len; i++) { - free((void *)process->threads[i]->kernel_stack_bottom); - sched_destroy_thread(NULL, process->threads[i]); - } - process->threads_len = 0; - free(process->threads); - // Unmap all memory regions. while (process->memmap.regions_len) { #if MEMMAP_VMEM diff --git a/kernel/src/process/sighandler.c b/kernel/src/process/sighandler.c index 354a409..d51f099 100644 --- a/kernel/src/process/sighandler.c +++ b/kernel/src/process/sighandler.c @@ -50,7 +50,7 @@ static inline void memmap_info(process_t *const proc, size_t vaddr) { // Runs the appropriate handler for a signal. static void run_sighandler(int signum, uint64_t cause) { - sched_thread_t *thread = sched_get_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread_unsafe(); process_t *const proc = thread->process; // Check for signal handler. if (proc->sighandlers[signum] == SIG_DFL) { @@ -103,7 +103,7 @@ void proc_signal_handler() { // If the thread is already running a signal handler, the process is killed. 
static void trap_signal_handler(int signum, uint64_t cause) NORETURN; static void trap_signal_handler(int signum, uint64_t cause) { - sched_thread_t *thread = sched_get_current_thread_unsafe(); + sched_thread_t *thread = sched_current_thread_unsafe(); process_t *const proc = thread->process; int current = sched_is_sighandler(); if (current) { diff --git a/kernel/src/scheduler/scheduler.c b/kernel/src/scheduler/scheduler.c index a08b168..e363ba6 100644 --- a/kernel/src/scheduler/scheduler.c +++ b/kernel/src/scheduler/scheduler.c @@ -3,606 +3,511 @@ #include "scheduler/scheduler.h" +#include "arrays.h" #include "assertions.h" -#include "attributes.h" #include "badge_strings.h" +#include "config.h" #include "cpu/isr.h" +#include "housekeeping.h" +#include "interrupt.h" #include "isr_ctx.h" -#include "list.h" -#include "meta.h" -#include "port/hardware_allocation.h" -#include "port/interrupt.h" -#include "process/internal.h" -#include "process/process.h" +#include "malloc.h" #include "scheduler/cpu.h" #include "scheduler/isr.h" #include "scheduler/types.h" -#include "syscall.h" -#include "time.h" -#include -/// Returns non-0 value if `V` is aligned to `A`. -#define is_aligned(V, A) (((V) & ((A) - 1)) == 0) - -#define is_flag_set(C, F) (((C) & (F)) != 0) -#define reset_flag(C, F) ((C) &= ~(F)) -#define set_flag(C, F) ((C) |= (F)) - - -static_assert((STACK_ALIGNMENT & (STACK_ALIGNMENT - 1)) == 0, "STACK_ALIGNMENT must be a power of two!"); - -typedef enum thread_insert_position_t { - INSERT_THREAD_BACK, - INSERT_THREAD_FRONT, -} thread_insert_position_t; - - -// List of currently queued threads. `head` will be queued next, `tail` will be -// queued last. -static dlist_t thread_wait_queue = DLIST_EMPTY; - -enum { - // Size of the - IDLE_TASK_STACK_LEN = 128 -}; -static uint8_t idle_thread_stack[IDLE_TASK_STACK_LEN] ALIGNED_TO(STACK_ALIGNMENT); - -static_assert( - is_aligned(IDLE_TASK_STACK_LEN, STACK_ALIGNMENT), "IDLE_TASK_STACK_LEN must be aligned to STACK_ALIGNMENT!" -); - -// The scheduler must schedule something, and the idle task is what -// the scheduler will schedule when nothing can be scheduled. -static sched_thread_t idle_thread = { - .kernel_stack_bottom = (size_t)&idle_thread_stack, - .kernel_stack_top = (size_t)&idle_thread_stack + IDLE_TASK_STACK_LEN, - .flags = THREAD_KERNEL | THREAD_PRIVILEGED, - .kernel_isr_ctx = {.thread = &idle_thread, .flags = ISR_CTX_FLAG_KERNEL}, - -#ifndef NDEBUG - .name = "idle", -#endif -}; - -// Variable for safety. Is set to `true` once `sched_exec()` was called. -static bool scheduler_enabled = false; - -// We need to have a flag for the first task switch, so don't suspend a -// non-existing task. -static bool scheduler_bootstrapped = false; - -// Stores the time when the next interrupt routine should come along. -// This is then incremented gradually to keep the system running at a steady -// pace. -static timestamp_us_t next_isr_invocation_time; - - -enum { - // A sentinel value stored in thread->flags to mark the thread as - // non-allocated. - // This is just a safety measure! - THREAD_ALLOCATOR_SENTINEL = 0xBADC0DEUL, -}; - -// Backing store for the thread allocator. -static sched_thread_t thread_alloc_pool_storage[SCHEDULER_MAX_THREADS]; - -// Linked list of all available, non-allocated threads. -static dlist_t thread_alloc_pool = DLIST_EMPTY; - - -// Sanity check for critical sections -static bool critical_section_active = false; - -// Stores whether a critical section had interrupts enabled before or not. 
-static bool critical_section_had_interrupts = false; - -// Enters a scheduler-local critical section that cannot be interrupted from the -// scheduler itself. Call `leave_critical_section` after the critical section -// has ended. -// -// During a critical section, no thread switches can occurr. -static void enter_critical_section(void) { - assert_dev_drop(!critical_section_active); - critical_section_had_interrupts = isr_global_disable(); - critical_section_active = true; +// TODO: Replace this dummy `smp_cur_cpu()` when `generic` branch is merged. +int smp_cur_cpu() { + return 0; +} +// TODO: Replace this dummy `smp_count` when `generic` branch is merged. +static int smp_count = 1; + +// CPU-local scheduler structs. +static sched_cpulocal_t *cpu_ctx; +// Threads list mutex. +static mutex_t threads_mtx = MUTEX_T_INIT_SHARED; +// Number of threads that exist. +static size_t threads_len; +// Capacity for thread list. +static size_t threads_cap; +// Array of all threads that exist. +static sched_thread_t **threads; +// Thread ID counter. +static atomic_int tid_counter = 1; +// Unused thread pool mutex. +static mutex_t unused_mtx = MUTEX_T_INIT; +// Pool of unused thread handles. +static dlist_t dead_threads; + +// Compare the ID of `sched_thread_t *` to an `int`. +static int tid_int_cmp(void const *a, void const *b) { + sched_thread_t *thread = *(sched_thread_t **)a; + tid_t tid = (tid_t)(ptrdiff_t)b; + return thread->id - tid; } -static void leave_critical_section(void) { - assert_dev_drop(critical_section_active); - if (critical_section_had_interrupts) { - isr_global_enable(); - } - critical_section_active = false; +// Find a thread by TID. +static sched_thread_t *find_thread(tid_t tid) { + array_binsearch_t res = array_binsearch(threads, sizeof(void *), threads_len, (void *)(ptrdiff_t)tid, tid_int_cmp); + return res.found ? threads[res.index] : NULL; } -// Allocates a new thread. Release memory with `thread_free` again. -static sched_thread_t *thread_alloc(void) { - dlist_node_t *const node = dlist_pop_front(&thread_alloc_pool); - if (node == NULL) { - // out of memory - return NULL; +// Idle function ran when a CPU has no threads. +static void idle_func(void *arg) { + (void)arg; + while (1) { + isr_pause(); + sched_yield(); } +} - sched_thread_t *const thread = field_parent_ptr(sched_thread_t, schedule_node, node); - - assert_dev_drop(thread->flags == THREAD_ALLOCATOR_SENTINEL); - -#ifndef NDEBUG - // for debug sessions, fill the thread with bogus data, so we can be sure - // we initialized everything properly: - mem_set(thread, 0xAA, sizeof(sched_thread_t)); -#endif +// Set the context switch to a certain thread. +static inline void set_switch(sched_thread_t *thread) { + int flags = atomic_load(&thread->flags); + isr_ctx_t *next = (flags & THREAD_PRIVILEGED) ? &thread->kernel_isr_ctx : &thread->user_isr_ctx; + next->cpulocal = isr_ctx_get()->cpulocal; + isr_ctx_switch_set(next); +} - return thread; +// Try to hand a thread off to another CPU. 
+static bool thread_handoff(sched_thread_t *thread, int cpu, bool force) { + sched_cpulocal_t *info = cpu_ctx + cpu; + assert_dev_keep(mutex_acquire_shared_from_isr(NULL, &info->run_mtx, TIMESTAMP_US_MAX)); + int flags = atomic_load(&info->flags); + if (force || ((flags & SCHED_RUNNING) && !(flags & SCHED_EXITING))) { + assert_dev_keep(mutex_acquire_from_isr(NULL, &info->incoming_mtx, TIMESTAMP_US_MAX)); + dlist_append(&info->incoming, &thread->node); + assert_dev_keep(mutex_release_from_isr(NULL, &info->incoming_mtx)); + } + assert_dev_keep(mutex_release_shared_from_isr(NULL, &info->run_mtx)); + return (flags & SCHED_RUNNING) && !(flags & SCHED_EXITING); } -// Frees a thread allocated with `thread_alloc`. -static void thread_free(sched_thread_t *thread) { +// Requests the scheduler to prepare a switch from inside an interrupt routine. +void sched_request_switch_from_isr() { + int cur_cpu = smp_cur_cpu(); + sched_cpulocal_t *info = cpu_ctx + cur_cpu; + + // Check the exiting flag. + if (atomic_load(&info->flags) & SCHED_EXITING) { + // Exit the scheduler on this CPU. + assert_dev_keep(mutex_acquire_from_isr(NULL, &info->run_mtx, TIMESTAMP_US_MAX)); + atomic_fetch_and(&info->flags, ~(SCHED_RUNNING | SCHED_EXITING)); + + // Hand all threads over to other CPUs. + int cpu = 0; + dlist_concat(&info->queue, &info->incoming); + while (info->queue.len) { + sched_thread_t *thread = (void *)dlist_pop_front(&info->queue); + do { + cpu = (cpu + 1) % smp_count; + } while (cpu == cur_cpu || !thread_handoff(thread, cpu, false)); + } + assert_dev_keep(mutex_release_from_isr(NULL, &info->run_mtx)); -#ifndef NDEBUG - // Set the thread memory to bogus data to make sure use-after-free is - // more likely to be detected - mem_set(thread, 0xAA, sizeof(sched_thread_t)); -#endif + // TODO: Power off this CPU. + while (1) asm("wfi"); + } - // Store our sentinel and reset the schedule_node: - thread->flags = THREAD_ALLOCATOR_SENTINEL; - thread->schedule_node = DLIST_NODE_EMPTY; + // Check for incoming threads. + assert_dev_keep(mutex_acquire_from_isr(NULL, &info->incoming_mtx, TIMESTAMP_US_MAX)); + while (info->incoming.len) { + sched_thread_t *thread = (void *)dlist_pop_front(&info->incoming); + assert_dev_drop(atomic_load(&thread->flags) & THREAD_RUNNING); + if (atomic_load(&thread->flags) & THREAD_STARTNOW) { + dlist_prepend(&info->queue, &thread->node); + } else { + dlist_append(&info->queue, &thread->node); + } + } + assert_dev_keep(mutex_release_from_isr(NULL, &info->incoming_mtx)); + + // Check for runnable threads. + while (info->queue.len) { + // Take the first thread. + sched_thread_t *thread = (void *)dlist_pop_front(&info->queue); + int flags = atomic_load(&thread->flags); + if (thread->process && (atomic_load(&thread->process->flags) & PROC_EXITING)) { + if ((flags & THREAD_PRIVILEGED) && (flags & THREAD_RUNNING)) { + // Kernel code still running; let it finish. + dlist_append(&info->queue, &thread->node); + set_switch(thread); + return; + } else { + // Process exiting; suspend thread. + atomic_fetch_and(&thread->flags, ~(THREAD_RUNNING | THREAD_SUSPENDING)); + } + } else if (flags & THREAD_EXITING) { + // Clean up thread. + assert_dev_keep(mutex_acquire_from_isr(NULL, &unused_mtx, TIMESTAMP_US_MAX)); + dlist_append(&dead_threads, &thread->node); + assert_dev_keep(mutex_release_from_isr(NULL, &unused_mtx)); + } else if (!(flags & THREAD_PRIVILEGED) && (flags & THREAD_SUSPENDING)) { + // Suspend thread. 
+ atomic_fetch_and(&thread->flags, ~(THREAD_RUNNING | THREAD_SUSPENDING)); + } else { + // Perform context switch. + assert_dev_drop(flags & THREAD_RUNNING); + dlist_append(&info->queue, &thread->node); + // logkf_from_isr(LOG_DEBUG, "Switch to thread #%{d} '%{cs}' @0x%{size;x}", thread->id, thread->name, + // thread); + set_switch(thread); + return; + } + } - // Push the thread back into the pool so `thread_alloc` can find it again - dlist_append(&thread_alloc_pool, &thread->schedule_node); + // If nothing is running on this CPU, run the idle thread. + // logk_from_isr(LOG_DEBUG, "Switch to idle"); + set_switch(&info->idle_thread); } -static void idle_thread_function(void *arg) { +// Scheduler housekeeping. +static void sched_housekeeping(int taskno, void *arg) { + (void)taskno; (void)arg; - while (true) { - // make the loop not be undefined behaviour - asm volatile("" ::: "memory"); - - // TODO: Enable CPU sleeping here to save unnecessary power usage + assert_dev_keep(mutex_acquire(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + + // Get list of dead threads. + irq_enable(false); + assert_dev_keep(mutex_acquire_from_isr(NULL, &unused_mtx, TIMESTAMP_US_MAX)); + dlist_t tmp = DLIST_EMPTY; + sched_thread_t *node = (void *)dead_threads.head; + while (node) { + void *next = (void *)node->node.next; + if (atomic_load(&node->flags) & THREAD_DETACHED) { + dlist_remove(&dead_threads, &node->node); + dlist_append(&tmp, &node->node); + } + node = next; } -} - -// Returns the current thread without using a critical section. -sched_thread_t *sched_get_current_thread_unsafe() { - return isr_ctx_get()->thread; -} - -// Destroys a thread and releases its resources. -static void destroy_thread(sched_thread_t *thread) { - assert_dev_drop(thread != NULL); - - if (is_flag_set(thread->flags, THREAD_RUNNING)) { - // thread is still running, we have to remove it from the thread queue: - sched_suspend_thread(NULL, thread); + assert_dev_keep(mutex_release_from_isr(NULL, &unused_mtx)); + irq_enable(true); + + // Clean up all dead threads. + while (tmp.len) { + sched_thread_t *thread = (void *)dlist_pop_front(&tmp); + free((void *)thread->kernel_stack_bottom); + if (thread->name) { + free(thread->name); + } + array_binsearch_t res = + array_binsearch(threads, sizeof(void *), threads_len, (void *)(ptrdiff_t)thread->id, tid_int_cmp); + assert_dev_drop(res.found); + array_lencap_remove(&threads, sizeof(void *), &threads_len, &threads_cap, NULL, res.index); } - // At last, we free the memory: - thread_free(thread); + assert_dev_keep(mutex_release(NULL, &threads_mtx)); } -sched_thread_t *sched_get_current_thread(void) { - return isr_ctx_get()->thread; -} -#include "rawprint.h" +// Global scheduler initialization. 
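`sched_init()` below allocates one `sched_cpulocal_t` per CPU, gives each CPU an idle thread with its own kernel stack, and registers the housekeeping task above so detached dead threads get reaped periodically. A much-reduced sketch of that shape, with invented field names rather than the real `sched_cpulocal_t` layout:

// Reduced per-CPU scheduler record and its initialization, for illustration only.
#include <stdatomic.h>
#include <stdlib.h>

#define RUN_FLAG_RUNNING 0x1 // scheduler active on this CPU
#define RUN_FLAG_EXITING 0x2 // scheduler asked to shut down

struct cpu_sched {
    atomic_int     flags;          // RUN_FLAG_* bits
    unsigned char *idle_stack;     // kernel stack owned by the idle thread
    size_t         idle_stack_len;
};

static struct cpu_sched *sched_state;

// Allocate one record and one idle stack per CPU; the stack size is illustrative.
static int sched_state_init(int cpu_count, size_t idle_stack_len) {
    sched_state = calloc((size_t)cpu_count, sizeof *sched_state);
    if (!sched_state) return -1;
    for (int i = 0; i < cpu_count; i++) {
        sched_state[i].idle_stack = malloc(idle_stack_len);
        if (!sched_state[i].idle_stack) return -1;
        sched_state[i].idle_stack_len = idle_stack_len;
        atomic_init(&sched_state[i].flags, 0);
    }
    return 0;
}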
void sched_init() { - // Set up the idle task: - sched_prepare_kernel_entry(&idle_thread, idle_thread_function, NULL); - - // Initialize thread allocator: - for (size_t i = 0; i < SCHEDULER_MAX_THREADS; i++) { - thread_alloc_pool_storage[i] = (sched_thread_t){ - .flags = THREAD_ALLOCATOR_SENTINEL, - .schedule_node = DLIST_NODE_EMPTY, - }; - dlist_append(&thread_alloc_pool, &thread_alloc_pool_storage[i].schedule_node); + cpu_ctx = malloc(smp_count * sizeof(sched_cpulocal_t)); + assert_always(cpu_ctx); + mem_set(cpu_ctx, 0, smp_count * sizeof(sched_cpulocal_t)); + for (int i = 0; i < smp_count; i++) { + cpu_ctx[i].run_mtx = MUTEX_T_INIT_SHARED; + cpu_ctx[i].incoming_mtx = MUTEX_T_INIT; + void *stack = malloc(8192); + assert_always(stack); + cpu_ctx[i].idle_thread.kernel_stack_bottom = (size_t)stack; + cpu_ctx[i].idle_thread.kernel_stack_top = (size_t)stack + 8192; + cpu_ctx[i].idle_thread.kernel_isr_ctx.flags = ISR_CTX_FLAG_KERNEL; + cpu_ctx[i].idle_thread.flags = THREAD_PRIVILEGED; + sched_prepare_kernel_entry(&cpu_ctx[i].idle_thread, idle_func, NULL); } + hk_add_repeated(0, 1000000, sched_housekeeping, NULL); } +// Start executing the scheduler on this CPU. void sched_exec() { - // Set the first preemption time to now. - next_isr_invocation_time = time_us(); + // Allocate CPU-local scheduler data. + sched_cpulocal_t *info = cpu_ctx + smp_cur_cpu(); + isr_ctx_get()->cpulocal->sched = info; + logkf_from_isr(LOG_DEBUG, "Starting scheduler on CPU%{d}", smp_cur_cpu()); - // Mark the scheduler as enabled. - scheduler_enabled = true; + // Mark as running. + atomic_store_explicit(&info->flags, SCHED_RUNNING, memory_order_release); - // Invoke the first context switch. - isr_global_disable(); + // Start handed over threads or idle until one is handed over to this CPU. + isr_ctx_get()->flags |= ISR_CTX_FLAG_USE_SP; sched_request_switch_from_isr(); isr_context_switch(); - - // The context switch will always happen, so this execution context dies. __builtin_unreachable(); } -#include "log.h" - -void sched_request_switch_from_isr(void) { - if (!scheduler_enabled) { - // only switch tasks when the scheduler is ready to run - return; - } - - // logk(LOG_INFO, "sched_request_switch_from_isr"); - - timestamp_us_t const now = time_us(); - - int64_t const time_quota_left = next_isr_invocation_time - now; - - - if (scheduler_bootstrapped) { - sched_thread_t *const current_thread = sched_get_current_thread_unsafe(); - // logkf( - // LOG_DEBUG, - // "yielding from task '%{cs}', flags=%{u32;x}", - // sched_get_name(current_thread), - // current_thread->flags - // ); - if (current_thread == &idle_thread) { - // logk(LOG_DEBUG, "thread is idle task, do nothing special"); - // Idle task cannot be destroyed, idle task cannot be killed. - - if (time_quota_left > 0) { - // TODO: Implement CPU usage statistics for threads - } +// Exit the scheduler and subsequenty shut down the CPU. +void sched_exit(int cpu) { + assert_dev_keep(mutex_acquire(NULL, &cpu_ctx[cpu].run_mtx, TIMESTAMP_US_MAX)); + atomic_fetch_or_explicit(&cpu_ctx[cpu].flags, SCHED_EXITING, memory_order_relaxed); + assert_dev_keep(mutex_release(NULL, &cpu_ctx[cpu].run_mtx)); +} - } else if (current_thread != NULL) { - if (time_quota_left > 0) { - // Thread is a good boy and didn't use up all of its time in the - // CPU. We should give it some credit here. - // TODO: Implement CPU usage statistics for threads - } +// Create a new suspended userland thread. 
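`thread_new_user()` below allocates the thread struct, its kernel stack, and an optional copy of the name, then registers the thread in the global `threads` array. Since IDs come from a monotonically increasing counter, appending keeps that array sorted by TID, which is what allows `find_thread()` above to use a binary search. A standalone sketch of the same registry pattern using only libc (all names are illustrative):

// Sketch of a TID registry: IDs only ever grow, so appending keeps the
// array sorted and lookups can use bsearch().
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct thr { int tid; };

static atomic_int   next_tid = 1;
static struct thr **registry;
static size_t       registry_len, registry_cap;

// bsearch comparator: key is a TID packed into a pointer, elem is a struct thr *.
static int tid_cmp(void const *key, void const *elem) {
    int               tid = (int)(intptr_t)key;
    struct thr const *t   = *(struct thr *const *)elem;
    return (tid > t->tid) - (tid < t->tid);
}

// Register a new thread; returns its TID, or 0 on allocation failure.
static int registry_add(struct thr *t) {
    if (registry_len == registry_cap) {
        size_t cap = registry_cap ? registry_cap * 2 : 8;
        void  *mem = realloc(registry, cap * sizeof *registry);
        if (!mem) return 0;
        registry     = mem;
        registry_cap = cap;
    }
    t->tid                   = atomic_fetch_add(&next_tid, 1);
    registry[registry_len++] = t; // appended in TID order
    return t->tid;
}

// Look a thread up by TID in O(log n).
static struct thr *registry_find(int tid) {
    struct thr **res = bsearch((void *)(intptr_t)tid, registry, registry_len,
                               sizeof *registry, tid_cmp);
    return res ? *res : NULL;
}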
+tid_t thread_new_user( + badge_err_t *ec, char const *name, process_t *process, size_t user_entrypoint, size_t user_arg, int priority +) { + // Allocate thread. + sched_thread_t *thread = malloc(sizeof(sched_thread_t)); + if (!thread) { + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); + return 0; + } + mem_set(thread, 0, sizeof(sched_thread_t)); - if (is_flag_set(current_thread->flags, THREAD_RUNNING)) { - // logk(LOG_DEBUG, "thread is still running, put into queue again"); + thread->kernel_stack_bottom = (size_t)malloc(CONFIG_STACK_SIZE); + if (!thread->kernel_stack_bottom) { + free(thread); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); + return 0; + } - // if we have a current thread, append it to the wait queue again - // before popping the next task. This is necessary as we if we only - // have a single task, that should be scheduled again. Otheriwse, - // `dlist_pop_front` would return `NULL` instead of - // `current_thread`. - dlist_append(&thread_wait_queue, ¤t_thread->schedule_node); - } else { - // current thread is dead, we don't push it into the scheduler again - - if (is_flag_set(current_thread->flags, THREAD_COMPLETED)) { - if (is_flag_set(current_thread->flags, THREAD_DETACHED)) { - // logk(LOG_DEBUG, "thread is finished+detached, kill"); - destroy_thread(current_thread); - } else { - // logk(LOG_DEBUG, "thread is finished, just stop"); - } - } else { - // logk(LOG_DEBUG, "thread is suspended"); - } - } + if (name) { + size_t name_len = cstr_length(name); + thread->name = malloc(name_len + 1); + if (!thread->name) { + free((void *)thread->kernel_stack_bottom); + free(thread); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); + return 0; } - } else { - // First run must never call `sched_get_current_thread_unsafe` as we don't even have - // a thread set already. - // - // The next thread switch ISR must use the regular path tho: - scheduler_bootstrapped = true; - // logk(LOG_DEBUG, "scheduler bootstrapping done..."); + cstr_copy(thread->name, name_len + 1, name); } - // { - // dlist_node_t *iter = thread_wait_queue.head; - // - // logkf(LOG_DEBUG, "queued threads(%{size;x}):", (size_t)&thread_wait_queue); - // while (iter != NULL) { - // - // sched_thread_t *const thread = field_parent_ptr(sched_thread_t, schedule_node, iter); - // - // logkf(LOG_DEBUG, " - %{cs}", sched_get_name(thread)); - // - // iter = iter->next; - // } - // } - - uint32_t task_time_quota = 0; - dlist_node_t *next_thread_node; -pop_thread: - next_thread_node = dlist_pop_front(&thread_wait_queue); - - if (next_thread_node != NULL) { - sched_thread_t *const next_thread = field_parent_ptr(sched_thread_t, schedule_node, next_thread_node); - - // Set the switch target. - if (next_thread->flags & THREAD_PRIVILEGED) { - isr_ctx_switch_set(&next_thread->kernel_isr_ctx); - } else { - if (proc_getflags_raw(next_thread->process) & PROC_EXITING) { - // If a thread's process is exiting, suspend it and get the next one instead. 
- reset_flag(next_thread->flags, THREAD_RUNNING); - goto pop_thread; - } - isr_ctx_switch_set(&next_thread->user_isr_ctx); - proc_pre_resume_cb(next_thread); + thread->priority = priority; + thread->process = process; + thread->id = atomic_fetch_add(&tid_counter, 1); + thread->kernel_stack_top = thread->kernel_stack_bottom + CONFIG_STACK_SIZE; + thread->kernel_isr_ctx.flags = ISR_CTX_FLAG_KERNEL; + thread->kernel_isr_ctx.thread = thread; + thread->user_isr_ctx.thread = thread; + thread->user_isr_ctx.mpu_ctx = &process->memmap.mpu_ctx; + sched_prepare_user_entry(thread, user_entrypoint, user_arg); + + assert_dev_keep(mutex_acquire(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + bool success = array_lencap_insert(&threads, sizeof(void *), &threads_len, &threads_cap, &thread, threads_len); + assert_dev_keep(mutex_release(NULL, &threads_mtx)); + if (!success) { + if (thread->name) { + free(thread->name); } - - task_time_quota = SCHEDULER_MIN_TASK_TIME_US + (uint32_t)next_thread->priority * SCHEDULER_TIME_QUOTA_INCR_US; - // logkf(LOG_DEBUG, "switch to task '%{cs}'", next_thread->name); - - } else { - // nothing to do, switch to idle task: - - isr_ctx_switch_set(&idle_thread.kernel_isr_ctx); - task_time_quota = SCHEDULER_IDLE_TASK_QUOTA_US; - // logk(LOG_DEBUG, "switch to idle"); + free((void *)thread->kernel_stack_bottom); + free(thread); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); + return 0; } - assert_dev_drop(task_time_quota > 0); - next_isr_invocation_time = now + task_time_quota; - time_set_next_task_switch(next_isr_invocation_time); + return thread->id; } -sched_thread_t *sched_create_userland_thread( - badge_err_t *ec, - process_t *process, - sched_entry_point_t entry_point, - void *arg, - void *kernel_stack_bottom, - size_t stack_size, - sched_prio_t priority -) { - size_t const kernel_stack_bottom_addr = (size_t)kernel_stack_bottom; - - assert_dev_drop(process != NULL); - assert_dev_drop(entry_point != NULL); - assert_dev_drop(is_aligned(kernel_stack_bottom_addr, STACK_ALIGNMENT)); - assert_dev_drop(is_aligned(stack_size, STACK_ALIGNMENT)); - - sched_thread_t *const thread = thread_alloc(); - if (thread == NULL) { +// Create new suspended kernel thread. +tid_t thread_new_kernel(badge_err_t *ec, char const *name, sched_entry_t entrypoint, void *arg, int priority) { + // Allocate thread. 
+ sched_thread_t *thread = malloc(sizeof(sched_thread_t)); + if (!thread) { badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); - return NULL; + return 0; } + mem_set(thread, 0, sizeof(sched_thread_t)); - *thread = (sched_thread_t){ - .process = process, - .kernel_stack_bottom = kernel_stack_bottom_addr, - .kernel_stack_top = kernel_stack_bottom_addr + stack_size, - .priority = priority, - .flags = 0, - .schedule_node = DLIST_NODE_EMPTY, - .exit_code = 0, - .kernel_isr_ctx = {.thread = thread, .flags = ISR_CTX_FLAG_KERNEL}, - .user_isr_ctx = {.thread = thread, .flags = 0}, - }; - - sched_prepare_user_entry(thread, entry_point, arg); - - badge_err_set_ok(ec); - return thread; -} - -sched_thread_t *sched_create_kernel_thread( - badge_err_t *const ec, - sched_entry_point_t const entry_point, - void *const arg, - void *const kernel_stack_bottom, - size_t const stack_size, - sched_prio_t const priority -) { - size_t const kernel_stack_bottom_addr = (size_t)kernel_stack_bottom; - - assert_dev_drop(entry_point != NULL); - assert_dev_drop(is_aligned(kernel_stack_bottom_addr, STACK_ALIGNMENT)); - assert_dev_drop(is_aligned(stack_size, STACK_ALIGNMENT)); - - sched_thread_t *const thread = thread_alloc(); - if (thread == NULL) { + thread->kernel_stack_bottom = (size_t)malloc(CONFIG_STACK_SIZE); + if (!thread->kernel_stack_bottom) { + free(thread); badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); - return NULL; + return 0; } - *thread = (sched_thread_t){ - .process = NULL, - .kernel_stack_bottom = kernel_stack_bottom_addr, - .kernel_stack_top = kernel_stack_bottom_addr + stack_size, - .priority = priority, - .flags = THREAD_KERNEL | THREAD_PRIVILEGED, - .schedule_node = DLIST_NODE_EMPTY, - .exit_code = 0, - .kernel_isr_ctx = {.thread = thread, .flags = ISR_CTX_FLAG_KERNEL}, - }; - - sched_prepare_kernel_entry(thread, entry_point, arg); - - badge_err_set_ok(ec); - return thread; -} - -void sched_destroy_thread(badge_err_t *ec, sched_thread_t *thread) { - assert_dev_drop(thread != NULL); - - if (thread == sched_get_current_thread()) { - sched_detach_thread(ec, thread); - if (!badge_err_is_ok(ec)) { - return; + if (name) { + size_t name_len = cstr_length(name); + thread->name = malloc(name_len + 1); + if (!thread->name) { + free((void *)thread->kernel_stack_bottom); + free(thread); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); + return 0; } - sched_exit(0); + cstr_copy(thread->name, name_len + 1, name); } - destroy_thread(thread); - badge_err_set_ok(ec); -} - -void sched_detach_thread(badge_err_t *ec, sched_thread_t *thread) { - assert_dev_drop(thread != NULL); - - enter_critical_section(); - - set_flag(thread->flags, THREAD_DETACHED); + thread->priority = priority; + thread->id = atomic_fetch_add(&tid_counter, 1); + thread->kernel_stack_top = thread->kernel_stack_bottom + CONFIG_STACK_SIZE; + thread->kernel_isr_ctx.flags = ISR_CTX_FLAG_KERNEL; + thread->kernel_isr_ctx.thread = thread; + thread->flags |= THREAD_PRIVILEGED | THREAD_KERNEL; + sched_prepare_kernel_entry(thread, entrypoint, arg); + + assert_dev_keep(mutex_acquire(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + bool success = array_lencap_insert(&threads, sizeof(void *), &threads_len, &threads_cap, &thread, threads_len); + assert_dev_keep(mutex_release(NULL, &threads_mtx)); + if (!success) { + if (thread->name) { + free(thread->name); + } + free((void *)thread->kernel_stack_bottom); + free(thread); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOMEM); + return 0; + } - leave_critical_section(); + logkf(LOG_DEBUG, "Kernel thread #%{d} '%{cs}' @0x%{size;x} 
created", thread->id, thread->name, thread); badge_err_set_ok(ec); + return thread->id; } -void sched_suspend_thread(badge_err_t *const ec, sched_thread_t *const thread) { - assert_dev_drop(thread != NULL); - - enter_critical_section(); - - if (is_flag_set(thread->flags, THREAD_COMPLETED)) { - - // we cannot suspend the thread when it's already completed: - badge_err_set(ec, ELOC_THREADS, ECAUSE_ILLEGAL); - - } else if (thread == sched_get_current_thread_unsafe()) { - - // Thread currently active. Remove the running flag, and yield. - // The function will return after resumption of the thread. - reset_flag(thread->flags, THREAD_RUNNING); - badge_err_set_ok(ec); - - leave_critical_section(); - sched_yield(); - return; - - } else if (is_flag_set(thread->flags, THREAD_RUNNING)) { - - // thread is currently queued for running. drop it from the queue - // and remove the running flag. - reset_flag(thread->flags, THREAD_RUNNING); - dlist_remove(&thread_wait_queue, &thread->schedule_node); +// Do not wait for thread to be joined; clean up immediately. +void thread_detach(badge_err_t *ec, tid_t tid) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + if (thread) { + atomic_fetch_or(&thread->flags, THREAD_DETACHED); badge_err_set_ok(ec); - } else { - - // thread is neither finished nor running right now, everything is ok: - badge_err_set_ok(ec); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOTFOUND); } - - leave_critical_section(); + assert_always(mutex_release_shared(NULL, &threads_mtx)); } -// Implementation for both `sched_resume_thread` and `sched_resume_thread_next`. -static void sched_resume_thread_inner( - badge_err_t *const ec, sched_thread_t *const thread, thread_insert_position_t const position -) { - assert_dev_drop(thread != NULL); - enter_critical_section(); - - if (is_flag_set(thread->flags, THREAD_COMPLETED)) { - - // thread is already completed and cannot be resumed: - badge_err_set(ec, ELOC_THREADS, ECAUSE_ILLEGAL); - leave_critical_section(); - return; - } else if (!is_flag_set(thread->flags, THREAD_RUNNING)) { - // Thread is not running, but ready to run. Put it into the - // wait queue and mark it as ready. - set_flag(thread->flags, THREAD_RUNNING); - - if (position == INSERT_THREAD_FRONT) { - dlist_prepend(&thread_wait_queue, &thread->schedule_node); +// Pauses execution of the thread. +void thread_suspend(badge_err_t *ec, tid_t tid) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + if (thread) { + if (thread->flags & THREAD_KERNEL) { + badge_err_set(ec, ELOC_THREADS, ECAUSE_ILLEGAL); } else { - dlist_append(&thread_wait_queue, &thread->schedule_node); + int exp; + do { + exp = atomic_load(&thread->flags); + } while (!atomic_compare_exchange_strong(&thread->flags, &exp, exp | THREAD_SUSPENDING)); + badge_err_set_ok(ec); } - - } else if (position == INSERT_THREAD_FRONT) { - // Thread is already running and in the wait queue. 
Remove it and push - // it to the front: - assert_dev_drop(dlist_contains(&thread_wait_queue, &thread->schedule_node)); - - dlist_remove(&thread_wait_queue, &thread->schedule_node); - dlist_prepend(&thread_wait_queue, &thread->schedule_node); - - badge_err_set_ok(ec); } else { - // Thread is already running and in the wait queue - assert_dev_drop(dlist_contains(&thread_wait_queue, &thread->schedule_node)); - badge_err_set_ok(ec); + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOTFOUND); } - - leave_critical_section(); + assert_always(mutex_release_shared(NULL, &threads_mtx)); } -void sched_resume_thread(badge_err_t *const ec, sched_thread_t *const thread) { - sched_resume_thread_inner(ec, thread, INSERT_THREAD_BACK); +// Resumes a previously suspended thread or starts it. +static void thread_resume_impl(badge_err_t *ec, tid_t tid, bool now) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + if (thread) { + int setfl = (now * THREAD_STARTNOW) | THREAD_RUNNING; + irq_enable(false); + if (!(atomic_fetch_or(&thread->flags, setfl) & THREAD_RUNNING)) { + if (dlist_contains(&cpu_ctx[smp_cur_cpu()].queue, &thread->node)) { + logk(LOG_FATAL, "NOOOOOOOOOOO!!!!!!!!!!!!"); + } + thread_handoff(thread, smp_cur_cpu(), true); + } + irq_enable(true); + badge_err_set_ok(ec); + } else { + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOTFOUND); + } + assert_always(mutex_release_shared(NULL, &threads_mtx)); } -void sched_resume_thread_next(badge_err_t *const ec, sched_thread_t *const thread) { - sched_resume_thread_inner(ec, thread, INSERT_THREAD_FRONT); +// Resumes a previously suspended thread or starts it. +void thread_resume(badge_err_t *ec, tid_t tid) { + thread_resume_impl(ec, tid, false); } -bool sched_thread_is_running(badge_err_t *ec, sched_thread_t *thread) { - assert_dev_drop(thread != NULL); - badge_err_set_ok(ec); - return is_flag_set(thread->flags, THREAD_RUNNING); +// Resumes a previously suspended thread or starts it. +// Immediately schedules the thread instead of putting it in the queue first. +void thread_resume_now(badge_err_t *ec, tid_t tid) { + thread_resume_impl(ec, tid, true); } -process_t *sched_get_associated_process(sched_thread_t const *const thread) { - enter_critical_section(); - process_t *process = NULL; - if (thread != NULL) { - process = thread->process; +// Returns whether a thread is running; it is neither suspended nor has it exited. +bool thread_is_running(badge_err_t *ec, tid_t tid) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + bool res = false; + if (thread) { + res = !!(atomic_load(&thread->flags) & THREAD_RUNNING); + badge_err_set_ok(ec); + } else { + badge_err_set(ec, ELOC_THREADS, ECAUSE_NOTFOUND); } - leave_critical_section(); - return process; + assert_always(mutex_release_shared(NULL, &threads_mtx)); + return res; } -void sched_yield(void) { - sched_thread_t *const current_thread = sched_get_current_thread(); - assert_always(current_thread != NULL); - isr_global_disable(); - sched_request_switch_from_isr(); - isr_context_switch(); +// Returns the current thread ID. +tid_t sched_current_tid() { + return isr_ctx_get()->thread->id; } -void sched_exit(uint32_t const exit_code) { - enter_critical_section(); +// Returns the current thread struct. 
+sched_thread_t *sched_current_thread() { + irq_enable(false); + sched_thread_t *thread = isr_ctx_get()->thread; + irq_enable(true); + return thread; +} - sched_thread_t *const current_thread = sched_get_current_thread_unsafe(); - assert_always(current_thread != NULL); +// Returns the current thread without using a critical section. +sched_thread_t *sched_current_thread_unsafe() { + return isr_ctx_get()->thread; +} - current_thread->exit_code = exit_code; - set_flag(current_thread->flags, THREAD_COMPLETED); - reset_flag(current_thread->flags, THREAD_RUNNING); +// Returns the associated thread struct. +sched_thread_t *sched_get_thread(tid_t tid) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + assert_always(mutex_release_shared(NULL, &threads_mtx)); + return thread; +} - leave_critical_section(); - sched_yield(); +// Explicitly yield to the scheduler; the scheduler may run other threads without waiting for preemption. +// Use this function to reduce the CPU time used by a thread. +void sched_yield() { + irq_enable(false); + sched_request_switch_from_isr(); + isr_context_switch(); +} - // hint the compiler that we cannot reach this part of the code and - // it will never be reached: +// Exits the current thread. +// If the thread is detached, resources will be cleaned up. +void thread_exit(int code) { + irq_enable(false); + sched_thread_t *thread = isr_ctx_get()->thread; + thread->exit_code = code; + atomic_fetch_or(&thread->flags, THREAD_EXITING); + sched_request_switch_from_isr(); + isr_context_switch(); __builtin_unreachable(); } - -void sched_set_name(badge_err_t *ec, sched_thread_t *thread, char const *name) { -#if NDEBUG - badge_err_set(ec, ELOC_THREADS, ECAUSE_UNSUPPORTED); -#else - size_t l = 0; - for (l = 0; name[l]; l++) { - } - if (l + 1 >= sizeof(thread->name)) { - badge_err_set(ec, ELOC_THREADS, ECAUSE_TOOLONG); - return; +// Wait for another thread to exit. 
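`thread_join()` below is a polling join: it re-checks the target's `THREAD_EXITED` flag, yielding between attempts, and marks the thread detached once it has exited so the housekeeping task can free it. A minimal userspace model of that pattern, with POSIX `sched_yield()` standing in for the kernel's yield and illustrative flag names:

// Minimal model of a polling join on an atomic flag word.
#include <sched.h>
#include <stdatomic.h>

#define FLAG_EXITED   0x1
#define FLAG_DETACHED 0x2

struct thr { atomic_int flags; };

// Called by the exiting side once its work is done.
static void thr_mark_exited(struct thr *t) {
    atomic_fetch_or(&t->flags, FLAG_EXITED);
}

// Called by the joining side: yield until the target has exited,
// then mark it detached so a reaper may free it.
static void thr_join(struct thr *t) {
    while (!(atomic_load(&t->flags) & FLAG_EXITED)) {
        sched_yield(); // give other threads a chance to run
    }
    atomic_fetch_or(&t->flags, FLAG_DETACHED);
}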
+void thread_join(tid_t tid) { + while (1) { + assert_always(mutex_acquire_shared(NULL, &threads_mtx, TIMESTAMP_US_MAX)); + sched_thread_t *thread = find_thread(tid); + if (thread) { + if (atomic_load(&thread->flags) & THREAD_EXITED) { + atomic_fetch_or(&thread->flags, THREAD_DETACHED); + assert_always(mutex_release_shared(NULL, &threads_mtx)); + return; + } + } else { + assert_always(mutex_release_shared(NULL, &threads_mtx)); + return; + } + assert_always(mutex_release_shared(NULL, &threads_mtx)); + sched_yield(); } - mem_copy(thread->name, name, l); - thread->name[l] = 0; -#endif -} - -char const *sched_get_name(sched_thread_t *thread) { -#if NDEBUG - return ""; -#else - return thread->name; -#endif } diff --git a/tools/config.py b/tools/config.py index e5044c7..895f79b 100755 --- a/tools/config.py +++ b/tools/config.py @@ -3,159 +3,285 @@ # SPDX-License-Identifier: MIT from argparse import * -import os, re +import os, re, typing assert __name__ == "__main__" -parser = ArgumentParser() +T = typing.TypeVar('T') +parser = ArgumentParser() +options = {} -float_spec = [ - "none", - "single", - "double", -] + + +class Desc: + def __init__(self, id: str, name: str, help: str): + self.id = id + self.name = name + self.help = help + + def argument(self, parser: ArgumentParser): + parser.add_argument(f"--{self.id}", action="store", required=False, help=self.help) + + +class Option(typing.Generic[T]): + def __init__(self): + self.defval = None + + def use_default(self) -> T: return self.defval + def select(self, desc: Desc) -> T: raise NotImplementedError() + def parse(self, desc: Desc, value: str) -> T: raise NotImplementedError() + + +class OptConst(Option[T]): + def __init__(self, value: T): + self.defval = value + def select(self, desc: Desc) -> T: + print(f"Using {desc.name} `{self.defval}`") + return self.defval + def parse(self, desc: Desc, val: str) -> int: + if val != str(self.defval): + print(f"Error: Unsupported {desc.name} `{val}`") + exit(1) + return self.defval + + +class OptInt(Option[int]): + def __init__(self, min: int, max: int, inc: int = 1, defval: int = None): + assert inc >= 1 + assert (max - min) % inc == 0 + assert defval == None or (defval - min) % inc == 0 + self.min = min + self.max = max + self.inc = inc + self.defval = min if defval == None else defval + + def select(self, desc: Desc) -> int: + prompt = f"{self.min} - {self.max}" + if self.inc != 1: prompt += f" step {self.inc}" + prompt = f"{desc.name}: {prompt} [{self.defval}] " + while True: + val = input(prompt) + if len(val) == 0: + return self.defval + try: + val = int(val) + if (val - self.min) % self.inc != 0 or val < self.min or val > self.max: + continue + return val + except ValueError: + continue + + def parse(self, desc: Desc, val: str) -> int: + try: + val = int(val) + if (val - self.min) % self.inc != 0 or val < self.min or val > self.max: + print(f"{desc.name} {val} out of range {self.xhelp}") + exit(1) + return val + except ValueError: + print(f"{desc.name} invalid integer {val}") + exit(1) + + +class OptEnum(Option[T]): + def __init__(self, options: dict[str, T]|list[str], defval: T = None): + self.options: dict[str, T] + if type(options) != dict: + self.options = {x: x for x in options} + else: + self.options = options + if defval == None: + self.defval = self.options.values()[0] + else: + assert defval in self.options.values() + self.defval = defval + + def select(self, desc: Desc) -> T: + keys = list(self.options.keys()) + if len(keys) == 1: + print(f"Using {desc.name} `{self.defval}`") + return 
self.defval + while True: + print(f"Available {desc.name} options:") + for i in range(len(keys)): + print(f"[{i+1}] {options[keys[i]]}") + defidx = keys.index(self.defval)+1 + idx = input(f"Select {desc.name} [{defidx}] ") or str(defidx+1) + try: + idx = str(idx) + if idx >= 1 and idx <= len(keys): + return options[keys[idx-1]] + except ValueError: + continue + + def parse(self, desc: Desc, val: str) -> T: + try: + return self.options[val] + except KeyError: + print(f"Invalid {desc.name} `{val}`") + exit(1) + + +class OptCompiler(Option[str]): + def __init__(self, match: str, prefer: list[str]): + self.match = re.compile(match) + self.prefer = [re.compile(x) for x in prefer] + self.options = [] + self.defidx = 0 + self.defval = None + + def _prio(self, cc: str) -> int|None: + for i in range(len(self.prefer)): + if self.prefer[i].match(cc): + return i + return None + + def _search(self): + self.options = [] + self.defidx = 0 + priority = None + for dir in os.getenv("PATH").split(os.pathsep): + try: + for bin in os.listdir(dir): + if not bin.endswith("-gcc") and not bin.endswith("-cc"): continue + if not self.match.match(bin): continue + self.options.append(dir + os.path.sep + bin) + opt_prio = self._prio(bin) + if opt_prio != None and priority == None: + priority = opt_prio + self.defidx = len(self.options)-1 + elif opt_prio != None and priority != None and opt_prio < priority: + priority = opt_prio + self.defidx = len(self.options)-1 + except FileNotFoundError: + continue + self.defval = self.options[self.defidx] + + def use_default(self) -> str: + self._search() + return self.defval + + def select(self, desc: Desc) -> str: + self._search() + if len(self.options) == 0: + print("Warning: No suitable compilers found!") + return input("Select compiler: ") + while True: + print("Available compilers:") + for i in range(len(self.options)): + print(f"[{i+1}] {self.options[i]}") + try: + val = input(f"Select compiler [{self.defidx+1}] ") + if len(val) == 0: + return self.options[self.defidx] + else: + return self.options[int(val)-1] + except ValueError: + print(f"Warning: Selected unsupported compiler: {val}") + return val + except IndexError: + continue + + def parse(self, desc: Desc, val: str) -> T: + self._search() + if len(self.options) == 0: + print("Warning: No suitable compilers found!") + elif val not in self.options: + print(f"Warning: Selected unsupported compiler: {val}") + return val + + +class OptStr(Option[str]): + def __init__(self, defval: str): + self.defval = defval + + def select(self, desc: Desc) -> str: + return input(f"{desc.name} [{self.defval}] ") or self.defval + + def parse(self, desc: Desc, value: str) -> T: + print(f"Selected {desc.name} `{value}`") + return value + + + +option_desc = {x.id: x for x in [ + Desc("compiler", "compiler", "C compiler to use for building BadgerOS and apps."), + Desc("cpu", "CPU architecture", "CPU architecture to build for."), + Desc("float_spec", "floating-point", "Largest floating-point type to support."), + Desc("vec_spec", "vector", "Largest vector type to support."), + Desc("stack_size", "stack size", "Stack size to use for kernel threads."), +]} default_options = { - "float_spec": (["none"], 0), - "vec_spec": (["none"], 0), + "stack_size": OptInt(8192, 65536, 4096, 8192), + "float_spec": OptConst("none"), + "vec_spec": OptConst("none"), } +default_target = "esp32p4" targets = { "esp32c6": { - "cpu": ["riscv32"], - "cc-match": "^riscv.*-linux-", - "cc-prefer": ["^riscv32-badgeros-", "^riscv32-"], - "port": "esp32c6", - "options": {}, + 
"compiler": OptCompiler("^riscv32.*-linux-", ["^riscv32-badgeros-", "^riscv32-linux-"]), + "cpu": OptConst("riscv32"), }, "esp32p4": { - "cpu": ["riscv32"], - "cc-match": "^riscv.*-linux-", - "cc-prefer": ["^riscv32-badgeros-", "^riscv32-"], - "float": True, - "port": "esp32p4", - "options": { - "float_spec": (["single"], 0), - }, + "compiler": OptCompiler("^riscv32.*-linux-", ["^riscv32-badgeros-", "^riscv32-linux-"]), + "cpu": OptConst("riscv32"), + "float_spec": OptConst("single"), }, "generic": { - "cpu": ["riscv64"], - "cc-match": "^riscv64.*-linux-", - "cc-prefer": ["^riscv64-linux-"], - "float": True, - "port": "generic", - "options": { - "float_spec": (float_spec, 2), - "vec_spec": (["none", "rvv_1"], 1), - }, + "compiler": OptCompiler("^riscv64.*-linux-", ["^riscv64-badgeros-", "^riscv64-linux-"]), + "cpu": OptConst("riscv64"), + "float_spec": OptEnum(["none", "single", "double"], "double"), + "vec_spec": OptEnum(["none", "rvv_1"], "rvv_1"), } } + + parser.add_argument("--target", action="store", default=None, choices=list(targets.keys()), - help="Target chip, one of: "+", ".join(targets.keys())) + help="Target platform, one of: "+", ".join(targets.keys())) parser.add_argument("--use-default", action="store_true", help="Use the default option values instead of prompting") -parser.add_argument("--cpu", "--arch", - action="store", default=None, - help="CPU architecture, one of: riscv32, riscv64") - -parser.add_argument("--compiler", "--cc", - action="store", default=None, - help="C compiler, toolchain prefix is derived from this") +for desc in option_desc.values(): + desc.argument(parser) -parser.add_argument("--fp-spec", "--float-spec", "--fp", "--float", - action="store", default=None, - help="Floating-point type to enable") +args = vars(parser.parse_args()) -parser.add_argument("--vec-spec", "--vector-spec", "--vec", "--vector", - action="store", default=None, - help="Vector type to enable") +while args["target"] == None: + print("Available targets:") + keys = list(targets.keys()) + for i in range(len(keys)): + print(f"[{i+1}] {keys[i]}") + val = input(f"Select target [{keys.index(default_target)+1}] ") + if len(val) == 0: + args["target"] = default_target + try: + args["target"] = keys[int(val)-1] + except ValueError or IndexError: + continue -args = parser.parse_args() -use_default = args.use_default -def option_select(prompt: str, options: list, prefer=0): - global use_default - prefer += 1 - if len(options) == 0: - print(f"Error: No valid {prompt}s found") - exit(1) - elif len(options) == 1: - return options[0] - elif use_default: - return options[prefer-1] - else: - print(f"Available {prompt}s:") - for i in range(len(options)): - print(f"[{i+1:d}] {options[i]}") - sel = input(f"Select a {prompt} [{prefer}] ") - try: - i = int(sel) if len(sel) else prefer - if i < 1 or i > len(options): - exit(1) - return options[i-1] - except ValueError: - exit(1) - -def find_compilers(): - global target - candidates = [] - prefer_idx = 1 - prefer_prio = 99999999 - for path in os.getenv("PATH").split(":"): - path = os.path.abspath(path) - try: - for bin in os.listdir(path): - if not bin.endswith("gcc"): continue - if not re.findall(targets[target]["cc-match"], bin): continue - for i in range(len(targets[target]["cc-prefer"])): - prefer = targets[target]["cc-prefer"][i] - if i >= prefer_prio: break - if re.findall(prefer, bin): - prefer_idx = len(candidates) - prefer_prio = i - break - candidates.append(path + "/" + bin) - except FileNotFoundError: - pass - return candidates, prefer_idx - 
-def handle_option_arg(arg: str|None, id: str, name: str) -> str: - global target, target_options - if not arg: return option_select(name, *target_options[id]) - if arg not in target_options[id][0]: - print(f"ERROR: Chosen {name} `{arg}` not supported by target `{target}`") - exit(1) - return arg - - -target = args.target or option_select("target", list(targets.keys())) config = {} -target_options = default_options.copy() -for opt in targets[target]["options"]: - target_options[opt] = targets[target]["options"][opt] - -if args.cpu and args.cpu not in targets[target]["cpu"]: - print(f"ERROR: Chosen CPU architecture `{args.cpu}` not supported by target `{target}`") - exit(1) -config["cpu"] = args.cpu or option_select("CPU", targets[target]["cpu"]) - -if args.compiler: - if not re.findall(targets[target]["cc-match"], args.compiler): - print(f"WARNING: Chosen compiler `{args.compiler}` does not match /{targets[target]['cc-match']}/") - config["compiler"] = args.compiler -else: - config["compiler"] = option_select("compiler", *find_compilers()) +config["target"] = args["target"] +options: dict[str, Option] = default_options.copy() +for k in targets[args["target"]]: + options[k] = targets[args["target"]][k] -config["fp_spec"] = handle_option_arg(args.fp_spec, "float_spec", "float spec") -config["vec_spec"] = handle_option_arg(args.vec_spec, "vec_spec", "vector spec") +for k in options: + if args[k] != None: + config[k] = options[k].parse(option_desc[k], args[k]) + elif args["use_default"]: + config[k] = options[k].use_default() + print(f"Using {option_desc[k].name} `{config[k]}`") + else: + config[k] = options[k].select(option_desc[k]) -config["target"] = target cc_re = re.match("^(.+?)\\w+$", config["compiler"]) if not cc_re: print("ERROR: Cannot determine toolchain prefix") @@ -163,6 +289,7 @@ def handle_option_arg(arg: str|None, id: str, name: str) -> str: config["tc_prefix"] = cc_re.group(1) + os.makedirs(".config", exist_ok=True) with open(".config/config.mk", "w") as fd: @@ -180,6 +307,9 @@ def handle_option_arg(arg: str|None, id: str, name: str) -> str: fd.write(f'// clang-format off\n') fd.write(f'#pragma once\n') for opt in config: - fd.write(f'#define CONFIG_{opt.upper()} "{config[opt]}"\n') - if re.match('^\\w+$', config[opt]): - fd.write(f'#define CONFIG_{opt.upper()}_{config[opt]}\n') + if type(config[opt]) == int: + fd.write(f'#define CONFIG_{opt.upper()} {config[opt]}\n') + else: + fd.write(f'#define CONFIG_{opt.upper()} "{config[opt]}"\n') + if re.match('^\\w+$', config[opt]): + fd.write(f'#define CONFIG_{opt.upper()}_{config[opt]}\n')
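With the generator above, integer options such as the new `stack_size` are emitted into `.config/config.h` as bare numeric macros, while string options still produce both a quoted value and a `CONFIG_<OPT>_<value>` flag macro. A small sketch of how kernel C code can consume that header; the specific option values tested here are only examples, assuming the generated header is on the include path:

// Example consumer of the generated config.h macros.
#include "config.h"

// Integer options arrive as plain numbers usable in constant expressions.
static unsigned char example_stack[CONFIG_STACK_SIZE];

// String options also define a CONFIG_<OPT>_<value> macro, usable for #ifdef checks.
#ifdef CONFIG_FLOAT_SPEC_double
#define EXAMPLE_HAS_DOUBLE_FP 1
#else
#define EXAMPLE_HAS_DOUBLE_FP 0
#endif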