Removal of CHOPSTX_PRIO_INHIBIT_PREEMPTION.

Signed-off-by: NIIBE Yutaka <gniibe@fsij.org>
Author: NIIBE Yutaka
Date:   2019-11-21 10:35:14 +09:00
parent  89523f22bf
commit  cffc8bf96c
7 changed files with 221 additions and 168 deletions

ChangeLog

@@ -1,3 +1,40 @@
+2019-11-21  NIIBE Yutaka  <gniibe@fsij.org>
+
+        * chopstx.h (CHOPSTX_PRIO_INHIBIT_PREEMPTION): Remove.
+        * chopstx.c (chx_init): Remove support of
+        CHOPSTX_PRIO_INHIBIT_PREEMPTION.
+        (chopstx_setpriority): Likewise.
+        * chopstx-cortex-m.c (CPU_EXCEPTION_PRIORITY_PENDSV): Change the
+        value.  PendSV now runs at the same priority level as all other
+        exceptions (systick and external interrupts).
+        (chx_cpu_sched_lock, chx_cpu_sched_unlock): Remove support of
+        CHOPSTX_PRIO_INHIBIT_PREEMPTION.
+        (chx_request_preemption): Remove the argument.
+        (chx_timer_handler): New.  Use the common chx_timer_expired and
+        the local chx_request_preemption.
+        (chx_handle_intr): Use chx_recv_irq.
+        (chx_sched): Remove support of CHOPSTX_PRIO_INHIBIT_PREEMPTION.
+        (preempt): Take an argument, delivered by tail-chaining.
+        Remove support of CHOPSTX_PRIO_INHIBIT_PREEMPTION.
+        * chopstx-gnu-linux.c (chx_idle): Rename from idle.
+        (chx_init_arch): Follow the rename.
+        (chx_handle_intr): Use chx_recv_irq, chx_running_preempted, and
+        chx_preempt_into.
+        (sigalrm_handler): Use chx_timer_expired, chx_running_preempted,
+        and chx_preempt_into.
+        (chx_preempt_into): Rename from chx_request_preemption, changing
+        the argument.  Remove the handling of the preempted thread; the
+        caller should do that beforehand with chx_running_preempted.
+        * entry.c (vector_table): Use chx_timer_handler.
+        * chopstx.c (chx_timer_expired): Pop from the ready queue and
+        return the next thread to switch to.
+        (chx_recv_irq): New.
+        (chx_running_preempted): New.
+
 2019-11-21  NIIBE Yutaka  <gniibe@fsij.org>
 
         * chopstx-cortex-m.c (chx_set_running): New.

NEWS

@@ -1,6 +1,19 @@
 NEWS - Noteworthy changes
 
+* Major changes in Chopstx 2.0
+
+** Remove support of CHOPSTX_PRIO_INHIBIT_PREEMPTION
+
+We used to have an (experimental) feature for threads of higher
+priority (>= CHOPSTX_PRIO_INHIBIT_PREEMPTION): when a thread had such
+a priority, Chopstx guaranteed that it kept running and that no other
+thread could preempt it, until it voluntarily left the running state.
+I introduced this (questionable) feature as an excuse for the lack of
+an interrupt masking/unmasking API.  With the experience of Chopstx,
+it is more consistent to remove this feature.
+
 * Major changes in Chopstx 1.17
 
 Released 2019-11-20
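
As an illustration, here is a hypothetical snippet of the kind of code
the removed feature allowed (CHOPSTX_PRIO_INHIBIT_PREEMPTION and
chopstx_setpriority are the old API shown in the diffs below; the
surrounding driver code is invented):

#include <chopstx.h>

/* Old-style critical section: raising the priority to
   CHOPSTX_PRIO_INHIBIT_PREEMPTION (248) kept this thread running,
   with no preemption possible, until the priority was lowered.  */
static void
critical_update (void)
{
  chopstx_prio_t prio_old;

  prio_old = chopstx_setpriority (CHOPSTX_PRIO_INHIBIT_PREEMPTION);
  /* ... touch state shared with other threads ... */
  chopstx_setpriority (prio_old);
}

With 2.0, such code needs an ordinary synchronization primitive (a
mutex, say) instead of a priority trick.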

chopstx-cortex-m.c

@@ -76,17 +76,15 @@ struct chx_stack_regs {
  * ---------------------
  * Prio 0x40: thread temporarily inhibiting schedule for critical region
  * ...
- * Prio 0xb0: systick, external interrupt
- * Prio 0xc0: pendsv
+ * Prio 0xb0: systick, external interrupt, pendsv
  * =====================================
  *
  * Cortex-M0
  * =====================================
  * Prio 0x00: thread temporarily inhibiting schedule for critical region
  * ...
- * Prio 0x40: systick, external interrupt
- * Prio 0x80: pendsv
- * Prio 0x80: svc
+ * Prio 0x40: systick, external interrupt, pendsv
+ * Prio 0x80: svc (not used)
  * =====================================
  */
@@ -95,18 +93,18 @@ struct chx_stack_regs {
 #if defined(__ARM_ARCH_6M__)
 #define CPU_EXCEPTION_PRIORITY_INHIBIT_SCHED 0x00
 /* ... */
-#define CPU_EXCEPTION_PRIORITY_SYSTICK   CPU_EXCEPTION_PRIORITY_INTERRUPT
-#define CPU_EXCEPTION_PRIORITY_INTERRUPT 0x40
-#define CPU_EXCEPTION_PRIORITY_PENDSV    0x80
+#define CPU_EXCEPTION_PRIORITY_SYSTICK   0x40
+#define CPU_EXCEPTION_PRIORITY_INTERRUPT CPU_EXCEPTION_PRIORITY_SYSTICK
+#define CPU_EXCEPTION_PRIORITY_PENDSV    CPU_EXCEPTION_PRIORITY_SYSTICK
 #define CPU_EXCEPTION_PRIORITY_SVC       0x80 /* No use in this arch */
 #elif defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
 #define CPU_EXCEPTION_PRIORITY_SVC       0x30
 #define CPU_EXCEPTION_PRIORITY_INHIBIT_SCHED 0x40
 /* ... */
-#define CPU_EXCEPTION_PRIORITY_SYSTICK   CPU_EXCEPTION_PRIORITY_INTERRUPT
-#define CPU_EXCEPTION_PRIORITY_INTERRUPT 0xb0
-#define CPU_EXCEPTION_PRIORITY_PENDSV    0xc0
+#define CPU_EXCEPTION_PRIORITY_SYSTICK   0xb0
+#define CPU_EXCEPTION_PRIORITY_INTERRUPT CPU_EXCEPTION_PRIORITY_SYSTICK
+#define CPU_EXCEPTION_PRIORITY_PENDSV    CPU_EXCEPTION_PRIORITY_SYSTICK
 #else
 #error "no support for this arch"
 #endif
@@ -238,55 +236,57 @@ chx_interrupt_controller_init (void)
 static void
 chx_cpu_sched_lock (void)
 {
-  if (running->prio < CHOPSTX_PRIO_INHIBIT_PREEMPTION)
-    {
 #if defined(__ARM_ARCH_6M__)
-      asm volatile ("cpsid i" : : : "memory");
+  asm volatile ("cpsid i" : : : "memory");
 #else
-      register uint32_t tmp = CPU_EXCEPTION_PRIORITY_INHIBIT_SCHED;
-      asm volatile ("msr BASEPRI, %0" : : "r" (tmp) : "memory");
+  register uint32_t tmp = CPU_EXCEPTION_PRIORITY_INHIBIT_SCHED;
+  asm volatile ("msr BASEPRI, %0" : : "r" (tmp) : "memory");
 #endif
-    }
 }
 
 static void
 chx_cpu_sched_unlock (void)
 {
-  if (running->prio < CHOPSTX_PRIO_INHIBIT_PREEMPTION)
-    {
 #if defined(__ARM_ARCH_6M__)
-      asm volatile ("cpsie i" : : : "memory");
+  asm volatile ("cpsie i" : : : "memory");
 #else
-      register uint32_t tmp = CPU_EXCEPTION_PRIORITY_CLEAR;
-      asm volatile ("msr BASEPRI, %0" : : "r" (tmp) : "memory");
+  register uint32_t tmp = CPU_EXCEPTION_PRIORITY_CLEAR;
+  asm volatile ("msr BASEPRI, %0" : : "r" (tmp) : "memory");
 #endif
-    }
 }
 
-void
+static void
+chx_request_preemption (void)
+{
+  *ICSR = (1 << 28);
+  asm volatile ("" : : : "memory");
+}
+
+struct chx_thread *
+chx_timer_handler (void)
+{
+  struct chx_thread *tp_next;
+
+  tp_next = chx_timer_expired ();
+  if (tp_next)
+    chx_request_preemption ();
+  return tp_next;
+}
+
+struct chx_thread *
 chx_handle_intr (void)
 {
-  struct chx_pq *p;
   register uint32_t irq_num;
+  struct chx_thread *tp_next;
 
   asm volatile ("mrs %0, IPSR\n\t"
                 "sub %0, #16" /* Exception # - 16 = interrupt number.  */
                 : "=r" (irq_num) : /* no input */ : "memory");
-  chx_disable_intr (irq_num);
-  chx_spin_lock (&q_intr.lock);
-  for (p = q_intr.q.next; p != (struct chx_pq *)&q_intr.q; p = p->next)
-    if (p->v == irq_num)
-      { /* should be one at most.  */
-        struct chx_px *px = (struct chx_px *)p;
-
-        ll_dequeue (p);
-        chx_wakeup (p);
-        chx_request_preemption (px->master->prio);
-        break;
-      }
-  chx_spin_unlock (&q_intr.lock);
+  tp_next = chx_recv_irq (irq_num);
+  if (tp_next)
+    chx_request_preemption ();
+  return tp_next;
 }
@@ -296,16 +296,6 @@ chx_init_arch (struct chx_thread *tp)
   chx_set_running (tp);
 }
 
-static void
-chx_request_preemption (uint16_t prio)
-{
-  if (running == NULL || (uint16_t)running->prio < prio)
-    {
-      *ICSR = (1 << 28);
-      asm volatile ("" : : : "memory");
-    }
-}
-
 /*
  * chx_sched: switch to another thread.
@@ -417,15 +407,9 @@ chx_sched (uint32_t yield)
"ldm r0!, {r1, r2}\n\t"
"mov r11, r1\n\t"
"mov sp, r2\n\t"
"sub r0, #45\n\t"
"ldrb r1, [r0]\n\t" /* ->PRIO field. */
"cmp r1, #247\n\t"
"bhi 1f\n\t" /* Leave interrupt disabled if >= 248 */
/**/
/* Unmask interrupts. */
"cpsie i\n"
/**/
"1:\n\t"
"cpsie i\n\t"
/*
0: r0
4: r1
@@ -520,26 +504,18 @@ chopstx_create_arch (uintptr_t stack_addr, size_t stack_size,
  */
 void __attribute__ ((naked))
-preempt (void)
+preempt (struct chx_thread *tp_next)
 {
-  register struct chx_thread *tp asm ("r0");
-  register struct chx_thread *cur asm ("r1");
+  register struct chx_thread *tp_current asm ("r1");
 
-  asm volatile (
-#if defined(__ARM_ARCH_6M__)
-        "cpsid i\n\t"
-#else
-        "msr BASEPRI, r0\n\t"
-#endif
-        "ldr r2, =running\n\t"
-        "ldr r0, [r2]\n\t"
-        "mov r1, r0"
-        : "=r" (tp), "=r" (cur)
-        : "0" (CPU_EXCEPTION_PRIORITY_INHIBIT_SCHED)
+  asm ( "ldr r2, =running\n\t"
+        "ldr r1, [r2]"
+        : "=r" (tp_current)
+        : /* no input */
         : "r2");
-  if (!cur)
-    /* It's idle thread.  It's ok to clobber registers.  */
+  if (!tp_current)
+    /* It's idle thread.  No need to save registers.  */
     ;
   else
     {
@@ -553,38 +529,20 @@ preempt (void)
"mov r5, r11\n\t"
"mrs r6, PSP\n\t" /* r13(=SP) in user space. */
"stm %0!, {r2, r3, r4, r5, r6}"
: "=r" (cur)
: "0" (cur)
/*
: "=r" (tp_current)
: "0" (tp_current)
/*
* Memory clobber constraint here is not accurate, but this
* works. R7 keeps its value, but having "r7" here prevents
* use of R7 before this asm statement.
*/
: "r2", "r3", "r4", "r5", "r6", "r7", "memory");
if (tp)
{
if (tp->flag_sched_rr)
{
if (tp->state == THREAD_RUNNING)
{
chx_timer_dequeue (tp);
chx_ready_enqueue (tp);
}
/*
* It may be THREAD_READY after chx_timer_expired.
* Then, do nothing.
*/
}
else
chx_ready_push (tp);
}
tp_next = chx_running_preempted (tp_next);
}
/* Registers on stack (PSP): r0, r1, r2, r3, r12, lr, pc, xpsr */
tp = chx_ready_pop ();
asm volatile (
".L_CONTEXT_SWITCH:\n\t"
/* Now, r0 points to the thread to be switched. */
@@ -617,20 +575,13 @@ preempt (void)
"ldr r1, [r0], #4\n\t"
"msr PSP, r1\n\t"
#endif
"sub r0, #45\n\t"
"ldrb r1, [r0]\n\t" /* ->PRIO field. */
"mov r0, #0\n\t"
"cmp r1, #247\n\t"
"bhi 0f\n\t" /* Leave interrupt disabled if >= 248 */
/**/
/* Unmask interrupts. */
#if defined(__ARM_ARCH_6M__)
"cpsie i\n"
"cpsie i\n\t"
#else
"msr BASEPRI, r0\n"
"msr BASEPRI, r0\n\t"
#endif
/**/
"0:\n\t"
"sub r0, #3\n\t" /* EXC_RETURN to a thread with PSP */
"bx r0\n"
"1:\n\t"
@@ -657,7 +608,7 @@ preempt (void)
         /**/
         "sub r0, #3\n\t" /* EXC_RETURN to a thread with PSP */
         "bx r0"
-        : /* no output */ : "r" (tp) : "memory");
+        : /* no output */ : "r" (tp_next) : "memory");
 }
 
 #if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
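
For background on the chx_request_preemption above: writing (1 << 28)
to ICSR sets the PENDSVSET bit of the Interrupt Control and State
Register at 0xE000ED04 in the System Control Block, making PendSV
pending.  A minimal standalone equivalent (the address and bit
position are architectural; the function name is invented):

#include <stdint.h>

static volatile uint32_t *const SCB_ICSR = (uint32_t *)0xe000ed04;

static void
pend_pendsv (void)
{
  *SCB_ICSR = (1u << 28);                /* PENDSVSET: pend PendSV */
  __asm__ volatile ("" : : : "memory");  /* compiler barrier */
}

Since PendSV now runs at the same priority as SysTick and external
interrupts, it can never nest into them; it runs by tail-chaining when
the current handler returns, which is what lets the handler's return
value in r0 arrive as preempt's first argument.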

chopstx-gnu-linux.c

@@ -54,6 +54,8 @@ chx_dmb (void)
 }
 
+static void chx_preempt_into (struct chx_thread *tp_next);
+
 static sigset_t ss_cur;
 
 static void
@@ -145,7 +147,7 @@ chx_cpu_sched_unlock (void)
 }
 
 static void
-idle (void)
+chx_idle (void)
 {
   for (;;)
     pause ();
@@ -154,22 +156,14 @@ idle (void)
 void
 chx_handle_intr (uint32_t irq_num)
 {
-  struct chx_pq *p;
+  struct chx_thread *tp_next;
 
-  chx_disable_intr (irq_num);
-  chx_spin_lock (&q_intr.lock);
-  for (p = q_intr.q.next; p != (struct chx_pq *)&q_intr.q; p = p->next)
-    if (p->v == irq_num)
-      { /* should be one at most.  */
-        struct chx_px *px = (struct chx_px *)p;
-
-        ll_dequeue (p);
-        chx_wakeup (p);
-        chx_spin_unlock (&q_intr.lock);
-        chx_request_preemption (px->master->prio);
-        return;
-      }
-  chx_spin_unlock (&q_intr.lock);
+  tp_next = chx_recv_irq (irq_num);
+  if (!tp_next)
+    return;
+
+  tp_next = chx_running_preempted (tp_next);
+  chx_preempt_into (tp_next);
 }
@@ -193,11 +187,17 @@ chx_sigmask (ucontext_t *uc)
 static void
 sigalrm_handler (int sig, siginfo_t *siginfo, void *arg)
 {
-  extern void chx_timer_expired (void);
+  struct chx_thread *tp_next;
   ucontext_t *uc = arg;
   (void)sig;
   (void)siginfo;
 
-  chx_timer_expired ();
+  tp_next = chx_timer_expired ();
+  if (tp_next)
+    {
+      tp_next = chx_running_preempted (tp_next);
+      chx_preempt_into (tp_next);
+    }
   chx_sigmask (uc);
 }
@@ -217,46 +217,17 @@ chx_init_arch (struct chx_thread *tp)
   idle_tc.uc_stack.ss_sp = idle_stack;
   idle_tc.uc_stack.ss_size = sizeof (idle_stack);
   idle_tc.uc_link = NULL;
-  makecontext (&idle_tc, idle, 0);
+  makecontext (&idle_tc, chx_idle, 0);
 
   getcontext (&tp->tc);
   chx_set_running (tp);
 }
 
 static void
-chx_request_preemption (uint16_t prio)
+chx_preempt_into (struct chx_thread *tp_next)
 {
-  ucontext_t *tcp;
-  struct chx_thread *tp_prev;
-  struct chx_thread *tp = chx_running ();
-
-  if (tp && (uint16_t)tp->prio >= prio)
-    return;
-
-  /* Change the context to another thread with higher priority.  */
-  tp_prev = tp;
-  if (tp)
-    {
-      if (tp->flag_sched_rr)
-        {
-          if (tp->state == THREAD_RUNNING)
-            {
-              chx_timer_dequeue (tp);
-              chx_ready_enqueue (tp);
-            }
-        }
-      else
-        chx_ready_push (tp);
-    }
-
-  tp = chx_ready_pop ();
-  if (tp)
-    tcp = &tp->tc;
-  else
-    tcp = &idle_tc;
-
-  chx_set_running (tp);
+  struct chx_thread *tp_prev = chx_running ();
+
+  chx_set_running (tp_next);
 
   if (tp_prev)
     {
       /*
@@ -275,11 +246,11 @@ chx_request_preemption (uint16_t prio)
        * of the thread, and the condition of chx_sched function which
        * mandates holding cpu_sched_lock.
        */
-      swapcontext (&tp_prev->tc, tcp);
+      swapcontext (&tp_prev->tc, &tp_next->tc);
     }
-  else if (tp)
+  else
     {
-      setcontext (tcp);
+      setcontext (&tp_next->tc);
     }
 }
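
The Linux emulation backend builds its context switch on the POSIX
ucontext primitives used above.  A minimal standalone demonstration of
the same getcontext/makecontext/swapcontext pattern, independent of
Chopstx:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_tc, thread_tc;
static char thread_stack[16 * 1024];

static void
thread_func (void)
{
  puts ("in thread context");
  /* Returning here resumes uc_link, i.e. main_tc.  */
}

int
main (void)
{
  getcontext (&thread_tc);
  thread_tc.uc_stack.ss_sp = thread_stack;
  thread_tc.uc_stack.ss_size = sizeof (thread_stack);
  thread_tc.uc_link = &main_tc;
  makecontext (&thread_tc, thread_func, 0);

  /* Like chx_preempt_into: save the current context, resume another.  */
  swapcontext (&main_tc, &thread_tc);
  puts ("back in main context");
  return 0;
}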

chopstx.c

@@ -101,9 +101,8 @@ static struct chx_queue q_join;
 static struct chx_queue q_intr;
 
 /* Forward declaration(s).  */
-static void chx_request_preemption (uint16_t prio);
 static int chx_wakeup (struct chx_pq *p);
-static struct chx_thread * chx_timer_insert (struct chx_thread *tp, uint32_t usec);
+static struct chx_thread *chx_timer_insert (struct chx_thread *tp, uint32_t usec);
 static uint32_t chx_timer_dequeue (struct chx_thread *tp);
@@ -297,6 +296,10 @@ chx_ready_enqueue (struct chx_thread *tp)
   chx_spin_unlock (&q_ready.lock);
 }
 
+static struct chx_thread *chx_timer_expired (void);
+static struct chx_thread *chx_recv_irq (uint32_t irq_num);
+static struct chx_thread *chx_running_preempted (struct chx_thread *tp_next);
+
 /*
  * Here comes architecture specific code.
  */
@@ -384,7 +387,7 @@ chx_timer_dequeue (struct chx_thread *tp)
 }
 
-void
+static struct chx_thread *
 chx_timer_expired (void)
 {
   struct chx_thread *tp;
@@ -430,7 +433,74 @@ chx_timer_expired (void)
     }
   chx_spin_unlock (&q_timer.lock);
 
-  chx_request_preemption (prio);
+  if (running == NULL || (uint16_t)running->prio < prio)
+    {
+      tp = chx_ready_pop ();
+      if (tp != running)
+        return tp;
+      else
+        /* Possible when tp->flag_sched_rr == 1.  No context switch.  */
+        return NULL;
+    }
+  else
+    return NULL;
 }
+
+static struct chx_thread *
+chx_recv_irq (uint32_t irq_num)
+{
+  struct chx_pq *p;
+  struct chx_thread *r = chx_running ();
+
+  chx_disable_intr (irq_num);
+  chx_spin_lock (&q_intr.lock);
+  for (p = q_intr.q.next; p != (struct chx_pq *)&q_intr.q; p = p->next)
+    if (p->v == irq_num)
+      /* should be one at most.  */
+      break;
+
+  if (p != (struct chx_pq *)&q_intr.q)
+    {
+      struct chx_px *px = (struct chx_px *)p;
+
+      ll_dequeue (p);
+      chx_wakeup (p);
+      chx_spin_unlock (&q_intr.lock);
+      if (r == NULL || (uint16_t)r->prio < px->master->prio)
+        return chx_ready_pop ();
+    }
+  else
+    chx_spin_unlock (&q_intr.lock);
+
+  return NULL;
+}
+
+static struct chx_thread * __attribute__ ((noinline))
+chx_running_preempted (struct chx_thread *tp_next)
+{
+  struct chx_thread *r = chx_running ();
+
+  if (r == NULL)
+    return tp_next;
+
+  if (r->flag_sched_rr)
+    {
+      if (r->state == THREAD_RUNNING)
+        {
+          chx_timer_dequeue (r);
+          chx_ready_enqueue (r);
+        }
+      /*
+       * It may be THREAD_READY after chx_timer_expired.
+       * Then, do nothing.  It's already in the ready queue.
+       */
+    }
+  else
+    chx_ready_push (r);
+
+  return tp_next;
+}
@@ -482,9 +552,6 @@ chx_init (struct chx_thread *tp)
   tp->parent = NULL;
   tp->v = 0;
-
-  if (CHX_PRIO_MAIN_INIT >= CHOPSTX_PRIO_INHIBIT_PREEMPTION)
-    chx_cpu_sched_lock ();
   tp->prio = CHX_PRIO_MAIN_INIT;
 
   chopstx_main = (chopstx_t)tp;
@@ -1544,7 +1611,7 @@ chopstx_setpriority (chopstx_prio_t prio_new)
   if (tp->prio < prio_cur)
     chx_sched (CHX_YIELD);
-  else if (tp->prio < CHOPSTX_PRIO_INHIBIT_PREEMPTION)
+  else
     chx_cpu_sched_unlock ();
 
   return prio_orig;
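
The decision that each arch-specific chx_request_preemption used to
make is now centralized in these helpers: hand back a thread to switch
to only when the newly runnable thread outranks the running one.  A
host-side toy model of that comparison (names are invented, not the
Chopstx API):

#include <stdint.h>
#include <stdio.h>

struct toy_thread { uint16_t prio; };

/* Mirrors "r == NULL || (uint16_t)r->prio < prio"; NULL stands for
   the idle thread, which any woken thread outranks.  */
static const struct toy_thread *
next_if_preempting (const struct toy_thread *running_thread,
                    const struct toy_thread *woken)
{
  if (running_thread == NULL || running_thread->prio < woken->prio)
    return woken;       /* caller should context-switch */
  return NULL;          /* keep the current thread */
}

int
main (void)
{
  struct toy_thread lo = { 16 }, hi = { 32 };

  printf ("%d\n", next_if_preempting (&lo, &hi) != NULL);  /* 1 */
  printf ("%d\n", next_if_preempting (&hi, &lo) != NULL);  /* 0 */
  printf ("%d\n", next_if_preempting (NULL, &lo) != NULL); /* 1 */
  return 0;
}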

chopstx.h

@@ -45,8 +45,6 @@ chopstx_create (uint32_t flags_and_prio,
 #define CHOPSTX_DETACHED 0x10000
 #define CHOPSTX_SCHED_RR 0x20000
 
-#define CHOPSTX_PRIO_INHIBIT_PREEMPTION 248
-
 void chopstx_usec_wait (uint32_t usec);
 
 struct chx_spinlock {

entry.c

@@ -64,8 +64,24 @@ extern uint8_t __main_stack_end__;
 #if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
 extern void svc (void);
 #endif
+
+/*
+ * For tail-chaining, see section B1.5.12 in the ARMv6-M Architecture
+ * Reference Manual and the ARMv7-M Architecture Reference Manual:
+ *
+ *   B1.5.12 Exceptions on exception return, and tail-chaining exceptions
+ */
+
+/*
+ * Because it is entered by tail-chaining, the preempt function in
+ * fact takes an argument of type (struct chx_thread *).
+ */
 extern void preempt (void);
-extern void chx_timer_expired (void);
+
+/*
+ * The following functions return (struct chx_thread *), which the
+ * tail-chained preempt function receives as its argument.
+ */
+extern void chx_timer_handler (void);
 extern void chx_handle_intr (void);
 
 static void nmi (void)
@@ -193,7 +209,7 @@ handler vector_table[] __attribute__ ((section(".startup.vectors"))) = {
         none,                   /* Debug */
         none,                   /* reserved */
         preempt,                /* PendSV */
-        chx_timer_expired,      /* SysTick */
+        chx_timer_handler,      /* SysTick */
         /* 0x40 */
         chx_handle_intr /* WWDG */,   chx_handle_intr /* PVD */,
         chx_handle_intr /* TAMPER */, chx_handle_intr /* RTC */,
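
The comments added to entry.c are the key to the whole commit: under
the AAPCS calling convention, a function's first argument and its
return value share r0, and tail-chaining enters the next exception
handler without touching r0.  A type-level sketch of the real
signatures hidden behind the void declarations (illustrative only; the
vector table keeps the plain void type):

struct chx_thread;

/* What chx_timer_handler and chx_handle_intr effectively are:
   each returns the thread to switch to (or NULL) in r0.  */
typedef struct chx_thread *(*exc_handler_t) (void);

/* What preempt effectively is: entered by tail-chaining from one of
   the handlers above, it finds that same r0 value as its argument.  */
typedef void (*pendsv_handler_t) (struct chx_thread *tp_next);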