=================================================================== RCS file: /cvs/prex-old/sys/kern/sched.c,v retrieving revision 1.1.1.1 retrieving revision 1.1.1.1.2.1 diff -u -r1.1.1.1 -r1.1.1.1.2.1 --- prex-old/sys/kern/sched.c 2008/06/03 10:38:46 1.1.1.1 +++ prex-old/sys/kern/sched.c 2008/08/13 17:12:32 1.1.1.1.2.1 @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2005-2007, Kohsuke Ohtani + * Copyright (c) 2005-2008, Kohsuke Ohtani * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -34,21 +34,22 @@ /** * General design: * - * The Prex scheduler is based on the algorithm known as priority based - * multi level queue. Each thread is assigned the priority between - * 0 and 255. The lower number means higher priority like BSD unix. - * The scheduler maintains 256 level run queues mapped to each priority. - * The lowest priority (=255) is used only by an idle thread. + * The Prex scheduler is based on the algorithm known as priority + * based multi level queue. Each thread has its own priority + * assigned between 0 and 255. The lower number means higher + * priority like BSD unix. The scheduler maintains 256 level run + * queues mapped to each priority. The lowest priority (=255) is + * used only for an idle thread. * * All threads have two different types of priorities: * - * - Base priority - * This is a static priority used for priority computation. A user - * mode program can change this value via system call. + * Base priority: + * This is a static priority used for priority computation. + * A user mode program can change this value via system call. * - * - Current priority - * An actual scheduling priority. A kernel may adjust this priority - * dynamically if it's needed. + * Current priority: + * An actual scheduling priority. A kernel may adjust this + * priority dynamically if it's needed. * * Each thread has one of the following state. * @@ -60,22 +61,22 @@ * The thread is always preemptive even in the kernel mode. * There are following 4 reasons to switch thread. * - * 1) Block + * (1) Block * Thread is blocked for sleep or suspend. * It is put on the tail of the run queue when it becomes * runnable again. * - * 2) Preemption + * (2) Preemption * If higher priority thread becomes runnable, the current - * thread is put on the the _head_ of the run queue. + * thread is put on the _head_ of the run queue. * - * 3) Quantum expiration + * (3) Quantum expiration * If the thread consumes its time quantum, it is put on * the tail of the run queue. * - * 4) Yield - * If the thread releases CPU by itself, it is put on - * the tail of the run queue. + * (4) Yield + * If the thread releases CPU by itself, it is put on the + * tail of the run queue. * * There are following three types of scheduling policies. * @@ -95,13 +96,12 @@ #include #include -static struct queue runq[NR_PRIOS]; /* run queues */ -static struct queue wakeq; /* queue for waking threads */ -static struct queue dpcq; /* queue for DPC threads */ -static int top_prio; /* highest priority in runq */ +static struct queue runq[NPRIO]; /* run queues */ +static struct queue wakeq; /* queue for waking threads */ +static struct queue dpcq; /* DPC queue */ +static int top_prio; /* highest priority in runq */ +static struct event dpc_event; /* event for DPC */ -static struct event dpc_event; /* event for dpc */ - /* * Search for highest-priority runnable thread. */ @@ -118,8 +118,6 @@ /* * Put a thread on the tail of the run queue. 
- * The rescheduling flag is set if the priority is better than - * the currently running process. */ static void runq_enqueue(thread_t th) @@ -128,14 +126,13 @@ enqueue(&runq[th->prio], &th->link); if (th->prio < top_prio) { top_prio = th->prio; - cur_thread->need_resched = 1; + cur_thread->resched = 1; } } /* * Insert a thread to the head of the run queue. - * We don't change rescheduling flag here because this is called - * while thread switching. + * We assume this routine is called while thread switching. */ static void runq_insert(thread_t th) @@ -147,9 +144,8 @@ } /* - * Pick up and remove the highest-priority thread from the run - * queue. At least, an idle thread will be returned because it - * always residents in the lowest priority queue. + * Pick up and remove the highest-priority thread + * from the run queue. */ static thread_t runq_dequeue(void) @@ -177,8 +173,6 @@ /* * Process all pending woken threads. - * Rescheduling flag may be set. - * Note: The thread may be still in a suspend state after wakeup. */ static void wakeq_flush(void) @@ -192,43 +186,31 @@ */ q = dequeue(&wakeq); th = queue_entry(q, struct thread, link); - th->sleep_event = 0; + th->slpevt = NULL; th->state &= ~TH_SLEEP; - if (th != cur_thread && th->state == TH_RUN) runq_enqueue(th); } } /* - * sleep_expire - sleep timer is expired: - * @arg: thread to unsleep. + * sched_switch - this is the scheduler proper: * - * Wake up the passed thread that is sleeping in sched_tsleep(). + * If the scheduling reason is preemption, the current + * thread will remain at the head of the run queue. So, + * the thread still has right to run first again among + * the same priority threads. For other scheduling reason, + * the current thread is inserted into the tail of the run + * queue. */ static void -sleep_expire(void *arg) -{ - - sched_unsleep((thread_t)arg, SLP_TIMEOUT); -} - -/* - * sched_switch - This routine is called to reschedule the CPU. - * - * If the scheduling reason is preemption, the current thread - * will remain at the head of the run queue. So, the thread - * still has right to run first again among the same priority - * threads. For other scheduling reason, the current thread is - * inserted into the tail of the run queue. - */ -static void sched_switch(void) { thread_t prev, next; - ASSERT(irq_level == 0); - + /* + * Move a current thread to the run queue. + */ prev = cur_thread; if (prev->state == TH_RUN) { if (prev->prio > top_prio) @@ -236,62 +218,78 @@ else runq_enqueue(prev); } - prev->need_resched = 0; + prev->resched = 0; /* - * This is the scheduler proper. + * Select the thread to run the CPU next. */ next = runq_dequeue(); if (next == prev) return; cur_thread = next; + /* + * Switch to the new thread. + * You are expected to understand this.. + */ if (prev->task != next->task) vm_switch(next->task->map); + context_switch(&prev->ctx, &next->ctx); +} - context_switch(&prev->context, &next->context); +/* + * sleep_expire - sleep timer is expired: + * + * Wake up the thread which is sleeping in sched_tsleep(). + */ +static void +sleep_expire(void *arg) +{ + + sched_unsleep((thread_t)arg, SLP_TIMEOUT); } /* - * sched_tsleep - sleep the current thread until a wakeup is - * performed on the specified event. - * @timeout: time out value in msec. (0 means no timeout) + * sched_tsleep - sleep the current thread until a wakeup + * is performed on the specified event. * - * This routine returns a sleep result. If the thread is woken - * by sched_wakeup()/sched_wakeone(), it returns 0. 
Otherwise, - * it will return the result value which is passed by sched_unsleep(). - * We allow calling sched_sleep() with interrupt disabled. + * This routine returns a sleep result. If the thread is + * woken by sched_wakeup() or sched_wakeone(), it returns 0. + * Otherwise, it will return the result value which is passed + * by sched_unsleep(). We allow calling sched_sleep() with + * interrupt disabled. * - * sched_sleep() is also defined as a wrapper macro for sched_tsleep() - * without timeout. - * Note that all sleep requests are interruptible with this kernel. + * sched_sleep() is also defined as a wrapper macro for + * sched_tsleep() without timeout. Note that all sleep + * requests are interruptible with this kernel. */ int -sched_tsleep(struct event *evt, u_long timeout) +sched_tsleep(struct event *evt, u_long msec) { - int s; ASSERT(irq_level == 0); ASSERT(evt); sched_lock(); - interrupt_save(&s); - interrupt_disable(); + irq_lock(); - cur_thread->sleep_event = evt; + cur_thread->slpevt = evt; cur_thread->state |= TH_SLEEP; enqueue(&evt->sleepq, &cur_thread->link); - if (timeout != 0) { - timer_callout(&cur_thread->timeout, sleep_expire, - cur_thread, timeout); + if (msec != 0) { + /* + * Program timer to wake us up at timeout. + */ + timer_callout(&cur_thread->timeout, msec, &sleep_expire, + cur_thread); } wakeq_flush(); sched_switch(); /* Sleep here. Zzzz.. */ - interrupt_restore(s); + irq_unlock(); sched_unlock(); - return cur_thread->sleep_result; + return cur_thread->slpret; } /* @@ -300,15 +298,15 @@ * A thread can have sleep and suspend state simultaneously. * So, the thread does not always run even if it woke up. * - * Since this routine can be called from ISR at interrupt level, it - * should not touch any data of runq. Otherwise, we must frequently - * disable interrupts while accessing runq. Thus, this routine will - * temporary move the waking thread into wakeq, and the thread is - * moved to runq at more safer time in wakeq_flush(). + * Since this routine can be called from ISR at interrupt + * level, there may be contention for access to some data. + * Thus, this routine will temporary move the waking thread + * into wakeq, and they will be moved to runq at more safer + * time in wakeq_flush(). * - * The woken thread will be put on the tail of runq regardless - * of its policy. If woken threads have same priority, next running - * thread is selected by FIFO order. + * The woken thread will be put on the tail of runq + * regardless of its scheduling policy. If woken threads have + * same priority, next running thread is selected by FIFO order. */ void sched_wakeup(struct event *evt) @@ -316,6 +314,9 @@ queue_t q; thread_t th; + ASSERT(evt); + + sched_lock(); irq_lock(); while (!queue_empty(&evt->sleepq)) { /* @@ -323,19 +324,20 @@ */ q = dequeue(&evt->sleepq); th = queue_entry(q, struct thread, link); - th->sleep_result = 0; + th->slpret = 0; enqueue(&wakeq, q); timer_stop(&th->timeout); } irq_unlock(); + sched_unlock(); } /* - * sched_wakeone - wake up one thread sleeping for the event. + * sched_wakeone - wake up one thread sleeping on event. * - * The highest priority thread is woken among sleeping threads. - * sched_wakeone() returns the thread ID of the woken thread, or - * NULL if no threads are sleeping. + * The highest priority thread is woken among sleeping + * threads. This routine returns the thread ID of the + * woken thread, or NULL if no threads are sleeping. 
*/ thread_t sched_wakeone(struct event *evt) @@ -343,13 +345,13 @@ queue_t head, q; thread_t top, th = NULL; + sched_lock(); irq_lock(); head = &evt->sleepq; if (!queue_empty(head)) { /* * Select the highet priority thread in - * the sleeping threads, and move it to - * the wake queue. + * the sleep queue, and wakeup it. */ q = queue_first(head); top = queue_entry(q, struct thread, link); @@ -360,32 +362,35 @@ q = queue_next(q); } queue_remove(&top->link); + top->slpret = 0; enqueue(&wakeq, &top->link); timer_stop(&top->timeout); + th = top; } irq_unlock(); + sched_unlock(); return th; } /* * sched_unsleep - cancel sleep. * - * sched_unsleep() removes the specified thread from its sleep - * queue. The specified sleep result will be passed to the sleeping - * thread as a return value of sched_tsleep(). + * sched_unsleep() removes the specified thread from its + * sleep queue. The specified sleep result will be passed + * to the sleeping thread as a return value of sched_tsleep(). */ void sched_unsleep(thread_t th, int result) { + sched_lock(); if (th->state & TH_SLEEP) { irq_lock(); queue_remove(&th->link); - th->sleep_result = result; + th->slpret = result; enqueue(&wakeq, &th->link); timer_stop(&th->timeout); irq_unlock(); - } sched_unlock(); } @@ -393,37 +398,32 @@ /* * Yield the current processor to another thread. * - * If a thread switching occurs, the current thread will be moved - * on the tail of the run queue regardless of its policy. - * Note that the current thread may run immediately again, if no - * other thread exists in the same priority queue. + * Note that the current thread may run immediately again, + * if no other thread exists in the same priority queue. */ void sched_yield(void) { - ASSERT(irq_level == 0); sched_lock(); if (!queue_empty(&runq[cur_thread->prio])) - cur_thread->need_resched = 1; + cur_thread->resched = 1; sched_unlock(); /* Switch current thread here */ } /* * Suspend the specified thread. - * The scheduler must be locked before calling this routine. - * Note that the suspend count is handled in thread_suspend(). + * Called with scheduler locked. */ void sched_suspend(thread_t th) { - ASSERT(cur_thread->lock_count > 0); if (th->state == TH_RUN) { if (th == cur_thread) - cur_thread->need_resched = 1; + cur_thread->resched = 1; else runq_remove(th); } @@ -432,12 +432,11 @@ /* * Resume the specified thread. - * The scheduler must be locked before calling this routine. + * Called with scheduler locked. */ void sched_resume(thread_t th) { - ASSERT(cur_thread->lock_count > 0); if (th->state & TH_SUSPEND) { th->state &= ~TH_SUSPEND; @@ -455,12 +454,17 @@ sched_tick(void) { - cur_thread->total_ticks++; + /* Profile running time. */ + cur_thread->time++; if (cur_thread->policy == SCHED_RR) { - if (--cur_thread->ticks_left <= 0) { - cur_thread->ticks_left = QUANTUM; - cur_thread->need_resched = 1; + if (--cur_thread->timeleft <= 0) { + /* + * The quantum is up. + * Give the thread another. 
+ */ + cur_thread->timeleft += QUANTUM; + cur_thread->resched = 1; } } } @@ -475,8 +479,8 @@ th->state = TH_RUN | TH_SUSPEND; th->policy = SCHED_RR; th->prio = PRIO_USER; - th->base_prio = PRIO_USER; - th->ticks_left = QUANTUM; + th->baseprio = PRIO_USER; + th->timeleft = QUANTUM; } /* @@ -485,18 +489,16 @@ void sched_stop(thread_t th) { - ASSERT(irq_level == 0); - ASSERT(cur_thread->lock_count > 0); if (th == cur_thread) { /* - * If specified thread is current thread, the - * scheduling lock count is force set to 1 to - * ensure the thread switching in the next - * sched_unlock(). + * If specified thread is current thread, + * the scheduling lock count is force set + * to 1 to ensure the thread switching in + * the next sched_unlock(). */ - cur_thread->lock_count = 1; - cur_thread->need_resched = 1; + cur_thread->locks = 1; + cur_thread->resched = 1; } else { if (th->state == TH_RUN) runq_remove(th); @@ -510,46 +512,45 @@ /* * sched_lock - lock the scheduler. * - * The thread switch is disabled during scheduler locked. This - * is mainly used to synchronize the thread execution to protect - * global resources. Even when scheduler is locked, any interrupt - * handler can run. So, we have to use irq_lock() to synchronize - * a global data with ISR. + * The thread switch is disabled during scheduler locked. + * This is mainly used to synchronize the thread execution + * to protect global resources. Even when scheduler is + * locked, an interrupt handler can run. So, we have to + * use irq_lock() to synchronize a global data with ISR. * - * Since the scheduling lock can be nested any number of times, - * the caller has the responsible to unlock the same number of - * locks. + * Since the scheduling lock can be nested any number of + * times, the caller has the responsible to unlock the same + * number of locks. */ void sched_lock(void) { - cur_thread->lock_count++; + cur_thread->locks++; } /* * sched_unlock - unlock scheduler. * - * If nobody locks the scheduler anymore, it runs pending wake - * threads and check the reschedule flag. The thread switch is - * invoked if the rescheduling request exists. + * If nobody locks the scheduler anymore, it checks the + * rescheduling flag and kick scheduler if it's marked. * - * Note that this routine will be called at the end of the - * interrupt handler. + * Note that this routine will be always called at the end + * of each interrupt handler. */ void sched_unlock(void) { int s; - ASSERT(cur_thread->lock_count > 0); + ASSERT(cur_thread->locks > 0); interrupt_save(&s); interrupt_disable(); - if (cur_thread->lock_count == 1) { + if (cur_thread->locks == 1) { wakeq_flush(); - while (cur_thread->need_resched) { + while (cur_thread->resched) { /* Kick scheduler */ sched_switch(); @@ -566,7 +567,7 @@ wakeq_flush(); } } - cur_thread->lock_count--; + cur_thread->locks--; interrupt_restore(s); } @@ -580,17 +581,16 @@ /* * sched_setprio - set priority of thread. - * @base: Base priority - * @prio: Current priority * - * Thread switch may be invoked here by priority change. + * The rescheduling flag is set if the priority is + * better than the currently running thread. * Called with scheduler locked. 
*/ void -sched_setprio(thread_t th, int base, int prio) +sched_setprio(thread_t th, int baseprio, int prio) { - th->base_prio = base; + th->baseprio = baseprio; if (th == cur_thread) { /* @@ -600,12 +600,12 @@ th->prio = prio; top_prio = runq_top(); if (prio != top_prio) - cur_thread->need_resched = 1; + cur_thread->resched = 1; } else { if (th->state == TH_RUN) { /* - * Update the thread priority and adjust the - * run queue position for new priority. + * Update the thread priority and adjust + * the run queue position for new priority. */ runq_remove(th); th->prio = prio; @@ -630,7 +630,7 @@ switch (policy) { case SCHED_RR: case SCHED_FIFO: - th->ticks_left = QUANTUM; + th->timeleft = QUANTUM; th->policy = policy; break; default: @@ -641,22 +641,53 @@ } /* - * DPC thread + * Schedule DPC callback. * - * This is a kernel thread to process the pending call back request - * within DPC queue. Each DPC routine is called with the following - * conditions. + * DPC (Deferred Procedure Call) is used to call the specific + * function at some later time with a DPC priority. It is also + * known as AST or SoftIRQ in other kernels. DPC is typically + * used by device drivers to do the low-priority jobs without + * degrading real-time performance. + * This routine can be called from ISR. + */ +void +sched_dpc(struct dpc *dpc, void (*func)(void *), void *arg) +{ + ASSERT(dpc); + ASSERT(func); + + irq_lock(); + /* + * Insert request to DPC queue. + */ + dpc->func = func; + dpc->arg = arg; + if (dpc->state != DPC_PENDING) + enqueue(&dpcq, &dpc->link); + dpc->state = DPC_PENDING; + + /* Wake DPC thread */ + sched_wakeup(&dpc_event); + + irq_unlock(); +} + +/* + * DPC thread. + * + * This is a kernel thread to process the pending call back + * request within DPC queue. Each DPC routine is called with + * the following conditions. * - Interrupt is enabled. * - Scheduler is unlocked. */ static void -dpc_thread(u_long unused) +dpc_thread(void *arg) { queue_t q; struct dpc *dpc; for (;;) { - /* Wait until next DPC request. */ sched_sleep(&dpc_event); @@ -665,6 +696,9 @@ dpc = queue_entry(q, struct dpc, link); dpc->state = DPC_FREE; + /* + * Call DPC routine. + */ interrupt_enable(); (*dpc->func)(dpc->arg); interrupt_disable(); @@ -674,50 +708,26 @@ } /* - * Qeueue DPC (Deferred Procedure Call) request - * - * Call function at some later time in a DPC priority. This is - * typically used by device drivers to do the low-priority jobs. - * This routine can be called from ISR. - */ -void -sched_dpc(struct dpc *dpc, void (*func)(void *), void *arg) -{ - ASSERT(dpc); - ASSERT(func); - - irq_lock(); - dpc->func = func; - dpc->arg = arg; - if (dpc->state != DPC_PENDING) - enqueue(&dpcq, &dpc->link); - dpc->state = DPC_PENDING; - sched_wakeup(&dpc_event); - irq_unlock(); -} - -/* * Initialize the global scheduler state. */ void sched_init(void) { + thread_t th; int i; - for (i = 0; i < NR_PRIOS; i++) + for (i = 0; i < NPRIO; i++) queue_init(&runq[i]); - queue_init(&wakeq); queue_init(&dpcq); event_init(&dpc_event, "dpc"); top_prio = PRIO_IDLE; - cur_thread->need_resched = 1; + cur_thread->resched = 1; - /* - * Create a DPC thread. - */ - if (kernel_thread(PRIO_DPC, dpc_thread, 0) == NULL) + /* Create a DPC thread. */ + th = kthread_create(dpc_thread, NULL, PRIO_DPC); + if (th == NULL) panic("sched_init"); - printk("Time slice is %d msec\n", CONFIG_TIME_SLICE); + DPRINTF(("Time slice is %d msec\n", CONFIG_TIME_SLICE)); }
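
The multi-level run queue described in the "General design" comment above can be modeled outside the kernel. What follows is a minimal, stand-alone sketch, not the Prex code itself: struct toy_thread, rq_enqueue() and rq_dequeue() are made-up names, a plain singly-linked list stands in for the kernel's queue.h primitives, and the cached top_prio is rescanned linearly much as runq_top() does. Lower numeric values mean higher priority, and priority 255 is reserved for the idle thread, which is assumed to always be queued.

/*
 * Stand-alone sketch of a priority-based multi-level run queue.
 * This is NOT the Prex kernel code; the names below are invented
 * for illustration only.  Lower number = higher priority, and the
 * lowest priority (255) is used only by the idle thread.
 */
#include <stdio.h>

#define NPRIO		256
#define PRIO_IDLE	(NPRIO - 1)

struct toy_thread {
	int prio;			/* scheduling priority */
	const char *name;
	struct toy_thread *next;	/* next thread in the same queue */
};

static struct toy_thread *runq[NPRIO];	/* one FIFO per priority */
static int top_prio = PRIO_IDLE;	/* cached highest priority in runq */

/* Put a thread on the tail of its priority queue. */
static void
rq_enqueue(struct toy_thread *th)
{
	struct toy_thread **pp = &runq[th->prio];

	while (*pp != NULL)
		pp = &(*pp)->next;
	th->next = NULL;
	*pp = th;
	if (th->prio < top_prio)
		top_prio = th->prio;	/* a better priority arrived */
}

/*
 * Remove and return the highest-priority thread.  The idle thread
 * is assumed to be queued, so runq[top_prio] is never empty here.
 */
static struct toy_thread *
rq_dequeue(void)
{
	struct toy_thread *th = runq[top_prio];

	runq[top_prio] = th->next;
	/* Re-scan downward for the next non-empty queue. */
	while (top_prio < PRIO_IDLE && runq[top_prio] == NULL)
		top_prio++;
	return th;
}

int
main(void)
{
	struct toy_thread idle = { PRIO_IDLE, "idle", NULL };
	struct toy_thread a = { 100, "a", NULL };
	struct toy_thread b = { 50, "b", NULL };

	rq_enqueue(&idle);
	rq_enqueue(&a);
	rq_enqueue(&b);

	printf("%s\n", rq_dequeue()->name);	/* "b" (prio 50) */
	printf("%s\n", rq_dequeue()->name);	/* "a" (prio 100) */
	printf("%s\n", rq_dequeue()->name);	/* "idle" (prio 255) */
	return 0;
}

Compiled as an ordinary user program, this prints "b", "a", "idle": the thread with the lowest priority number is always dispatched first, and the idle thread runs only when every other queue is empty, which is the behavior the kernel's runq_enqueue()/runq_dequeue() pair implements with its own queue primitives.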