
prex/sys/kern/sched.c, revision 1.1.1.1

/*-
 * Copyright (c) 2005-2008, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * sched.c - scheduler
 */

/**
 * General design:
 *
 * The Prex scheduler is based on a priority-based multi-level
 * queue algorithm.  Each thread is assigned a priority between
 * 0 and 255; as in BSD Unix, a lower number means a higher
 * priority.  The scheduler maintains 256 run queues, one per
 * priority level.  The lowest priority (=255) is used only by
 * the idle thread.
 *
 * Each thread has two different priorities:
 *
 *  Base priority:
 *      A static priority used for priority computation.
 *      A user mode program can change this value via system call.
 *
 *  Current priority:
 *      The actual scheduling priority.  The kernel may adjust
 *      this priority dynamically when needed.
 *
 * Each thread is in one of the following states:
 *
 *  - TH_RUN     Running or ready to run
 *  - TH_SLEEP   Sleeping, waiting for some event
 *  - TH_SUSPEND Suspend count is not 0
 *  - TH_EXIT    Terminated
 *
 * A thread is always preemptible, even in kernel mode.  There
 * are four reasons to switch threads:
 *
 * (1) Block
 *      The thread is blocked by a sleep or suspend.  It is put
 *      on the tail of the run queue when it becomes runnable
 *      again.
 *
 * (2) Preemption
 *      If a higher-priority thread becomes runnable, the current
 *      thread is put on the _head_ of the run queue.
 *
 * (3) Quantum expiration
 *      If the thread consumes its time quantum, it is put on
 *      the tail of the run queue.
 *
 * (4) Yield
 *      If the thread releases the CPU by itself, it is put on
 *      the tail of the run queue.
 *
 * Three scheduling policies are defined:
 *
 *  - SCHED_FIFO   First-in, first-out
 *  - SCHED_RR     Round robin (SCHED_FIFO + timeslice)
 *  - SCHED_OTHER  Other policy (not supported)
 */
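
/*
 * Worked example (illustrative only; the priorities are chosen for
 * this comment): suppose a SCHED_RR thread A runs at current
 * priority 100 while thread B, at priority 50, sleeps on an event.
 * When B is woken, runq_enqueue() sees 50 < top_prio, lowers
 * top_prio to 50, and sets the resched flag; the next sched_unlock()
 * then switches to B.  A, having been preempted, goes back on the
 * _head_ of run queue 100, so it runs first again among the
 * priority-100 threads.
 */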

#include <kernel.h>
#include <queue.h>
#include <event.h>
#include <irq.h>
#include <thread.h>
#include <timer.h>
#include <vm.h>
#include <task.h>
#include <system.h>
#include <sched.h>

static struct queue     runq[NPRIO];    /* run queues */
static struct queue     wakeq;          /* queue for waking threads */
static struct queue     dpcq;           /* DPC queue */
static int              top_prio;       /* highest priority in runq */
static struct event     dpc_event;      /* event for DPC */

/*
 * Search for the highest-priority runnable thread.  If every
 * queue above the idle level is empty, MIN_PRIO is returned.
 */
static int
runq_top(void)
{
        int prio;

        for (prio = 0; prio < MIN_PRIO; prio++)
                if (!queue_empty(&runq[prio]))
                        break;
        return prio;
}

/*
 * Put a thread on the tail of the run queue.
 */
static void
runq_enqueue(thread_t th)
{

        enqueue(&runq[th->prio], &th->link);
        if (th->prio < top_prio) {
                top_prio = th->prio;
                cur_thread->resched = 1;
        }
}

/*
 * Insert a thread at the head of the run queue.
 * This routine is assumed to be called during a thread switch.
 */
static void
runq_insert(thread_t th)
{

        queue_insert(&runq[th->prio], &th->link);
        if (th->prio < top_prio)
                top_prio = th->prio;
}

/*
 * Pick up and remove the highest-priority thread
 * from the run queue.
 */
static thread_t
runq_dequeue(void)
{
        queue_t q;
        thread_t th;

        q = dequeue(&runq[top_prio]);
        th = queue_entry(q, struct thread, link);
        if (queue_empty(&runq[top_prio]))
                top_prio = runq_top();
        return th;
}

/*
 * Remove the specified thread from the run queue.
 */
static void
runq_remove(thread_t th)
{

        queue_remove(&th->link);
        top_prio = runq_top();
}

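/*
 * Together, the runq_* helpers above maintain the invariant that
 * top_prio always indexes the highest-priority non-empty run queue
 * (or MIN_PRIO when only the idle thread is runnable).  Every
 * insertion and removal re-checks or recomputes top_prio, so
 * runq_dequeue() can pick the next thread without scanning in the
 * common case.
 */
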
/*
 * Process all pending woken threads.
 */
static void
wakeq_flush(void)
{
        queue_t q;
        thread_t th;

        while (!queue_empty(&wakeq)) {
                /*
                 * Set a thread runnable.
                 */
                q = dequeue(&wakeq);
                th = queue_entry(q, struct thread, link);
                th->slpevt = NULL;
                th->state &= ~TH_SLEEP;
                if (th != cur_thread && th->state == TH_RUN)
                        runq_enqueue(th);
        }
}

/*
 * sched_switch - this is the scheduler proper:
 *
 * If the scheduling reason is preemption, the current thread
 * remains at the head of the run queue, so it keeps the right
 * to run first among threads of the same priority.  For any
 * other scheduling reason, the current thread is inserted at
 * the tail of the run queue.
 */
static void
sched_switch(void)
{
        thread_t prev, next;

        /*
         * Move the current thread to the run queue.
         */
        prev = cur_thread;
        if (prev->state == TH_RUN) {
                if (prev->prio > top_prio)
                        runq_insert(prev);      /* preemption */
                else
                        runq_enqueue(prev);
        }
        prev->resched = 0;

        /*
         * Select the thread to run next.
         */
        next = runq_dequeue();
        if (next == prev)
                return;
        cur_thread = next;

        /*
         * Switch to the new thread.
         * You are expected to understand this..
         */
        if (prev->task != next->task)
                vm_switch(next->task->map);
        context_switch(&prev->ctx, &next->ctx);
}

/*
 * sleep_expire - the sleep timer has expired:
 *
 * Wake up the thread sleeping in sched_tsleep().
 */
static void
sleep_expire(void *arg)
{

        sched_unsleep((thread_t)arg, SLP_TIMEOUT);
}

/*
 * sched_tsleep - sleep the current thread until a wakeup
 * is performed on the specified event.
 *
 * This routine returns the sleep result.  It returns 0 if the
 * thread was woken by sched_wakeup() or sched_wakeone();
 * otherwise, it returns the result value passed to
 * sched_unsleep().  Calling sched_sleep() with interrupts
 * disabled is allowed.
 *
 * sched_sleep() is also defined as a wrapper macro for
 * sched_tsleep() without a timeout.  Note that all sleep
 * requests are interruptible with this kernel.
 */
int
sched_tsleep(struct event *evt, u_long msec)
{

        ASSERT(irq_level == 0);
        ASSERT(evt);

        sched_lock();
        irq_lock();

        cur_thread->slpevt = evt;
        cur_thread->state |= TH_SLEEP;
        enqueue(&evt->sleepq, &cur_thread->link);

        if (msec != 0) {
                /*
                 * Program the timer to wake us up at timeout.
                 */
                timer_callout(&cur_thread->timeout, msec, &sleep_expire,
                              cur_thread);
        }
        wakeq_flush();
        sched_switch(); /* Sleep here. Zzzz.. */

        irq_unlock();
        sched_unlock();
        return cur_thread->slpret;
}

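/*
 * Illustrative usage (hypothetical driver code, not part of this
 * file):
 *
 *      static struct event io_event;    - set up with event_init()
 *
 *      Waiting side, blocks for up to 100 msec:
 *
 *              if (sched_tsleep(&io_event, 100) == SLP_TIMEOUT)
 *                      ...handle the timeout...
 *
 *      Completion side, typically an interrupt handler:
 *
 *              sched_wakeup(&io_event);
 */
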
/*
 * sched_wakeup - wake up all threads sleeping on an event.
 *
 * A thread can be in the sleep and suspend states simultaneously,
 * so a woken thread does not always run.
 *
 * Since this routine can be called from an ISR at interrupt
 * level, there may be contention for access to some data.
 * Thus, this routine temporarily moves the waking threads onto
 * wakeq; they are moved to the run queue at a safer time, in
 * wakeq_flush().
 *
 * A woken thread is put on the tail of the run queue regardless
 * of its scheduling policy.  If the woken threads have the same
 * priority, the next thread to run is selected in FIFO order.
 */
void
sched_wakeup(struct event *evt)
{
        queue_t q;
        thread_t th;

        ASSERT(evt);

        sched_lock();
        irq_lock();
        while (!queue_empty(&evt->sleepq)) {
                /*
                 * Move a sleeping thread to the wake queue.
                 */
                q = dequeue(&evt->sleepq);
                th = queue_entry(q, struct thread, link);
                th->slpret = 0;
                enqueue(&wakeq, q);
                timer_stop(&th->timeout);
        }
        irq_unlock();
        sched_unlock();
}

/*
 * sched_wakeone - wake up one thread sleeping on an event.
 *
 * The highest-priority thread among the sleeping threads is
 * woken.  This routine returns the woken thread, or NULL if
 * no threads are sleeping.
 */
thread_t
sched_wakeone(struct event *evt)
{
        queue_t head, q;
        thread_t top, th = NULL;

        sched_lock();
        irq_lock();
        head = &evt->sleepq;
        if (!queue_empty(head)) {
                /*
                 * Select the highest-priority thread in
                 * the sleep queue, and wake it up.
                 */
                q = queue_first(head);
                top = queue_entry(q, struct thread, link);
                while (!queue_end(head, q)) {
                        th = queue_entry(q, struct thread, link);
                        if (th->prio < top->prio)
                                top = th;
                        q = queue_next(q);
                }
                queue_remove(&top->link);
                top->slpret = 0;
                enqueue(&wakeq, &top->link);
                timer_stop(&top->timeout);
                th = top;
        }
        irq_unlock();
        sched_unlock();
        return th;
}

/*
 * sched_unsleep - cancel sleep.
 *
 * sched_unsleep() removes the specified thread from its
 * sleep queue. The specified sleep result will be passed
 * to the sleeping thread as a return value of sched_tsleep().
 */
void
sched_unsleep(thread_t th, int result)
{

        sched_lock();
        if (th->state & TH_SLEEP) {
                irq_lock();
                queue_remove(&th->link);
                th->slpret = result;
                enqueue(&wakeq, &th->link);
                timer_stop(&th->timeout);
                irq_unlock();
        }
        sched_unlock();
}

/*
 * Yield the current processor to another thread.
 *
 * Note that the current thread may run again immediately
 * if no other thread exists in the same priority queue.
 */
void
sched_yield(void)
{

        sched_lock();

        if (!queue_empty(&runq[cur_thread->prio]))
                cur_thread->resched = 1;

        sched_unlock(); /* Switch current thread here */
}

/*
 * Suspend the specified thread.
 * Called with scheduler locked.
 */
void
sched_suspend(thread_t th)
{

        if (th->state == TH_RUN) {
                if (th == cur_thread)
                        cur_thread->resched = 1;
                else
                        runq_remove(th);
        }
        th->state |= TH_SUSPEND;
}

/*
 * Resume the specified thread.
 * Called with scheduler locked.
 */
void
sched_resume(thread_t th)
{

        if (th->state & TH_SUSPEND) {
                th->state &= ~TH_SUSPEND;
                if (th->state == TH_RUN)
                        runq_enqueue(th);
        }
}

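/*
 * The thread state works as a bit mask: a thread that is both
 * sleeping and suspended has state TH_SLEEP|TH_SUSPEND.  For
 * example, waking such a thread clears only TH_SLEEP, so it still
 * does not run; a later sched_resume() clears TH_SUSPEND, and only
 * then (state == TH_RUN) is the thread put on the run queue.
 */
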
/*
 * sched_tick() is called from timer_tick() once every tick.
 * Check for quantum expiration, and mark the rescheduling flag.
 * No locking is needed here.
 */
void
sched_tick(void)
{

        /* Profile running time. */
        cur_thread->time++;

        if (cur_thread->policy == SCHED_RR) {
                if (--cur_thread->timeleft <= 0) {
                        /*
                         * The quantum is up.
                         * Give the thread another.
                         */
                        cur_thread->timeleft += QUANTUM;
                        cur_thread->resched = 1;
                }
        }
}

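/*
 * Worked example (assuming QUANTUM is the time slice expressed in
 * ticks, derived from the CONFIG_TIME_SLICE value reported by
 * sched_init() below): with a 1 msec tick and a 50 msec time slice,
 * QUANTUM is 50, so a SCHED_RR thread is marked for rescheduling
 * after 50 consecutive sched_tick() calls.  SCHED_FIFO threads skip
 * this check entirely and run until they block or are preempted.
 */
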
/*
 * Set up the initial scheduling state for a thread.
 */
void
sched_start(thread_t th)
{

        th->state = TH_RUN | TH_SUSPEND;
        th->policy = SCHED_RR;
        th->prio = PRIO_USER;
        th->baseprio = PRIO_USER;
        th->timeleft = QUANTUM;
}

/*
 * Stop thread scheduling.
 */
void
sched_stop(thread_t th)
{

        if (th == cur_thread) {
                /*
                 * If the specified thread is the current thread,
                 * the scheduling lock count is forced to 1 to
                 * ensure a thread switch at the next
                 * sched_unlock().
                 */
                cur_thread->locks = 1;
                cur_thread->resched = 1;
        } else {
                if (th->state == TH_RUN)
                        runq_remove(th);
                else if (th->state & TH_SLEEP)
                        queue_remove(&th->link);
        }
        timer_stop(&th->timeout);
        th->state = TH_EXIT;
}

/*
 * sched_lock - lock the scheduler.
 *
 * Thread switching is disabled while the scheduler is locked.
 * This is mainly used to synchronize thread execution in order
 * to protect global resources.  Even while the scheduler is
 * locked, an interrupt handler can still run; irq_lock() must
 * be used to synchronize global data with ISRs.
 *
 * Since the scheduler lock can be nested any number of times,
 * the caller is responsible for releasing the same number of
 * locks.
 */
void
sched_lock(void)
{

        cur_thread->locks++;
}

/*
 * sched_unlock - unlock the scheduler.
 *
 * When the last lock is released, the rescheduling flag is
 * checked, and the scheduler is kicked if it is set.
 *
 * Note that this routine is always called at the end of each
 * interrupt handler.
 */
void
sched_unlock(void)
{
        int s;

        ASSERT(cur_thread->locks > 0);

        interrupt_save(&s);
        interrupt_disable();

        if (cur_thread->locks == 1) {
                wakeq_flush();
                while (cur_thread->resched) {

                        /* Kick the scheduler. */
                        sched_switch();

                        /*
                         * Now run any pending interrupts that fired
                         * during the thread switch, so that we catch
                         * rescheduling requests from such ISRs.
                         * Otherwise, the reschedule may be deferred
                         * until the _next_ sched_unlock() call.
                         */
                        interrupt_restore(s);
                        interrupt_disable();
                        wakeq_flush();
                }
        }
        cur_thread->locks--;

        interrupt_restore(s);
}

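/*
 * Illustrative usage of the scheduler lock (hypothetical kernel
 * code, not part of this file):
 *
 *      sched_lock();
 *      ...update data shared between threads...
 *      sched_unlock();          - a thread switch may happen here
 *
 * Because the lock nests, a routine may call sched_lock() even when
 * its caller already holds the lock; any pending switch is deferred
 * until the outermost sched_unlock(), where the count is still 1.
 */
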
int
sched_getprio(thread_t th)
{

        return th->prio;
}

/*
 * sched_setprio - set the priority of a thread.
 *
 * The rescheduling flag is set if the new priority is better
 * than that of the currently running thread.
 * Called with scheduler locked.
 */
void
sched_setprio(thread_t th, int baseprio, int prio)
{

        th->baseprio = baseprio;

        if (th == cur_thread) {
                /*
                 * If we change the current thread's priority,
                 * rescheduling may be needed.
                 */
                th->prio = prio;
                top_prio = runq_top();
                if (prio != top_prio)
                        cur_thread->resched = 1;
        } else {
                if (th->state == TH_RUN) {
                        /*
                         * Update the thread priority and adjust
                         * its run queue position for the new
                         * priority.
                         */
                        runq_remove(th);
                        th->prio = prio;
                        runq_enqueue(th);
                } else
                        th->prio = prio;
        }
}

int
sched_getpolicy(thread_t th)
{

        return th->policy;
}

int
sched_setpolicy(thread_t th, int policy)
{
        int err = 0;

        switch (policy) {
        case SCHED_RR:
        case SCHED_FIFO:
                th->timeleft = QUANTUM;
                th->policy = policy;
                break;
        default:
                err = -1;
                break;
        }
        return err;
}

/*
 * Schedule a DPC callback.
 *
 * A DPC (Deferred Procedure Call) is used to call a specific
 * function at some later time at DPC priority.  It is also
 * known as an AST or SoftIRQ in other kernels.  DPCs are
 * typically used by device drivers to do low-priority jobs
 * without degrading real-time performance.
 * This routine can be called from an ISR.
 */
void
sched_dpc(struct dpc *dpc, void (*func)(void *), void *arg)
{
        ASSERT(dpc);
        ASSERT(func);

        irq_lock();
        /*
         * Insert the request into the DPC queue.
         */
        dpc->func = func;
        dpc->arg = arg;
        if (dpc->state != DPC_PENDING)
                enqueue(&dpcq, &dpc->link);
        dpc->state = DPC_PENDING;

        /* Wake the DPC thread. */
        sched_wakeup(&dpc_event);

        irq_unlock();
}

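/*
 * Illustrative usage (a hypothetical driver, not part of this
 * file):
 *
 *      static struct dpc net_dpc;
 *
 *      static void net_rx_softirq(void *arg)
 *      {
 *              ...process received packets at DPC priority...
 *      }
 *
 *      In the interrupt handler, only acknowledge the hardware
 *      and defer the heavy work:
 *
 *              sched_dpc(&net_dpc, &net_rx_softirq, NULL);
 *
 * Re-issuing sched_dpc() on a dpc that is already DPC_PENDING only
 * updates its function and argument; it is not queued twice.
 */
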
/*
 * DPC thread.
 *
 * This is a kernel thread that processes the pending callback
 * requests on the DPC queue.  Each DPC routine is called under
 * the following conditions:
 *  - Interrupts are enabled.
 *  - The scheduler is unlocked.
 */
static void
dpc_thread(void *arg)
{
        queue_t q;
        struct dpc *dpc;

        for (;;) {
                /* Wait until the next DPC request. */
                sched_sleep(&dpc_event);

                while (!queue_empty(&dpcq)) {
                        q = dequeue(&dpcq);
                        dpc = queue_entry(q, struct dpc, link);
                        dpc->state = DPC_FREE;

                        /*
                         * Call the DPC routine.
                         */
                        interrupt_enable();
                        (*dpc->func)(dpc->arg);
                        interrupt_disable();
                }
        }
        /* NOTREACHED */
}

/*
 * Initialize the global scheduler state.
 */
void
sched_init(void)
{
        thread_t th;
        int i;

        for (i = 0; i < NPRIO; i++)
                queue_init(&runq[i]);
        queue_init(&wakeq);
        queue_init(&dpcq);
        event_init(&dpc_event, "dpc");
        top_prio = PRIO_IDLE;
        cur_thread->resched = 1;

        /* Create a DPC thread. */
        th = kthread_create(dpc_thread, NULL, PRIO_DPC);
        if (th == NULL)
                panic("sched_init");

        DPRINTF(("Time slice is %d msec\n", CONFIG_TIME_SLICE));
}
