[BACK]Return to sched.c CVS log [TXT][DIR] Up to [local] / prex-old / sys / kern

Diff for /prex-old/sys/kern/sched.c between version 1.1.1.1 and 1.1.1.1.2.1

version 1.1.1.1, 2008/06/03 10:38:46 version 1.1.1.1.2.1, 2008/08/13 17:12:32
Line 1 
Line 1 
 /*-  /*-
  * Copyright (c) 2005-2007, Kohsuke Ohtani   * Copyright (c) 2005-2008, Kohsuke Ohtani
  * All rights reserved.   * All rights reserved.
  *   *
  * Redistribution and use in source and binary forms, with or without   * Redistribution and use in source and binary forms, with or without
Line 34 
Line 34 
 /**  /**
  * General design:   * General design:
  *   *
  * The Prex scheduler is based on the algorithm known as priority based   * The Prex scheduler is based on the algorithm known as priority
  * multi level queue. Each thread is assigned the priority between   * based multi level queue. Each thread has its own priority
  * 0 and 255. The lower number means higher priority like BSD unix.   * assigned between 0 and 255. The lower number means higher
  * The scheduler maintains 256 level run queues mapped to each priority.   * priority like BSD unix.  The scheduler maintains 256 level run
  * The lowest priority (=255) is used only by an idle thread.   * queues mapped to each priority.  The lowest priority (=255) is
    * used only for an idle thread.
  *   *
  * All threads have two different types of priorities:   * All threads have two different types of priorities:
  *   *
  *  - Base priority   *  Base priority:
  *      This is a static priority used for priority computation. A user   *      This is a static priority used for priority computation.
  *      mode program can change this value via system call.   *      A user mode program can change this value via system call.
  *   *
  *  - Current priority   *  Current priority:
  *      An actual scheduling priority. A kernel may adjust this priority   *      An actual scheduling priority. A kernel may adjust this
  *      dynamically if it's needed.   *      priority dynamically if it's needed.
  *   *
  * Each thread has one of the following state.   * Each thread has one of the following state.
  *   *
Line 60 
Line 61 
  * The thread is always preemptive even in the kernel mode.   * The thread is always preemptive even in the kernel mode.
  * There are following 4 reasons to switch thread.   * There are following 4 reasons to switch thread.
  *   *
  *  1) Block   * (1) Block
  *      Thread is blocked for sleep or suspend.   *      Thread is blocked for sleep or suspend.
  *      It is put on the tail of the run queue when it becomes   *      It is put on the tail of the run queue when it becomes
  *      runnable again.   *      runnable again.
  *   *
  *  2) Preemption   * (2) Preemption
  *      If higher priority thread becomes runnable, the current   *      If higher priority thread becomes runnable, the current
  *      thread is put on the the _head_ of the run queue.   *      thread is put on the _head_ of the run queue.
  *   *
  *  3) Quantum expiration   * (3) Quantum expiration
  *      If the thread consumes its time quantum, it is put on   *      If the thread consumes its time quantum, it is put on
  *      the tail of the run queue.   *      the tail of the run queue.
  *   *
  *  4) Yield   * (4) Yield
  *      If the thread releases CPU by itself, it is put on   *      If the thread releases CPU by itself, it is put on the
  *      the tail of the run queue.   *      tail of the run queue.
  *   *
  * There are following three types of scheduling policies.   * There are following three types of scheduling policies.
  *   *
Line 95 
Line 96 
 #include <system.h>  #include <system.h>
 #include <sched.h>  #include <sched.h>
   
 static struct queue runq[NR_PRIOS];     /* run queues */  static struct queue     runq[NPRIO];    /* run queues */
 static struct queue wakeq;              /* queue for waking threads */  static struct queue     wakeq;          /* queue for waking threads */
 static struct queue dpcq;               /* queue for DPC threads */  static struct queue     dpcq;           /* DPC queue */
 static int top_prio;                    /* highest priority in runq */  static int              top_prio;       /* highest priority in runq */
   static struct event     dpc_event;      /* event for DPC */
   
 static struct event dpc_event;          /* event for dpc */  
   
 /*  /*
  * Search for highest-priority runnable thread.   * Search for highest-priority runnable thread.
  */   */
Line 118 
Line 118 
   
 /*  /*
  * Put a thread on the tail of the run queue.   * Put a thread on the tail of the run queue.
  * The rescheduling flag is set if the priority is better than  
  * the currently running process.  
  */   */
 static void  static void
 runq_enqueue(thread_t th)  runq_enqueue(thread_t th)
Line 128 
Line 126 
         enqueue(&runq[th->prio], &th->link);          enqueue(&runq[th->prio], &th->link);
         if (th->prio < top_prio) {          if (th->prio < top_prio) {
                 top_prio = th->prio;                  top_prio = th->prio;
                 cur_thread->need_resched = 1;                  cur_thread->resched = 1;
         }          }
 }  }
   
 /*  /*
  * Insert a thread to the head of the run queue.   * Insert a thread to the head of the run queue.
  * We don't change rescheduling flag here because this is called   * We assume this routine is called while thread switching.
  * while thread switching.  
  */   */
 static void  static void
 runq_insert(thread_t th)  runq_insert(thread_t th)
Line 147 
Line 144 
 }  }
   
 /*  /*
  * Pick up and remove the highest-priority thread from the run   * Pick up and remove the highest-priority thread
  * queue. At least, an idle thread will be returned because it   * from the run queue.
  * always resides in the lowest priority queue.  
  */   */
 static thread_t  static thread_t
 runq_dequeue(void)  runq_dequeue(void)
Line 177 
Line 173 
   
 /*  /*
  * Process all pending woken threads.   * Process all pending woken threads.
  * Rescheduling flag may be set.  
  * Note: The thread may be still in a suspend state after wakeup.  
  */   */
 static void  static void
 wakeq_flush(void)  wakeq_flush(void)
Line 192 
Line 186 
                  */                   */
                 q = dequeue(&wakeq);                  q = dequeue(&wakeq);
                 th = queue_entry(q, struct thread, link);                  th = queue_entry(q, struct thread, link);
                 th->sleep_event = 0;                  th->slpevt = NULL;
                 th->state &= ~TH_SLEEP;                  th->state &= ~TH_SLEEP;
   
                 if (th != cur_thread && th->state == TH_RUN)                  if (th != cur_thread && th->state == TH_RUN)
                         runq_enqueue(th);                          runq_enqueue(th);
         }          }
 }  }
   
 /*  /*
  * sleep_expire - sleep timer is expired:   * sched_switch - this is the scheduler proper:
  * @arg: thread to unsleep.  
  *   *
  * Wake up the passed thread that is sleeping in sched_tsleep().   * If the scheduling reason is preemption, the current
    * thread will remain at the head of the run queue.  So,
    * the thread still has right to run first again among
    * the same priority threads. For other scheduling reason,
    * the current thread is inserted into the tail of the run
    * queue.
  */   */
 static void  static void
 sleep_expire(void *arg)  
 {  
   
         sched_unsleep((thread_t)arg, SLP_TIMEOUT);  
 }  
   
 /*  
  * sched_switch - This routine is called to reschedule the CPU.  
  *  
  * If the scheduling reason is preemption, the current thread  
  * will remain at the head of the run queue. So, the thread  
  * still has right to run first again among the same priority  
  * threads. For other scheduling reason, the current thread is  
  * inserted into the tail of the run queue.  
  */  
 static void  
 sched_switch(void)  sched_switch(void)
 {  {
         thread_t prev, next;          thread_t prev, next;
   
         ASSERT(irq_level == 0);          /*
            * Move a current thread to the run queue.
            */
         prev = cur_thread;          prev = cur_thread;
         if (prev->state == TH_RUN) {          if (prev->state == TH_RUN) {
                 if (prev->prio > top_prio)                  if (prev->prio > top_prio)
Line 236 
Line 218 
                 else                  else
                         runq_enqueue(prev);                          runq_enqueue(prev);
         }          }
         prev->need_resched = 0;          prev->resched = 0;
   
         /*          /*
          * This is the scheduler proper.           * Select the thread to run the CPU next.
          */           */
         next = runq_dequeue();          next = runq_dequeue();
         if (next == prev)          if (next == prev)
                 return;                  return;
         cur_thread = next;          cur_thread = next;
   
           /*
            * Switch to the new thread.
            * You are expected to understand this..
            */
         if (prev->task != next->task)          if (prev->task != next->task)
                 vm_switch(next->task->map);                  vm_switch(next->task->map);
           context_switch(&prev->ctx, &next->ctx);
   }
   
         context_switch(&prev->context, &next->context);  /*
    * sleep_expire - sleep timer is expired:
    *
    * Wake up the thread which is sleeping in sched_tsleep().
    */
   static void
   sleep_expire(void *arg)
   {
   
           sched_unsleep((thread_t)arg, SLP_TIMEOUT);
 }  }
   
 /*  /*
  * sched_tsleep - sleep the current thread until a wakeup is   * sched_tsleep - sleep the current thread until a wakeup
  * performed on the specified event.   * is performed on the specified event.
  * @timeout: time out value in msec. (0 means no timeout)  
  *   *
  * This routine returns a sleep result. If the thread is woken   * This routine returns a sleep result. If the thread is
  * by sched_wakeup()/sched_wakeone(), it returns 0. Otherwise,   * woken by sched_wakeup() or sched_wakeone(), it returns 0.
  * it will return the result value which is passed by sched_unsleep().   * Otherwise, it will return the result value which is passed
  * We allow calling sched_sleep() with interrupt disabled.   * by sched_unsleep().  We allow calling sched_sleep() with
    * interrupt disabled.
  *   *
  * sched_sleep() is also defined as a wrapper macro for sched_tsleep()   * sched_sleep() is also defined as a wrapper macro for
  * without timeout.   * sched_tsleep() without timeout. Note that all sleep
  * Note that all sleep requests are interruptible with this kernel.   * requests are interruptible with this kernel.
  */   */
 int  int
 sched_tsleep(struct event *evt, u_long timeout)  sched_tsleep(struct event *evt, u_long msec)
 {  {
         int s;  
   
         ASSERT(irq_level == 0);          ASSERT(irq_level == 0);
         ASSERT(evt);          ASSERT(evt);
   
         sched_lock();          sched_lock();
         interrupt_save(&s);          irq_lock();
         interrupt_disable();  
   
         cur_thread->sleep_event = evt;          cur_thread->slpevt = evt;
         cur_thread->state |= TH_SLEEP;          cur_thread->state |= TH_SLEEP;
         enqueue(&evt->sleepq, &cur_thread->link);          enqueue(&evt->sleepq, &cur_thread->link);
   
         if (timeout != 0) {          if (msec != 0) {
                 timer_callout(&cur_thread->timeout, sleep_expire,                  /*
                               cur_thread, timeout);                   * Program timer to wake us up at timeout.
                    */
                   timer_callout(&cur_thread->timeout, msec, &sleep_expire,
                                 cur_thread);
         }          }
         wakeq_flush();          wakeq_flush();
         sched_switch(); /* Sleep here. Zzzz.. */          sched_switch(); /* Sleep here. Zzzz.. */
   
         interrupt_restore(s);          irq_unlock();
         sched_unlock();          sched_unlock();
         return cur_thread->sleep_result;          return cur_thread->slpret;
 }  }
   
 /*  /*
Line 300 
Line 298 
  * A thread can have sleep and suspend state simultaneously.   * A thread can have sleep and suspend state simultaneously.
  * So, the thread does not always run even if it woke up.   * So, the thread does not always run even if it woke up.
  *   *
  * Since this routine can be called from ISR at interrupt level, it   * Since this routine can be called from ISR at interrupt
  * should not touch any data of runq. Otherwise, we must frequently   * level, there may be contention for access to some data.
  * disable interrupts while accessing runq. Thus, this routine will   * Thus, this routine will temporarily move the waking thread
  * temporarily move the waking thread into wakeq, and the thread is   * into wakeq, and they will be moved to runq at a safer
  * moved to runq at a safer time in wakeq_flush().   * time in wakeq_flush().
  *   *
  * The woken thread will be put on the tail of runq regardless   * The woken thread will be put on the tail of runq
  * of its policy. If woken threads have same priority, next running   * regardless of its scheduling policy. If woken threads have
  * thread is selected by FIFO order.   * same priority, next running thread is selected by FIFO order.
  */   */
 void  void
 sched_wakeup(struct event *evt)  sched_wakeup(struct event *evt)
Line 316 
Line 314 
         queue_t q;          queue_t q;
         thread_t th;          thread_t th;
   
           ASSERT(evt);
   
           sched_lock();
         irq_lock();          irq_lock();
         while (!queue_empty(&evt->sleepq)) {          while (!queue_empty(&evt->sleepq)) {
                 /*                  /*
Line 323 
Line 324 
                  */                   */
                 q = dequeue(&evt->sleepq);                  q = dequeue(&evt->sleepq);
                 th = queue_entry(q, struct thread, link);                  th = queue_entry(q, struct thread, link);
                 th->sleep_result = 0;                  th->slpret = 0;
                 enqueue(&wakeq, q);                  enqueue(&wakeq, q);
                 timer_stop(&th->timeout);                  timer_stop(&th->timeout);
         }          }
         irq_unlock();          irq_unlock();
           sched_unlock();
 }  }
   
 /*  /*
  * sched_wakeone - wake up one thread sleeping for the event.   * sched_wakeone - wake up one thread sleeping on event.
  *   *
  * The highest priority thread is woken among sleeping threads.   * The highest priority thread is woken among sleeping
  * sched_wakeone() returns the thread ID of the woken thread, or   * threads. This routine returns the thread ID of the
  * NULL if no threads are sleeping.   * woken thread, or NULL if no threads are sleeping.
  */   */
 thread_t  thread_t
 sched_wakeone(struct event *evt)  sched_wakeone(struct event *evt)
Line 343 
Line 345 
         queue_t head, q;          queue_t head, q;
         thread_t top, th = NULL;          thread_t top, th = NULL;
   
           sched_lock();
         irq_lock();          irq_lock();
         head = &evt->sleepq;          head = &evt->sleepq;
         if (!queue_empty(head)) {          if (!queue_empty(head)) {
                 /*                  /*
                 * Select the highest priority thread in                   * Select the highest priority thread in
                 * the sleeping threads, and move it to                   * the sleep queue, and wake it up.
                  * the wake queue.  
                  */                   */
                 q = queue_first(head);                  q = queue_first(head);
                 top = queue_entry(q, struct thread, link);                  top = queue_entry(q, struct thread, link);
Line 360 
Line 362 
                         q = queue_next(q);                          q = queue_next(q);
                 }                  }
                 queue_remove(&top->link);                  queue_remove(&top->link);
                   top->slpret = 0;
                 enqueue(&wakeq, &top->link);                  enqueue(&wakeq, &top->link);
                 timer_stop(&top->timeout);                  timer_stop(&top->timeout);
                   th = top;
         }          }
         irq_unlock();          irq_unlock();
           sched_unlock();
         return th;          return th;
 }  }
   
 /*  /*
  * sched_unsleep - cancel sleep.   * sched_unsleep - cancel sleep.
  *   *
  * sched_unsleep() removes the specified thread from its sleep   * sched_unsleep() removes the specified thread from its
  * queue. The specified sleep result will be passed to the sleeping   * sleep queue. The specified sleep result will be passed
  * thread as a return value of sched_tsleep().   * to the sleeping thread as a return value of sched_tsleep().
  */   */
 void  void
 sched_unsleep(thread_t th, int result)  sched_unsleep(thread_t th, int result)
 {  {
   
         sched_lock();          sched_lock();
         if (th->state & TH_SLEEP) {          if (th->state & TH_SLEEP) {
                 irq_lock();                  irq_lock();
                 queue_remove(&th->link);                  queue_remove(&th->link);
                 th->sleep_result = result;                  th->slpret = result;
                 enqueue(&wakeq, &th->link);                  enqueue(&wakeq, &th->link);
                 timer_stop(&th->timeout);                  timer_stop(&th->timeout);
                 irq_unlock();                  irq_unlock();
   
         }          }
         sched_unlock();          sched_unlock();
 }  }
Line 393 
Line 398 
 /*  /*
  * Yield the current processor to another thread.   * Yield the current processor to another thread.
  *   *
  * If a thread switching occurs, the current thread will be moved   * Note that the current thread may run immediately again,
  * on the tail of the run queue regardless of its policy.   * if no other thread exists in the same priority queue.
  * Note that the current thread may run immediately again, if no  
  * other thread exists in the same priority queue.  
  */   */
 void  void
 sched_yield(void)  sched_yield(void)
 {  {
         ASSERT(irq_level == 0);  
   
         sched_lock();          sched_lock();
   
         if (!queue_empty(&runq[cur_thread->prio]))          if (!queue_empty(&runq[cur_thread->prio]))
                 cur_thread->need_resched = 1;                  cur_thread->resched = 1;
   
         sched_unlock(); /* Switch current thread here */          sched_unlock(); /* Switch current thread here */
 }  }
   
 /*  /*
  * Suspend the specified thread.   * Suspend the specified thread.
  * The scheduler must be locked before calling this routine.   * Called with scheduler locked.
  * Note that the suspend count is handled in thread_suspend().  
  */   */
 void  void
 sched_suspend(thread_t th)  sched_suspend(thread_t th)
 {  {
         ASSERT(cur_thread->lock_count > 0);  
   
         if (th->state == TH_RUN) {          if (th->state == TH_RUN) {
                 if (th == cur_thread)                  if (th == cur_thread)
                         cur_thread->need_resched = 1;                          cur_thread->resched = 1;
                 else                  else
                         runq_remove(th);                          runq_remove(th);
         }          }
Line 432 
Line 432 
   
 /*  /*
  * Resume the specified thread.   * Resume the specified thread.
  * The scheduler must be locked before calling this routine.   * Called with scheduler locked.
  */   */
 void  void
 sched_resume(thread_t th)  sched_resume(thread_t th)
 {  {
         ASSERT(cur_thread->lock_count > 0);  
   
         if (th->state & TH_SUSPEND) {          if (th->state & TH_SUSPEND) {
                 th->state &= ~TH_SUSPEND;                  th->state &= ~TH_SUSPEND;
Line 455 
Line 454 
 sched_tick(void)  sched_tick(void)
 {  {
   
         cur_thread->total_ticks++;          /* Profile running time. */
           cur_thread->time++;
   
         if (cur_thread->policy == SCHED_RR) {          if (cur_thread->policy == SCHED_RR) {
                 if (--cur_thread->ticks_left <= 0) {                  if (--cur_thread->timeleft <= 0) {
                         cur_thread->ticks_left = QUANTUM;                          /*
                         cur_thread->need_resched = 1;                           * The quantum is up.
                            * Give the thread another.
                            */
                           cur_thread->timeleft += QUANTUM;
                           cur_thread->resched = 1;
                 }                  }
         }          }
 }  }
Line 475 
Line 479 
         th->state = TH_RUN | TH_SUSPEND;          th->state = TH_RUN | TH_SUSPEND;
         th->policy = SCHED_RR;          th->policy = SCHED_RR;
         th->prio = PRIO_USER;          th->prio = PRIO_USER;
         th->base_prio = PRIO_USER;          th->baseprio = PRIO_USER;
         th->ticks_left = QUANTUM;          th->timeleft = QUANTUM;
 }  }
   
 /*  /*
Line 485 
Line 489 
 void  void
 sched_stop(thread_t th)  sched_stop(thread_t th)
 {  {
         ASSERT(irq_level == 0);  
         ASSERT(cur_thread->lock_count > 0);  
   
         if (th == cur_thread) {          if (th == cur_thread) {
                 /*                  /*
                  * If specified thread is current thread, the                   * If specified thread is current thread,
                  * scheduling lock count is force set to 1 to                   * the scheduling lock count is force set
                  * ensure the thread switching in the next                   * to 1 to ensure the thread switching in
                  * sched_unlock().                   * the next sched_unlock().
                  */                   */
                 cur_thread->lock_count = 1;                  cur_thread->locks = 1;
                 cur_thread->need_resched = 1;                  cur_thread->resched = 1;
         } else {          } else {
                 if (th->state == TH_RUN)                  if (th->state == TH_RUN)
                         runq_remove(th);                          runq_remove(th);
Line 510 
Line 512 
 /*  /*
  * sched_lock - lock the scheduler.   * sched_lock - lock the scheduler.
  *   *
  * The thread switch is disabled during scheduler locked. This   * The thread switch is disabled during scheduler locked.
  * is mainly used to synchronize the thread execution to protect   * This is mainly used to synchronize the thread execution
  * global resources. Even when scheduler is locked, any interrupt   * to protect global resources. Even when scheduler is
  * handler can run. So, we have to use irq_lock() to synchronize   * locked, an interrupt handler can run. So, we have to
  * a global data with ISR.   * use irq_lock() to synchronize a global data with ISR.
  *   *
  * Since the scheduling lock can be nested any number of times,   * Since the scheduling lock can be nested any number of
  * the caller is responsible for unlocking the same number of   * times, the caller is responsible for unlocking the same
  * locks.   * number of locks.
  */   */
 void  void
 sched_lock(void)  sched_lock(void)
 {  {
   
         cur_thread->lock_count++;          cur_thread->locks++;
 }  }
   
 /*  /*
  * sched_unlock - unlock scheduler.   * sched_unlock - unlock scheduler.
  *   *
  * If nobody locks the scheduler anymore, it runs pending wake   * If nobody locks the scheduler anymore, it checks the
  * threads and check the reschedule flag. The thread switch is   * rescheduling flag and kick scheduler if it's marked.
  * invoked if the rescheduling request exists.  
  *   *
  * Note that this routine will be called at the end of the   * Note that this routine will be always called at the end
  * interrupt handler.   * of each interrupt handler.
  */   */
 void  void
 sched_unlock(void)  sched_unlock(void)
 {  {
         int s;          int s;
   
         ASSERT(cur_thread->lock_count > 0);          ASSERT(cur_thread->locks > 0);
   
         interrupt_save(&s);          interrupt_save(&s);
         interrupt_disable();          interrupt_disable();
   
         if (cur_thread->lock_count == 1) {          if (cur_thread->locks == 1) {
                 wakeq_flush();                  wakeq_flush();
                 while (cur_thread->need_resched) {                  while (cur_thread->resched) {
   
                         /* Kick scheduler */                          /* Kick scheduler */
                         sched_switch();                          sched_switch();
Line 566 
Line 567 
                         wakeq_flush();                          wakeq_flush();
                 }                  }
         }          }
         cur_thread->lock_count--;          cur_thread->locks--;
   
         interrupt_restore(s);          interrupt_restore(s);
 }  }
Line 580 
Line 581 
   
 /*  /*
  * sched_setprio - set priority of thread.   * sched_setprio - set priority of thread.
  * @base: Base priority  
  * @prio: Current priority  
  *   *
  * Thread switch may be invoked here by priority change.   * The rescheduling flag is set if the priority is
    * better than the currently running thread.
  * Called with scheduler locked.   * Called with scheduler locked.
  */   */
 void  void
 sched_setprio(thread_t th, int base, int prio)  sched_setprio(thread_t th, int baseprio, int prio)
 {  {
   
         th->base_prio = base;          th->baseprio = baseprio;
   
         if (th == cur_thread) {          if (th == cur_thread) {
                 /*                  /*
Line 600 
Line 600 
                 th->prio = prio;                  th->prio = prio;
                 top_prio = runq_top();                  top_prio = runq_top();
                 if (prio != top_prio)                  if (prio != top_prio)
                         cur_thread->need_resched = 1;                          cur_thread->resched = 1;
         } else {          } else {
                 if (th->state == TH_RUN) {                  if (th->state == TH_RUN) {
                         /*                          /*
                          * Update the thread priority and adjust the                           * Update the thread priority and adjust
                          * run queue position for new priority.                           * the run queue position for new priority.
                          */                           */
                         runq_remove(th);                          runq_remove(th);
                         th->prio = prio;                          th->prio = prio;
Line 630 
Line 630 
         switch (policy) {          switch (policy) {
         case SCHED_RR:          case SCHED_RR:
         case SCHED_FIFO:          case SCHED_FIFO:
                 th->ticks_left = QUANTUM;                  th->timeleft = QUANTUM;
                 th->policy = policy;                  th->policy = policy;
                 break;                  break;
         default:          default:
Line 641 
Line 641 
 }  }
   
 /*  /*
  * DPC thread   * Schedule DPC callback.
  *   *
  * This is a kernel thread to process the pending call back request   * DPC (Deferred Procedure Call) is used to call the specific
  * within DPC queue. Each DPC routine is called with the following   * function at some later time with a DPC priority. It is also
  * conditions.   * known as AST or SoftIRQ in other kernels.  DPC is typically
    * used by device drivers to do the low-priority jobs without
    * degrading real-time performance.
    * This routine can be called from ISR.
    */
   void
   sched_dpc(struct dpc *dpc, void (*func)(void *), void *arg)
   {
           ASSERT(dpc);
           ASSERT(func);
   
           irq_lock();
           /*
            * Insert request to DPC queue.
            */
           dpc->func = func;
           dpc->arg = arg;
           if (dpc->state != DPC_PENDING)
                   enqueue(&dpcq, &dpc->link);
           dpc->state = DPC_PENDING;
   
           /* Wake DPC thread */
           sched_wakeup(&dpc_event);
   
           irq_unlock();
   }
   
   /*
    * DPC thread.
    *
    * This is a kernel thread to process the pending call back
    * request within DPC queue. Each DPC routine is called with
    * the following conditions.
  *  - Interrupt is enabled.   *  - Interrupt is enabled.
  *  - Scheduler is unlocked.   *  - Scheduler is unlocked.
  */   */
 static void  static void
 dpc_thread(u_long unused)  dpc_thread(void *arg)
 {  {
         queue_t q;          queue_t q;
         struct dpc *dpc;          struct dpc *dpc;
   
         for (;;) {          for (;;) {
   
                 /* Wait until next DPC request. */                  /* Wait until next DPC request. */
                 sched_sleep(&dpc_event);                  sched_sleep(&dpc_event);
   
Line 665 
Line 696 
                         dpc = queue_entry(q, struct dpc, link);                          dpc = queue_entry(q, struct dpc, link);
                         dpc->state = DPC_FREE;                          dpc->state = DPC_FREE;
   
                           /*
                            * Call DPC routine.
                            */
                         interrupt_enable();                          interrupt_enable();
                         (*dpc->func)(dpc->arg);                          (*dpc->func)(dpc->arg);
                         interrupt_disable();                          interrupt_disable();
Line 674 
Line 708 
 }  }
   
 /*  /*
  * Qeueue DPC (Deferred Procedure Call) request  
  *  
  * Call function at some later time in a DPC priority. This is  
  * typically used by device drivers to do the low-priority jobs.  
  * This routine can be called from ISR.  
  */  
 void  
 sched_dpc(struct dpc *dpc, void (*func)(void *), void *arg)  
 {  
         ASSERT(dpc);  
         ASSERT(func);  
   
         irq_lock();  
         dpc->func = func;  
         dpc->arg = arg;  
         if (dpc->state != DPC_PENDING)  
                 enqueue(&dpcq, &dpc->link);  
         dpc->state = DPC_PENDING;  
         sched_wakeup(&dpc_event);  
         irq_unlock();  
 }  
   
 /*  
  * Initialize the global scheduler state.   * Initialize the global scheduler state.
  */   */
 void  void
 sched_init(void)  sched_init(void)
 {  {
           thread_t th;
         int i;          int i;
   
         for (i = 0; i < NR_PRIOS; i++)          for (i = 0; i < NPRIO; i++)
                 queue_init(&runq[i]);                  queue_init(&runq[i]);
   
         queue_init(&wakeq);          queue_init(&wakeq);
         queue_init(&dpcq);          queue_init(&dpcq);
         event_init(&dpc_event, "dpc");          event_init(&dpc_event, "dpc");
         top_prio = PRIO_IDLE;          top_prio = PRIO_IDLE;
         cur_thread->need_resched = 1;          cur_thread->resched = 1;
   
         /*          /* Create a DPC thread. */
          * Create a DPC thread.          th = kthread_create(dpc_thread, NULL, PRIO_DPC);
          */          if (th == NULL)
         if (kernel_thread(PRIO_DPC, dpc_thread, 0) == NULL)  
                 panic("sched_init");                  panic("sched_init");
   
         printk("Time slice is %d msec\n", CONFIG_TIME_SLICE);          DPRINTF(("Time slice is %d msec\n", CONFIG_TIME_SLICE));
 }  }

Legend:
Removed from v.1.1.1.1  
changed lines
  Added in v.1.1.1.1.2.1

CVSweb