
Diff for /prex-old/sys/kern/thread.c between version 1.1.1.1 and 1.1.1.1.2.1

version 1.1.1.1, 2008/06/03 10:38:46
version 1.1.1.1.2.1, 2008/08/13 17:12:32
Line 1 / Line 1:
 /*-
- * Copyright (c) 2005-2007, Kohsuke Ohtani
+ * Copyright (c) 2005-2008, Kohsuke Ohtani
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
Line 40 / Line 40:
 #include <sync.h>
 #include <system.h>
 
-struct thread idle_thread;
+/* forward */
+static void do_terminate(thread_t);
+
+static struct thread    idle_thread;
+static thread_t         zombie;
+
+/* global */
 thread_t cur_thread = &idle_thread;
-static thread_t zombie;
 
 /*
- * Allocate a new thread and attach kernel stack for it.
+ * Allocate a new thread and attach a kernel stack to it.
  * Returns thread pointer on success, or NULL on failure.
  */
 static thread_t
Line 56 / Line 61:
 
         if ((th = kmem_alloc(sizeof(struct thread))) == NULL)
                 return NULL;
-        memset(th, 0, sizeof(struct thread));
 
         if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL) {
                 kmem_free(th);
                 return NULL;
         }
+        memset(th, 0, sizeof(struct thread));
         th->kstack = stack;
         th->magic = THREAD_MAGIC;
         list_init(&th->mutexes);
Line 77 / Line 82:
 }
 
 /*
- * Create a new thread within the specified task.
+ * Create a new thread.
  *
- * The context of a current thread will be copied to the new thread.
- * The new thread will start from the return address of thread_create()
- * call in user mode code. Since a new thread will share the user
- * mode stack with a current thread, user mode applications are
- * responsible to allocate stack for it. The new thread is initially
- * set to suspend state, and so, thread_resume() must be called to
- * start it.
- *
- * The following scheduling parameters are reset to default values
- * in the created thread.
- *  - Thread State
- *  - Scheduling Policy
- *  - Scheduling Priority
+ * The context of a current thread will be copied to the
+ * new thread. The new thread will start from the return
+ * address of thread_create() call in user mode code.
+ * Since a new thread will share the user mode stack with
+ * a current thread, user mode applications are
+ * responsible to allocate stack for it. The new thread is
+ * initially set to suspend state, and so, thread_resume()
+ * must be called to start it.
  */
 int
 thread_create(task_t task, thread_t *thp)
 {
         thread_t th;
         int err = 0;
+        vaddr_t sp;
 
         sched_lock();
         if (!task_valid(task)) {
Line 113 / Line 114:
                 goto out;
         }
         /*
-         * At first, we copy a new thread id as return value.
+         * First, we copy a new thread id as return value.
          * This is done here to simplify all error recoveries
          * of the subsequent code.
          */
         if (cur_task() == &kern_task)
                 *thp = th;
         else {
-                if (umem_copyout(&th, thp, sizeof(thread_t))) {
+                if (umem_copyout(&th, thp, sizeof(th))) {
                         thread_free(th);
                         err = EFAULT;
                         goto out;
Line 130 / Line 131:
          * Initialize thread state.
          */
         th->task = task;
-        th->suspend_count = task->suspend_count + 1;
+        th->suscnt = task->suscnt + 1;
         memcpy(th->kstack, cur_thread->kstack, KSTACK_SIZE);
-        context_init(&th->context, (u_long)th->kstack + KSTACK_SIZE);
+        sp = (vaddr_t)th->kstack + KSTACK_SIZE;
+        context_set(&th->ctx, CTX_KSTACK, sp);
+        context_set(&th->ctx, CTX_KENTRY, (vaddr_t)&syscall_ret);
         list_insert(&task->threads, &th->task_link);
         sched_start(th);
  out:
Line 142 / Line 145:
 
 /*
  * Permanently stop execution of the specified thread.
- * If given thread is a current thread, this routine never returns.
+ * If given thread is a current thread, this routine
+ * never returns.
  */
 int
 thread_terminate(thread_t th)
 {
-        int err;
 
         sched_lock();
         if (!thread_valid(th)) {
-                err = ESRCH;
-        } else if (!task_access(th->task)) {
-                err = EPERM;
-        } else {
-                err = thread_kill(th);
+                sched_unlock();
+                return ESRCH;
         }
+        if (!task_access(th->task)) {
+                sched_unlock();
+                return EPERM;
+        }
+        do_terminate(th);
         sched_unlock();
-        return err;
+        return 0;
 }
 
 /*
- * Kill a thread regardless of the current task state.
- *
- * This may be used to terminate a kernel thread under the non-context
- * condition. For example, a device driver may terminate its interrupt
- * thread even if a current task does not have the capability to
- * terminate it.
+ * Terminate thread-- the internal version of thread_terminate.
  */
-int
-thread_kill(thread_t th)
+static void
+do_terminate(thread_t th)
 {
         /*
          * Clean up thread state.
Line 180 / Line 180:
         mutex_cleanup(th);
         list_remove(&th->task_link);
         sched_stop(th);
-        th->exc_bitmap = 0;
+        th->excbits = 0;
         th->magic = 0;
 
         /*
-         * We can not release the context of the "current" thread
-         * because our thread switching always requires the current
-         * context. So, the resource deallocation is deferred until
-         * another thread calls thread_kill().
+         * We can not release the context of the "current"
+         * thread because our thread switching always
+         * requires the current context. So, the resource
+         * deallocation is deferred until another thread
+         * calls thread_terminate().
          */
         if (zombie != NULL) {
                 /*
Line 201 / Line 202:
         if (th == cur_thread) {
                 /*
                  * If the current thread is being terminated,
-                 * enter zombie state and wait for sombody
+                 * enter zombie state and wait for somebody
                  * to be killed us.
                  */
                 zombie = th;
-        } else
+        } else {
                 thread_free(th);
-        return 0;
+        }
 }
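
The zombie pointer above is a one-slot deferred-reclamation scheme: a thread cannot free the kernel stack it is still running on, so the last thread to exit parks itself in zombie and whoever terminates a thread next frees it first. A stripped-down sketch of the same pattern with hypothetical names, purely for illustration:

    /*
     * One-slot deferred free: the current user of a resource cannot
     * release it itself, so the next caller reclaims the previous one.
     * resource_free() and struct resource are placeholders.
     */
    static struct resource *zombie_res;

    static void
    release_resource(struct resource *r, int in_use_by_caller)
    {
            if (zombie_res != NULL) {
                    resource_free(zombie_res);      /* reclaim the earlier one */
                    zombie_res = NULL;
            }
            if (in_use_by_caller)
                    zombie_res = r;                 /* defer our own release */
            else
                    resource_free(r);
    }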
   
 /*
Line 219 / Line 220:
 int
 thread_load(thread_t th, void (*entry)(void), void *stack)
 {
-        int err = 0;
 
-        if ((entry != NULL && !user_area(entry)) ||
-            (stack != NULL && !user_area(stack)))
+        if (entry != NULL && !user_area(entry))
                 return EINVAL;
+        if (stack != NULL && !user_area(stack))
+                return EINVAL;
 
         sched_lock();
         if (!thread_valid(th)) {
-                err = ESRCH;
-        } else if (!task_access(th->task)) {
-                err = EPERM;
-        } else {
-                if (entry != NULL)
-                        context_set(&th->context, CTX_UENTRY, (u_long)entry);
-                if (stack != NULL)
-                        context_set(&th->context, CTX_USTACK, (u_long)stack);
+                sched_unlock();
+                return ESRCH;
         }
+        if (!task_access(th->task)) {
+                sched_unlock();
+                return EPERM;
+        }
+        if (entry != NULL)
+                context_set(&th->ctx, CTX_UENTRY, (vaddr_t)entry);
+        if (stack != NULL)
+                context_set(&th->ctx, CTX_USTACK, (vaddr_t)stack);
+
         sched_unlock();
         return 0;
 }
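
Taken together, thread_create(), thread_load() and thread_resume() form the lifecycle described in the comment above thread_create(): the creator supplies the new thread's user stack, points the still-suspended thread at its entry function, and then releases it. A minimal user-mode sketch of that sequence, assuming the system-call stubs visible to applications carry the same prototypes as the kernel entry points in this file, that task_self() returns the caller's task handle, and that the stack argument is the top of a downward-growing stack (all three are assumptions, not shown in this diff):

    /* Hypothetical user-mode usage; see the assumptions noted above. */
    static char worker_stack[4096];

    static void
    worker(void)
    {
            /* ... thread body ... */
            for (;;)
                    ;
    }

    static int
    start_worker(void)
    {
            thread_t th;
            int err;

            /* Create a suspended thread in the caller's own task. */
            if ((err = thread_create(task_self(), &th)) != 0)
                    return err;

            /* Give it an entry point and a caller-allocated stack. */
            err = thread_load(th, worker, &worker_stack[sizeof(worker_stack)]);
            if (err != 0)
                    return err;

            /* It was created suspended; let it run. */
            return thread_resume(th);
    }
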
Line 260 / Line 264:
 /*
  * Suspend thread.
  *
- * A thread can be suspended any number of times. And, it does
- * not start to run again unless the thread is resumed by the
- * same count of suspend request.
+ * A thread can be suspended any number of times.
+ * And, it does not start to run again unless the
+ * thread is resumed by the same count of suspend
+ * request.
  */
 int
 thread_suspend(thread_t th)
 {
-        int err = 0;
 
         sched_lock();
         if (!thread_valid(th)) {
-                err = ESRCH;
-        } else if (!task_access(th->task)) {
-                err = EPERM;
-        } else {
-                if (++th->suspend_count == 1)
-                        sched_suspend(th);
+                sched_unlock();
+                return ESRCH;
         }
+        if (!task_access(th->task)) {
+                sched_unlock();
+                return EPERM;
+        }
+        if (++th->suscnt == 1)
+                sched_suspend(th);
+
         sched_unlock();
         return 0;
 }
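
The comment above spells out counted suspension: every thread_suspend() increments suscnt and only the first one actually stops the thread, so each suspend must be balanced by a resume before the thread becomes runnable again (and thread_resume() on a thread whose suscnt is already zero fails with EINVAL, as the next hunk shows). A small illustration of that contract, assuming a valid handle th as in the earlier sketch:

    thread_suspend(th);     /* suscnt 0 -> 1: thread stops running      */
    thread_suspend(th);     /* suscnt 1 -> 2: already stopped           */

    thread_resume(th);      /* suscnt 2 -> 1: still suspended           */
    thread_resume(th);      /* suscnt 1 -> 0: runnable again, provided
                               its task's suscnt is also zero           */
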
Line 298 / Line 305:
         sched_lock();
         if (!thread_valid(th)) {
                 err = ESRCH;
-        } else if (!task_access(th->task)) {
-                err= EPERM;
-        } else if (th->suspend_count == 0) {
+                goto out;
+        }
+        if (!task_access(th->task)) {
+                err = EPERM;
+                goto out;
+        }
+        if (th->suscnt == 0) {
                 err = EINVAL;
-        } else {
-                th->suspend_count--;
-                if (th->suspend_count == 0 && th->task->suspend_count == 0)
+                goto out;
+        }
+
+        th->suscnt--;
+        if (th->suscnt == 0) {
+                if (th->task->suscnt == 0) {
                         sched_resume(th);
+                }
         }
+ out:
         sched_unlock();
         return err;
 }
 
 /*
  * thread_schedparam - get/set scheduling parameter.
- * @th:    target thread
- * @op:    operation ID
- * @param: pointer to parameter
  *
- * If the caller has CAP_NICE capability, all operations are allowed.
- * Otherwise, the caller can change the parameter for the threads in
- * the same task, and it can not set the priority to higher value.
+ * If the caller has CAP_NICE capability, all operations are
+ * allowed.  Otherwise, the caller can change the parameter
+ * for the threads in the same task, and it can not set the
+ * priority to higher value.
  */
 int
 thread_schedparam(thread_t th, int op, int *param)
 {
         int prio, policy, err = 0;
-        int capable = 0;
 
         sched_lock();
         if (!thread_valid(th)) {
-                sched_unlock();
-                return ESRCH;
+                err = ESRCH;
+                goto out;
         }
-        if (task_capable(CAP_NICE))
-                capable = 1;
-
-        if (th->task != cur_task() && !capable) {
-                sched_unlock();
-                return EPERM;
+        if (th->task == &kern_task) {
+                err = EPERM;
+                goto out;
         }
-        if ((th->task == &kern_task) &&
-            (op == OP_SETPRIO || op == OP_SETPOLICY)) {
-                sched_unlock();
-                return EPERM;
+        if (th->task != cur_task() && !task_capable(CAP_NICE)) {
+                err = EPERM;
+                goto out;
         }
 
         switch (op) {
         case OP_GETPRIO:
                 prio = sched_getprio(th);
-                err = umem_copyout(&prio, param, sizeof(int));
+                err = umem_copyout(&prio, param, sizeof(prio));
                 break;
 
         case OP_SETPRIO:
-                if ((err = umem_copyin(param, &prio, sizeof(int))))
+                if ((err = umem_copyin(param, &prio, sizeof(prio))))
                         break;
-                if (prio < 0)
+                if (prio < 0) {
                         prio = 0;
-                else if (prio >= PRIO_IDLE)
+                } else if (prio >= PRIO_IDLE) {
                         prio = PRIO_IDLE - 1;
+                } else {
+                        /* DO NOTHING */
+                }
 
-                if (prio < th->prio && !capable) {
+                if (prio < th->prio && !task_capable(CAP_NICE)) {
                         err = EPERM;
                         break;
                 }
Line 365 / Line 379:
                  * If a current priority is inherited for mutex,
                  * we can not change the priority to lower value.
                  * In this case, only the base priority is changed,
-                 * and a current priority will be adjusted to correct
-                 * value, later.
+                 * and a current priority will be adjusted to
+                 * correct value, later.
                  */
-                if (th->prio != th->base_prio && prio > th->prio)
+                if (th->prio != th->baseprio && prio > th->prio)
                         prio = th->prio;
 
                 mutex_setprio(th, prio);
                 sched_setprio(th, prio, prio);
                 break;
 
         case OP_GETPOLICY:
                 policy = sched_getpolicy(th);
-                err = umem_copyout(&policy, param, sizeof(int));
+                err = umem_copyout(&policy, param, sizeof(policy));
                 break;
 
         case OP_SETPOLICY:
-                if ((err = umem_copyin(param, &policy, sizeof(int))))
+                if ((err = umem_copyin(param, &policy, sizeof(policy))))
                         break;
                 if (sched_setpolicy(th, policy))
                         err = EINVAL;
                 break;
 
         default:
                 err = EINVAL;
                 break;
         }
+ out:
         sched_unlock();
         return err;
 }
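
As the switch above shows, thread_schedparam() moves a single int through the param pointer: OP_GETPRIO/OP_SETPRIO for the priority and OP_GETPOLICY/OP_SETPOLICY for the policy, where a smaller number is a more urgent priority (PRIO_IDLE is the floor) and raising priority is refused without CAP_NICE. A hedged user-mode sketch of lowering a thread's priority by one step, assuming these operation codes and the same prototype are reachable from the caller:

    static int
    nice_down(thread_t th)
    {
            int prio, err;

            /* Read the current priority into a local int. */
            if ((err = thread_schedparam(th, OP_GETPRIO, &prio)) != 0)
                    return err;

            /*
             * A numerically larger value is a lower priority, so this
             * never needs CAP_NICE; the kernel clamps it below PRIO_IDLE.
             */
            prio++;
            return thread_schedparam(th, OP_SETPRIO, &prio);
    }
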
Line 395 / Line 413:
 /*
  * Idle thread.
  *
- * This routine is called only once after kernel initialization
- * is completed. An idle thread has the role of cutting down the power
- * consumption of a system. An idle thread has FIFO scheduling policy
+ * This routine is called only once after kernel
+ * initialization is completed. An idle thread has the
+ * role of cutting down the power consumption of a
+ * system. An idle thread has FIFO scheduling policy
  * because it does not have time quantum.
  */
 void
Line 415 / Line 434:
  * Create a thread running in the kernel address space.
  *
  * A kernel thread does not have user mode context, and its
- * scheduling policy is set to SCHED_FIFO. kernel_thread() returns
- * thread ID on success, or NULL on failure. We assume scheduler
- * is already locked.
+ * scheduling policy is set to SCHED_FIFO. kthread_create()
+ * returns thread ID on success, or NULL on failure.
  *
- * Important: Since sched_switch() will disable interrupts in CPU,
- * the interrupt is always disabled at the entry point of the kernel
- * thread. So, the kernel thread must enable the interrupt first when
- * it gets control.
+ * Important: Since sched_switch() will disable interrupts in
+ * CPU, the interrupt is always disabled at the entry point of
+ * the kernel thread. So, the kernel thread must enable the
+ * interrupt first when it gets control.
+ *
+ * This routine assumes the scheduler is already locked.
  */
 thread_t
-kernel_thread(int prio, void (*entry)(u_long), u_long arg)
+kthread_create(void (*entry)(void *), void *arg, int prio)
 {
         thread_t th;
+        vaddr_t sp;
 
+        ASSERT(cur_thread->locks > 0);
+
+        /*
+         * If there is not enough core for the new thread,
+         * just drop to panic().
+         */
         if ((th = thread_alloc()) == NULL)
                 return NULL;
 
         th->task = &kern_task;
         memset(th->kstack, 0, KSTACK_SIZE);
-        context_init(&th->context, (u_long)th->kstack + KSTACK_SIZE);
-        context_set(&th->context, CTX_KENTRY, (u_long)entry);
-        context_set(&th->context, CTX_KARG, arg);
+        sp = (vaddr_t)th->kstack + KSTACK_SIZE;
+        context_set(&th->ctx, CTX_KSTACK, sp);
+        context_set(&th->ctx, CTX_KENTRY, (vaddr_t)entry);
+        context_set(&th->ctx, CTX_KARG, (vaddr_t)arg);
         list_insert(&kern_task.threads, &th->task_link);
 
+        /*
+         * Start scheduling of this thread.
+         */
         sched_start(th);
         sched_setpolicy(th, SCHED_FIFO);
         sched_setprio(th, prio, prio);
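
kthread_create() must be entered with the scheduler locked (the new ASSERT checks cur_thread->locks), and the comment above warns that the thread gains control with interrupts disabled, so enabling them is its first duty. A sketch of how a driver might start and later stop such a thread; the priority value, the worker body and the interrupt_enable() primitive are placeholders standing in for whatever the surrounding kernel code actually provides:

    static thread_t worker_th;

    static void
    worker(void *arg)
    {
            /*
             * Interrupts are still disabled here (see the comment on
             * kthread_create above); interrupt_enable() stands in for
             * the HAL primitive that re-enables them.
             */
            interrupt_enable();

            for (;;) {
                    /* ... service work for the driver ... */
            }
    }

    static void
    driver_start(void)
    {
            sched_lock();           /* kthread_create() expects this */
            worker_th = kthread_create(worker, NULL, 100 /* placeholder prio */);
            sched_unlock();

            if (worker_th == NULL)
                    panic("driver_start");
    }

    static void
    driver_stop(void)
    {
            kthread_terminate(worker_th);
    }
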
Line 447 / Line 478:
 }
 
 /*
+ * Terminate kernel thread.
+ */
+void
+kthread_terminate(thread_t th)
+{
+
+        ASSERT(th);
+        ASSERT(th->task == &kern_task);
+
+        sched_lock();
+        do_terminate(th);
+        sched_unlock();
+}
+
+/*
  * Return thread information for ps command.
  */
 int
Line 456 / Line 502:
         list_t i, j;
         thread_t th;
         task_t task;
+        int err = 0, found = 0;
 
         sched_lock();
+
+        /*
+         * Search a target thread from the given index.
+         */
         index = 0;
         i = &kern_task.link;
         do {
                 task = list_entry(i, struct task, link);
-                j = &task->threads;
-                j = list_first(j);
+                j = list_first(&task->threads);
                 do {
                         th = list_entry(j, struct thread, task_link);
-                        if (index++ == target)
-                                goto found;
+                        if (index++ == target) {
+                                found = 1;
+                                goto done;
+                        }
                         j = list_next(j);
                 } while (j != &task->threads);
                 i = list_next(i);
         } while (i != &kern_task.link);
+ done:
+        if (found) {
+                info->policy = th->policy;
+                info->prio = th->prio;
+                info->time = th->time;
+                info->task = th->task;
+                strlcpy(info->taskname, task->name, MAXTASKNAME);
+                strlcpy(info->slpevt,
+                        th->slpevt ? th->slpevt->name : "-", MAXEVTNAME);
+        } else {
+                err = ESRCH;
+        }
         sched_unlock();
-        return ESRCH;
- found:
-        info->state = th->state;
-        info->policy = th->policy;
-        info->prio = th->prio;
-        info->base_prio = th->base_prio;
-        info->suspend_count = th->suspend_count;
-        info->total_ticks = th->total_ticks;
-        info->id = th;
-        info->task = th->task;
-        strlcpy(info->task_name, task->name, MAXTASKNAME);
-        strlcpy(info->sleep_event,
-                th->sleep_event ? th->sleep_event->name : "-", 12);
-
-        sched_unlock();
-        return 0;
+        return err;
 }
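
The reworked thread_info() walks every task's thread list, fills in the record for the requested index, and reports ESRCH once the index runs past the last thread, which makes a ps-style consumer a simple loop over increasing indices. A sketch of such a loop, assuming a struct threadinfo with the fields assigned above and a thread_info(u_long index, struct threadinfo *info) signature (the actual prototype is outside this diff):

    struct threadinfo ti;
    u_long i;

    /* Print one line per thread until the index runs off the end. */
    for (i = 0; thread_info(i, &ti) == 0; i++)
            printf(" %8s pri=%3d pol=%d time=%d event=%s\n",
                   ti.taskname, ti.prio, ti.policy, (int)ti.time, ti.slpevt);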
   
-#if defined(DEBUG) && defined(CONFIG_KDUMP)
+#ifdef DEBUG
 void
 thread_dump(void)
 {
Line 503 / Line 551:
         thread_t th;
         task_t task;
 
-        printk("Thread dump:\n");
-        printk(" mod thread   task     stat pol  prio base ticks    "
+        printf("\nThread dump:\n");
+        printf(" mod thread   task     stat pol  prio base time     "
                "susp sleep event\n");
-        printk(" --- -------- -------- ---- ---- ---- ---- -------- "
+        printf(" --- -------- -------- ---- ---- ---- ---- -------- "
                "---- ------------\n");
 
         i = &kern_task.link;
         do {
                 task = list_entry(i, struct task, link);
-                j = &task->threads;
-                j = list_first(j);
+                j = list_first(&task->threads);
                 do {
                         th = list_entry(j, struct thread, task_link);
-                        printk(" %s %08x %8s %s%c %s  %3d  %3d %8d %4d %s\n",
+
+                        printf(" %s %08x %8s %s%c %s  %3d  %3d %8d %4d %s\n",
                                (task == &kern_task) ? "Knl" : "Usr", th,
                                task->name, state[th->state],
                                (th == cur_thread) ? '*' : ' ',
-                               pol[th->policy], th->prio, th->base_prio,
-                               th->total_ticks, th->suspend_count,
-                               th->sleep_event ? th->sleep_event->name : "-");
+                               pol[th->policy], th->prio, th->baseprio,
+                               th->time, th->suscnt,
+                               th->slpevt != NULL ? th->slpevt->name : "-");
+
                         j = list_next(j);
                 } while (j != &task->threads);
                 i = list_next(i);
Line 531 / Line 580:
 #endif
 
 /*
- * The first thread in system is created here by hand. This thread
- * will become an idle thread when thread_idle() is called later.
+ * The first thread in system is created here by hand.
+ * This thread will become an idle thread when thread_idle()
+ * is called later in main().
  */
 void
 thread_init(void)
 {
         void *stack;
+        vaddr_t sp;
 
         if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL)
-                panic("thread_init");
+                panic("thread_init: out of memory");
 
         memset(stack, 0, KSTACK_SIZE);
         idle_thread.kstack = stack;
Line 549 / Line 600:
         idle_thread.state = TH_RUN;
         idle_thread.policy = SCHED_FIFO;
         idle_thread.prio = PRIO_IDLE;
-        idle_thread.base_prio = PRIO_IDLE;
-        idle_thread.lock_count = 1;
+        idle_thread.baseprio = PRIO_IDLE;
+        idle_thread.locks = 1;
 
-        context_init(&idle_thread.context, (u_long)stack + KSTACK_SIZE);
+        sp = (vaddr_t)stack + KSTACK_SIZE;
+        context_set(&idle_thread.ctx, CTX_KSTACK, sp);
         list_insert(&kern_task.threads, &idle_thread.task_link);
 }

Legend:
Removed from v.1.1.1.1  
changed lines
  Added in v.1.1.1.1.2.1
