
Diff for /prex-old/sys/kern/device.c between version 1.1.1.1 and 1.1.1.1.2.1

--- version 1.1.1.1, 2008/06/03 10:38:46
+++ version 1.1.1.1.2.1, 2008/08/13 17:12:31
@@ v1.1.1.1 line 32 / v1.1.1.1.2.1 line 32 @@
  */
 
 /*
- * The device_* system calls are interfaces for user mode applications
- * to access the specific device object which is handled by the related
- * device driver. A device driver is an execution module different from
- * a kernel on Prex. The routines in this file have the following role
- * to handle the device I/O.
+ * The device_* system calls are interfaces for user mode
+ * applications to access the specific device object which is
+ * handled by the related device driver. A device driver is an
+ * execution module different from a kernel on Prex. The routines
+ * in this file have the following role to handle the device I/O.
  *
  *  - Manage the name space for device objects.
- *  - Forward user I/O requests to the drivers after checking parameters.
+ *  - Forward user I/O requests to the drivers after checking
+ *    parameters.
  *
- * The driver module(s) and kernel are dynamically linked at system boot.
+ * The driver module(s) and kernel are dynamically linked
+ * at system boot.
  */
 
 #include <kernel.h>
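
Illustration (not part of the diff): the comment above describes the registration model, where a driver hands the kernel a table of I/O routines and the kernel tracks the resulting device object by name. A rough driver-side sketch follows. The devio member names match the routines this file calls (open/close/read/write/ioctl/event), but the exact struct layout, the routine prototypes, and the flag value are assumptions; device_create() itself is static to this file, so a real driver presumably reaches it through the driver-kernel interface table shown further down.

static int
null_open(device_t dev, int mode)
{
        return 0;
}

static int
null_read(device_t dev, char *buf, size_t *nbyte, int blkno)
{
        *nbyte = 0;             /* always report end-of-file */
        return 0;
}

static struct devio null_io = {
        .open = null_open,
        .read = null_read,
        /* unused entries stay NULL; device_open()/device_read()
         * then fall back to their default behavior */
};

void
null_init(void)
{
        device_create(&null_io, "null", 0 /* assumed character-device flag */);
}
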
@@ v1.1.1.1 line 57 / v1.1.1.1.2.1 line 59 @@
 #include <system.h>
 
 /* forward declarations */
-static device_t device_create(struct devio *, const char *, int);
-static int device_destroy(device_t);
-static int device_broadcast(int, int);
-static void machine_bootinfo(struct boot_info **);
-static void machine__reset(void);
-static void machine__idle(void);
-static int task__capable(cap_t cap);
-static void *phys__to_virt(void *);
-static void *virt__to_phys(void *);
+static device_t  device_create(struct devio *, const char *, int);
+static int       device_destroy(device_t);
+static int       device_broadcast(int, int);
 
-#ifndef DEBUG
-static void nosys(void);
-#undef printk
-#define printk nosys
+static void      machine_bootinfo(struct boot_info **);
+static void      _machine_reset(void);
+static void      _machine_idle(void);
+static int       _task_capable(cap_t);
+static void     *_phys_to_virt(void *);
+static void     *_virt_to_phys(void *);
 
-#undef panic
-#define panic machine_reset
+#ifdef DEBUG
+#define _debug_attach   debug_attach
+#define _debug_dump     debug_dump
+#define _printf         printf
+#define _panic          panic
+#else
+#define _debug_attach   nosys
+#define _debug_dump     nosys
+#define _printf         nosys
+#define _panic          machine_reset
+static void nosys(void);
 #endif
 
 typedef void (*dkifn_t)(void);
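
Illustration (not part of the diff): the dkifn_t typedef and the numbered DKIENT table that follows form the driver-kernel interface, an array of untyped function pointers whose slot numbers are part of the ABI. A hedged sketch of how the two sides might fit together; the DKIENT expansion and the driver-library stub are assumptions, only the slot numbering comes from the table below (slot 29 is _machine_reset).

/* Assumed expansion: store every kernel entry point as a generic
 * function pointer. */
#define DKIENT(fn)      (dkifn_t)(fn)

/* Hypothetical driver-library stub: cast the numbered slot back to
 * its real type before calling through it. */
static const dkifn_t *dki;      /* vector received from the kernel */

void
machine_reset(void)
{
        ((void (*)(void))dki[29])();
}
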
@@ v1.1.1.1 line 110 / v1.1.1.1.2.1 line 117 @@
         /* 23 */ DKIENT(sched_tsleep),
         /* 24 */ DKIENT(sched_wakeup),
         /* 25 */ DKIENT(sched_dpc),
-        /* 26 */ DKIENT(task__capable),
+        /* 26 */ DKIENT(_task_capable),
         /* 27 */ DKIENT(exception_post),
         /* 28 */ DKIENT(machine_bootinfo),
-        /* 29 */ DKIENT(machine__reset),
-        /* 30 */ DKIENT(machine__idle),
-        /* 31 */ DKIENT(phys__to_virt),
-        /* 32 */ DKIENT(virt__to_phys),
-        /* 33 */ DKIENT(debug_attach),
-        /* 34 */ DKIENT(debug_dump),
-        /* 35 */ DKIENT(printk),
-        /* 36 */ DKIENT(panic),
+        /* 29 */ DKIENT(_machine_reset),
+        /* 30 */ DKIENT(_machine_idle),
+        /* 31 */ DKIENT(_phys_to_virt),
+        /* 32 */ DKIENT(_virt_to_phys),
+        /* 33 */ DKIENT(_debug_attach),
+        /* 34 */ DKIENT(_debug_dump),
+        /* 35 */ DKIENT(_printf),
+        /* 36 */ DKIENT(_panic),
 };
 
 static struct list device_list;         /* list of the device objects */
 
 /*
  * Increment reference count on an active device.
- * This routine checks whether the specified device is valid.
- * It returns 0 on success, or -1 on failure.
+ * It returns 0 on success, or -1 if the device is invalid.
  */
 static int
 device_hold(device_t dev)
@@ v1.1.1.1 line 137 / v1.1.1.1.2.1 line 143 @@
 
         sched_lock();
         if (device_valid(dev)) {
-                dev->ref_count++;
+                dev->refcnt++;
                 err = 0;
         }
         sched_unlock();
@@ v1.1.1.1 line 145 / v1.1.1.1.2.1 line 151 @@
 }
 
 /*
- * Decrement the reference count on a device. If the reference
- * count becomes zero, we can release the resource for the
- * target device. Assumes the device is already validated by caller.
+ * Decrement the reference count on a device. If the
+ * reference count becomes zero, we can release the
+ * resource for the target device. Assumes the device
+ * is already validated by caller.
  */
 static void
 device_release(device_t dev)
 {
 
         sched_lock();
-        if (--dev->ref_count == 0) {
+        if (--dev->refcnt == 0) {
                 list_remove(&dev->link);
                 kmem_free(dev);
         }
@@ v1.1.1.1 line 186 / v1.1.1.1.2.1 line 193 @@
 
 /*
  * device_create - create new device object.
- * @io:    pointer to device I/O routines
- * @name:  string for device name
- * @flags: flags for device object. (ex. block or character)
  *
  * A device object is created by the device driver to provide
  * I/O services to applications.
@@ v1.1.1.1 line 221 / v1.1.1.1.2.1 line 225 @@
         strlcpy(dev->name, name, len + 1);
         dev->devio = io;
         dev->flags = flags;
-        dev->ref_count = 1;
+        dev->refcnt = 1;
         dev->magic = DEVICE_MAGIC;
         list_insert(&device_list, &dev->link);
         sched_unlock();
@@ v1.1.1.1 line 229 / v1.1.1.1.2.1 line 233 @@
 }
 
 /*
- * Destroy a device object. If some other threads still refer
- * the target device, the destroy operating will be pending
- * until its reference count becomes 0.
+ * Destroy a device object. If some other threads still
+ * refer the target device, the destroy operating will be
+ * pending until its reference count becomes 0.
  */
 static int
 device_destroy(device_t dev)
@@ v1.1.1.1 line 251 / v1.1.1.1.2.1 line 255 @@
 
 /*
  * device_open - open the specified device.
- * @name: device name (null-terminated)
- * @mode: open mode. (like O_RDONLY etc.)
- * @devp: device handle of opened device to be returned.
  *
- * Even if the target driver does not have an open routine, this
- * function does not return an error. By using this mechanism, an
- * application can check whether the specific device exists or not.
- * The open mode should be handled by an each device driver if it
- * is needed.
+ * Even if the target driver does not have an open
+ * routine, this function does not return an error. By
+ * using this mechanism, an application can check whether
+ * the specific device exists or not. The open mode
+ * should be handled by an each device driver if it is
+ * needed.
  */
 int
 device_open(const char *name, int mode, device_t *devp)
@@ v1.1.1.1 line 279 / v1.1.1.1.2.1 line 281 @@
         if (len >= MAXDEVNAME)
                 return ENAMETOOLONG;
 
-        if (umem_copyin((void *)name, str, len + 1))
+        if (umem_copyin(name, str, len + 1))
                 return EFAULT;
 
         sched_lock();
@@ v1.1.1.1 line 291 / v1.1.1.1.2.1 line 293 @@
         sched_unlock();
 
         if (dev->devio->open != NULL)
-                err = (dev->devio->open)(dev, mode);
+                err = (*dev->devio->open)(dev, mode);
 
         if (!err)
-                err = umem_copyout(&dev, devp, sizeof(device_t));
+                err = umem_copyout(&dev, devp, sizeof(dev));
         device_release(dev);
         return err;
 }
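
Illustration (not part of the diff): a hypothetical user-mode caller holding CAP_DEVIO. The device name is made up, device_read() is assumed to mirror the device_write() prototype shown later in this diff, and the close call is presumed to be device_close().

static int
dump_first_block(void)
{
        device_t dev;
        static char buf[512];
        size_t nbyte = sizeof(buf);
        int err;

        /* Succeeds even when the driver has no open routine, so this
         * doubles as a "does the device exist?" probe. */
        err = device_open("hda", 0, &dev);
        if (err)
                return err;

        err = device_read(dev, buf, &nbyte, 0);  /* block #0 */
        /* on success, nbyte now holds the count actually transferred */

        device_close(dev);
        return err;
}
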
@@ v1.1.1.1 line 317 / v1.1.1.1.2.1 line 319 @@
                 return ENODEV;
 
         if (dev->devio->close != NULL)
-                err = (dev->devio->close)(dev);
+                err = (*dev->devio->close)(dev);
 
         device_release(dev);
         return err;
@@ v1.1.1.1 line 325 / v1.1.1.1.2.1 line 327 @@
 
 /*
  * device_read - read from a device.
- * @dev:   device id
- * @buf:   pointer to read buffer
- * @nbyte: number of bytes to read. actual read count is set in return.
- * @blkno: block number (for block device)
  *
+ * Actual read count is set in "nbyte" as return.
  * Note: The size of one block is device dependent.
  */
 int
@@ v1.1.1.1 line 348 / v1.1.1.1.2.1 line 347 @@
                 device_release(dev);
                 return EBADF;
         }
-        if (umem_copyin(nbyte, &count, sizeof(u_long)) ||
-            vm_access(buf, count, VMA_WRITE)) {
+        if (umem_copyin(nbyte, &count, sizeof(count))) {
                 device_release(dev);
                 return EFAULT;
         }
-        err = (dev->devio->read)(dev, buf, &count, blkno);
+        err = (*dev->devio->read)(dev, buf, &count, blkno);
         if (err == 0)
-                err = umem_copyout(&count, nbyte, sizeof(u_long));
+                err = umem_copyout(&count, nbyte, sizeof(count));
 
         device_release(dev);
         return err;
 }
 
 /*
  * device_write - write to a device.
- * @dev:   device id
- * @buf:   pointer to write buffer
- * @nbyte: number of bytes to write. actual write count is set in return.
- * @blkno: block number (for block device)
+ *
+ * Actual write count is set in "nbyte" as return.
  */
 int
 device_write(device_t dev, void *buf, size_t *nbyte, int blkno)
@@ v1.1.1.1 line 383 / v1.1.1.1.2.1 line 380 @@
                 device_release(dev);
                 return EBADF;
         }
-        if (umem_copyin(nbyte, &count, sizeof(u_long)) ||
-            vm_access(buf, count, VMA_READ)) {
+        if (umem_copyin(nbyte, &count, sizeof(count))) {
                 device_release(dev);
                 return EFAULT;
         }
-        err = (dev->devio->write)(dev, buf, &count, blkno);
+        err = (*dev->devio->write)(dev, buf, &count, blkno);
         if (err == 0)
-                err = umem_copyout(&count, nbyte, sizeof(u_long));
+                err = umem_copyout(&count, nbyte, sizeof(count));
 
         device_release(dev);
         return err;
 }
 
 /*
- * deivce_ioctl - I/O control request.
- * @dev: device id
- * @cmd: command
- * @arg: argument
+ * device_ioctl - I/O control request.
  *
  * A command and an argument are completely device dependent.
- * If argument type is pointer, the driver routine must validate
- * the pointer address.
+ * The ioctl routine of each driver must validate the user buffer
+ * pointed by the arg value.
  */
 int
-device_ioctl(device_t dev, int cmd, u_long arg)
+device_ioctl(device_t dev, u_long cmd, void *arg)
 {
-        int err;
+        int err = EBADF;
 
         if (!task_capable(CAP_DEVIO))
                 return EPERM;
@@ v1.1.1.1 line 417 / v1.1.1.1.2.1 line 410 @@
         if (device_hold(dev))
                 return ENODEV;
 
-        err = EBADF;
         if (dev->devio->ioctl != NULL)
-                err = (dev->devio->ioctl)(dev, cmd, arg);
+                err = (*dev->devio->ioctl)(dev, cmd, arg);
 
         device_release(dev);
         return err;
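
Illustration (not part of the diff): a hypothetical ioctl exchange. CMD_GET_STATE and struct dev_state are made-up names; real command codes are defined by each driver. With the new prototype (u_long cmd, void *arg) the kernel passes "arg" through untouched, so the driver's ioctl routine must validate and copy the user buffer itself.

#define CMD_GET_STATE   1               /* made-up command code */

struct dev_state {
        int online;
};

static int
query_state(device_t dev)
{
        struct dev_state st;
        int err;

        err = device_ioctl(dev, CMD_GET_STATE, &st);
        if (err == 0 && !st.online)
                err = EIO;
        return err;
}
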
@@ v1.1.1.1 line 427 / v1.1.1.1.2.1 line 419 @@
 
 /*
  * device_broadcast - broadcast an event to all device objects.
- * @event: event code
- * @force: true to ignore the return value from driver.
  *
- * If force argument is true, a kernel will continue event
+ * If "force" argument is true, a kernel will continue event
  * notification even if some driver returns error. In this case,
  * this routine returns EIO error if at least one driver returns
  * an error.
@@ v1.1.1.1 line 449 / v1.1.1.1.2.1 line 439 @@
 
         sched_lock();
         head = &device_list;
 
-#ifdef DEBUG
-        printk("Broadcasting device event:%d\n", event);
-#endif
         for (n = list_first(head); n != head; n = list_next(n)) {
                 dev = list_entry(n, struct device, link);
-                if (dev->devio->event == NULL)
-                        continue;
-
-                err = (dev->devio->event)(event);
-                if (err) {
-                        if (force)
-                                ret = EIO;
-                        else {
-                                ret = err;
-                                break;
-                        }
-                }
+                if (dev->devio->event != NULL) {
+                        /*
+                         * Call driver's event routine.
+                         */
+                        err = (*dev->devio->event)(event);
+                        if (err) {
+                                if (force)
+                                        ret = EIO;
+                                else {
+                                        ret = err;
+                                        break;
+                                }
+                        }
+                }
         }
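
Illustration (not part of the diff): the "force" flag distinguishes best-effort notification from abortable notification. A hedged sketch of the two call patterns, assuming a made-up EVT_SHUTDOWN event code (the real event codes are defined elsewhere in Prex).

#define EVT_SHUTDOWN    1       /* made-up event code, for illustration only */

static int
notify_shutdown(int best_effort)
{
        /*
         * force != 0: visit every driver; EIO only says that at least
         * one of them failed.  force == 0: stop at the first failure
         * and return that driver's own error code.
         */
        return device_broadcast(EVT_SHUTDOWN, best_effort ? 1 : 0);
}
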
@@ v1.1.1.1 line 482 / v1.1.1.1.2.1 line 470 @@
         device_t dev;
         struct devio *io;
         list_t head, n;
+        int err = ESRCH;
 
         sched_lock();
 
         index = 0;
         head = &device_list;
-        for (n = list_first(head); n != head; n = list_next(n), index++) {
+        for (n = list_first(head); n != head; n = list_next(n)) {
                 dev = list_entry(n, struct device, link);
                 io = dev->devio;
-                if (index == target)
+                if (index == target) {
+                        info->id = dev;
+                        info->flags = dev->flags;
+                        strlcpy(info->name, dev->name, MAXDEVNAME);
+                        err = 0;
                         break;
+                }
+                index++;
         }
-        if (n == head) {
-                sched_unlock();
-                return ESRCH;
-        }
-        info->id = dev;
-        info->flags = dev->flags;
-        strlcpy(info->name, dev->name, MAXDEVNAME);
 
         sched_unlock();
-        return 0;
+        return err;
 }
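
Illustration (not part of the diff): the loop above implies an enumeration contract, where the caller asks for index 0, 1, 2, ... until ESRCH comes back. The prototype and the struct definition below are assumptions (only the id/flags/name members filled in above are visible in this hunk).

/* Hypothetical prototype and record; the real ones are outside this hunk. */
struct info_device {
        device_t id;
        int      flags;
        char     name[MAXDEVNAME];
};
int device_info(u_long target, struct info_device *info);

static void
list_devices(void)
{
        struct info_device info;
        u_long i;

        /* Walk the device list until the kernel answers ESRCH. */
        for (i = 0; device_info(i, &info) == 0; i++)
                printf("%d: %s\n", (int)i, info.name);
}
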
   
-#if defined(DEBUG) && defined(CONFIG_KDUMP)
-void
-device_dump(void)
-{
-        device_t dev;
-        struct devio *io;
-        list_t head, n;
-
-        printk("Device dump:\n");
-        printk(" device   open     close    read     write    ioctl    "
-               "event    name\n");
-        printk(" -------- -------- -------- -------- -------- -------- "
-               "-------- ------------\n");
-
-        head = &device_list;
-        for (n = list_first(head); n != head; n = list_next(n)) {
-                dev = list_entry(n, struct device, link);
-                io = dev->devio;
-                printk(" %08x %08x %08x %08x %08x %08x %08x %s\n",
-                       dev, io->open, io->close, io->read, io->write,
-                       io->ioctl, io->event, dev->name);
-        }
-}
-#endif
-
 #ifndef DEBUG
+/*
+ * nonexistent driver service.
+ */
 static void
 nosys(void)
 {
@@ v1.1.1.1 line 541 / v1.1.1.1.2.1 line 505 @@
  * Check the capability of the current task.
  */
 static int
-task__capable(cap_t cap)
+_task_capable(cap_t cap)
 {
 
         return task_capable(cap);
@@ v1.1.1.1 line 559 / v1.1.1.1.2.1 line 523 @@
 }
 
 static void
-machine__reset(void)
+_machine_reset(void)
 {
 
         machine_reset();
 }
 
 static void
-machine__idle(void)
+_machine_idle(void)
 {
 
         machine_idle();
 }
 
 /*
- *  Address transtion (physical -> virtual)
+ *  Address translation (physical -> virtual)
  */
 static void *
-phys__to_virt(void *phys)
+_phys_to_virt(void *phys)
 {
 
         return phys_to_virt(phys);
 }
 
 /*
- *  Address transtion (virtual -> physical)
+ *  Address translation (virtual -> physical)
  */
 static void *
-virt__to_phys(void *virt)
+_virt_to_phys(void *virt)
 {
 
         return virt_to_phys(virt);
@@ v1.1.1.1 line 598 / v1.1.1.1.2.1 line 562 @@
 void
 device_init(void)
 {
-        struct module *m;
+        struct module *mod;
         void (*drv_entry)(const dkifn_t *);
 
         list_init(&device_list);
 
-        m = &boot_info->driver;
-        if (m == NULL)
+        mod = &boot_info->driver;
+        if (mod == NULL)
                 return;
 
-        drv_entry = (void (*)(const dkifn_t *))m->entry;
+        drv_entry = (void (*)(const dkifn_t *))mod->entry;
         if (drv_entry == NULL)
                 return;
         /*
-         * Call all driver initialization functions.
+         * Call all initialization functions in drivers.
         */
-        drv_entry(driver_service);
+        (*drv_entry)(driver_service);
 }
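
Illustration (not part of the diff): device_init() jumps to the driver module's entry point and passes it the driver_service vector. A hedged sketch of the receiving side; the entry symbol name and the bookkeeping are assumptions, only the fact that the kernel calls entry(driver_service) is shown above.

/* Hypothetical driver-module entry point. */
static const dkifn_t *kernel_service;

void
driver_main(const dkifn_t *dki)
{
        /* Remember the kernel's service vector, then run each
         * driver's own init routine, e.g. null_init() from the
         * first sketch. */
        kernel_service = dki;
        null_init();
}
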

Legend:
  -  removed from v.1.1.1.1
  +  added in v.1.1.1.1.2.1
  (changed lines appear as paired - and + lines; unmarked lines are unchanged context)
