Lines matching refs:idev in drivers/uio/uio.c

216 struct uio_device *idev = dev_get_drvdata(dev); in name_show() local
219 mutex_lock(&idev->info_lock); in name_show()
220 if (!idev->info) { in name_show()
226 ret = sprintf(buf, "%s\n", idev->info->name); in name_show()
229 mutex_unlock(&idev->info_lock); in name_show()
237 struct uio_device *idev = dev_get_drvdata(dev); in version_show() local
240 mutex_lock(&idev->info_lock); in version_show()
241 if (!idev->info) { in version_show()
247 ret = sprintf(buf, "%s\n", idev->info->version); in version_show()
250 mutex_unlock(&idev->info_lock); in version_show()
258 struct uio_device *idev = dev_get_drvdata(dev); in event_show() local
259 return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); in event_show()
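name_show(), version_show() and event_show() back the name, version and event attributes under /sys/class/uio/uioX/. A minimal userspace sketch, assuming a device has come up as uio0, that reads the interrupt counter maintained by atomic_inc(&idev->event):

#include <stdio.h>

/* Read the counter exported by event_show().
 * The uio0 path is an assumption for illustration. */
static unsigned int read_uio0_event_count(void)
{
	FILE *f = fopen("/sys/class/uio/uio0/event", "r");
	unsigned int count = 0;

	if (f) {
		if (fscanf(f, "%u", &count) != 1)
			count = 0;
		fclose(f);
	}
	return count;
}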
282 static int uio_dev_add_attributes(struct uio_device *idev) in uio_dev_add_attributes() argument
294 mem = &idev->info->mem[mi]; in uio_dev_add_attributes()
299 idev->map_dir = kobject_create_and_add("maps", in uio_dev_add_attributes()
300 &idev->dev.kobj); in uio_dev_add_attributes()
301 if (!idev->map_dir) { in uio_dev_add_attributes()
314 ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); in uio_dev_add_attributes()
323 port = &idev->info->port[pi]; in uio_dev_add_attributes()
328 idev->portio_dir = kobject_create_and_add("portio", in uio_dev_add_attributes()
329 &idev->dev.kobj); in uio_dev_add_attributes()
330 if (!idev->portio_dir) { in uio_dev_add_attributes()
343 ret = kobject_add(&portio->kobj, idev->portio_dir, in uio_dev_add_attributes()
358 port = &idev->info->port[pi]; in uio_dev_add_attributes()
362 kobject_put(idev->portio_dir); in uio_dev_add_attributes()
367 mem = &idev->info->mem[mi]; in uio_dev_add_attributes()
371 kobject_put(idev->map_dir); in uio_dev_add_attributes()
372 dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret); in uio_dev_add_attributes()
376 static void uio_dev_del_attributes(struct uio_device *idev) in uio_dev_del_attributes() argument
383 mem = &idev->info->mem[i]; in uio_dev_del_attributes()
388 kobject_put(idev->map_dir); in uio_dev_del_attributes()
391 port = &idev->info->port[i]; in uio_dev_del_attributes()
396 kobject_put(idev->portio_dir); in uio_dev_del_attributes()
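uio_dev_add_attributes() creates the maps/ and portio/ kobject directories, so each memory region shows up as /sys/class/uio/uioX/maps/mapN and each port region as portio/portN; uio_dev_del_attributes() tears the same hierarchy down. A sketch, again assuming uio0 with at least one mapping, that reads the size of map0 (useful later as the mmap() length):

#include <stdio.h>

/* Returns the length of memory mapping 0, or 0 on error.
 * The size attribute contains a hex value such as 0x1000. */
static unsigned long read_uio0_map0_size(void)
{
	FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");
	unsigned long size = 0;

	if (f) {
		if (fscanf(f, "%lx", &size) != 1)
			size = 0;
		fclose(f);
	}
	return size;
}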
399 static int uio_get_minor(struct uio_device *idev) in uio_get_minor() argument
404 retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL); in uio_get_minor()
406 idev->minor = retval; in uio_get_minor()
409 dev_err(&idev->dev, "too many uio devices\n"); in uio_get_minor()
429 struct uio_device *idev = info->uio_dev; in uio_event_notify() local
431 atomic_inc(&idev->event); in uio_event_notify()
432 wake_up_interruptible(&idev->wait); in uio_event_notify()
433 kill_fasync(&idev->async_queue, SIGIO, POLL_IN); in uio_event_notify()
444 struct uio_device *idev = (struct uio_device *)dev_id; in uio_interrupt() local
447 ret = idev->info->handler(irq, idev->info); in uio_interrupt()
449 uio_event_notify(idev->info); in uio_interrupt()
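uio_interrupt() simply dispatches to the handler the driver placed in uio_info->handler and, when that returns IRQ_HANDLED, calls uio_event_notify() to bump the counter and wake readers. A minimal sketch of such a handler for a hypothetical device (the hardware check is only indicated by a comment):

#include <linux/interrupt.h>
#include <linux/uio_driver.h>

/* Hypothetical handler installed in uio_info->handler.  A real
 * driver would verify that its hardware raised the interrupt and
 * mask/acknowledge it here; returning IRQ_HANDLED makes
 * uio_interrupt() call uio_event_notify() and wake any reader
 * blocked in uio_read(). */
static irqreturn_t my_uio_handler(int irq, struct uio_info *info)
{
	/* ... check and silence the device ... */
	return IRQ_HANDLED;
}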
461 struct uio_device *idev; in uio_open() local
466 idev = idr_find(&uio_idr, iminor(inode)); in uio_open()
468 if (!idev) { in uio_open()
473 get_device(&idev->dev); in uio_open()
475 if (!try_module_get(idev->owner)) { in uio_open()
486 listener->dev = idev; in uio_open()
487 listener->event_count = atomic_read(&idev->event); in uio_open()
490 mutex_lock(&idev->info_lock); in uio_open()
491 if (!idev->info) { in uio_open()
492 mutex_unlock(&idev->info_lock); in uio_open()
497 if (idev->info->open) in uio_open()
498 ret = idev->info->open(idev->info, inode); in uio_open()
499 mutex_unlock(&idev->info_lock); in uio_open()
509 module_put(idev->owner); in uio_open()
512 put_device(&idev->dev); in uio_open()
521 struct uio_device *idev = listener->dev; in uio_fasync() local
523 return fasync_helper(fd, filep, on, &idev->async_queue); in uio_fasync()
530 struct uio_device *idev = listener->dev; in uio_release() local
532 mutex_lock(&idev->info_lock); in uio_release()
533 if (idev->info && idev->info->release) in uio_release()
534 ret = idev->info->release(idev->info, inode); in uio_release()
535 mutex_unlock(&idev->info_lock); in uio_release()
537 module_put(idev->owner); in uio_release()
539 put_device(&idev->dev); in uio_release()
546 struct uio_device *idev = listener->dev; in uio_poll() local
549 mutex_lock(&idev->info_lock); in uio_poll()
550 if (!idev->info || !idev->info->irq) in uio_poll()
552 mutex_unlock(&idev->info_lock); in uio_poll()
557 poll_wait(filep, &idev->wait, wait); in uio_poll()
558 if (listener->event_count != atomic_read(&idev->event)) in uio_poll()
567 struct uio_device *idev = listener->dev; in uio_read() local
575 add_wait_queue(&idev->wait, &wait); in uio_read()
578 mutex_lock(&idev->info_lock); in uio_read()
579 if (!idev->info || !idev->info->irq) { in uio_read()
581 mutex_unlock(&idev->info_lock); in uio_read()
584 mutex_unlock(&idev->info_lock); in uio_read()
588 event_count = atomic_read(&idev->event); in uio_read()
613 remove_wait_queue(&idev->wait, &wait); in uio_read()
622 struct uio_device *idev = listener->dev; in uio_write() local
632 mutex_lock(&idev->info_lock); in uio_write()
633 if (!idev->info) { in uio_write()
638 if (!idev->info->irq) { in uio_write()
643 if (!idev->info->irqcontrol) { in uio_write()
648 retval = idev->info->irqcontrol(idev->info, irq_on); in uio_write()
651 mutex_unlock(&idev->info_lock); in uio_write()
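uio_read() blocks on idev->wait until the event counter changes and copies the 32-bit count to userspace; uio_write() takes a 32-bit value and forwards it to the driver's irqcontrol() hook, which drivers conventionally use to re-enable their interrupt. poll()/select() work as well, since uio_poll() waits on the same queue. A userspace sketch of the usual interrupt loop (device node /dev/uio0 assumed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	uint32_t count, enable = 1;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	for (;;) {
		/* Blocks in uio_read() until the event counter changes. */
		if (read(fd, &count, sizeof(count)) != sizeof(count))
			break;
		printf("interrupt #%u\n", count);

		/* Value is passed to the driver's irqcontrol() hook;
		 * many drivers treat 1 as "re-enable the interrupt". */
		if (write(fd, &enable, sizeof(enable)) != sizeof(enable))
			break;
	}
	close(fd);
	return 0;
}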
657 struct uio_device *idev = vma->vm_private_data; in uio_find_mem_index() local
660 if (idev->info->mem[vma->vm_pgoff].size == 0) in uio_find_mem_index()
669 struct uio_device *idev = vmf->vma->vm_private_data; in uio_vma_fault() local
676 mutex_lock(&idev->info_lock); in uio_vma_fault()
677 if (!idev->info) { in uio_vma_fault()
694 addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset; in uio_vma_fault()
695 if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) in uio_vma_fault()
703 mutex_unlock(&idev->info_lock); in uio_vma_fault()
727 struct uio_device *idev = vma->vm_private_data; in uio_mmap_physical() local
733 mem = idev->info->mem + mi; in uio_mmap_physical()
741 if (idev->info->mem[mi].memtype == UIO_MEM_PHYS) in uio_mmap_physical()
763 struct uio_device *idev = listener->dev; in uio_mmap() local
771 vma->vm_private_data = idev; in uio_mmap()
773 mutex_lock(&idev->info_lock); in uio_mmap()
774 if (!idev->info) { in uio_mmap()
786 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) in uio_mmap()
787 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; in uio_mmap()
793 if (idev->info->mmap) { in uio_mmap()
794 ret = idev->info->mmap(idev->info, vma); in uio_mmap()
798 switch (idev->info->mem[mi].memtype) { in uio_mmap()
812 mutex_unlock(&idev->info_lock); in uio_mmap()
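uio_mmap() selects the memory region via vma->vm_pgoff, so userspace chooses mapping N by passing an offset of N * page size to mmap(). A sketch that maps region 0 of the assumed /dev/uio0 (in real code the length would come from maps/map0/size as read above):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	size_t len = 4096;	/* assumed; read maps/map0/size in real code */
	int region = 0;		/* mapping index, encoded in the offset */
	void *base;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)region * getpagesize());
	if (base == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* registers/memory of mapping 0 are now accessible at 'base' */
	munmap(base, len);
	close(fd);
	return 0;
}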
903 struct uio_device *idev = dev_get_drvdata(dev); in uio_device_release() local
905 kfree(idev); in uio_device_release()
920 struct uio_device *idev; in __uio_register_device() local
931 idev = kzalloc(sizeof(*idev), GFP_KERNEL); in __uio_register_device()
932 if (!idev) { in __uio_register_device()
936 idev->owner = owner; in __uio_register_device()
937 idev->info = info; in __uio_register_device()
938 mutex_init(&idev->info_lock); in __uio_register_device()
939 init_waitqueue_head(&idev->wait); in __uio_register_device()
940 atomic_set(&idev->event, 0); in __uio_register_device()
942 ret = uio_get_minor(idev); in __uio_register_device()
944 kfree(idev); in __uio_register_device()
948 device_initialize(&idev->dev); in __uio_register_device()
949 idev->dev.devt = MKDEV(uio_major, idev->minor); in __uio_register_device()
950 idev->dev.class = &uio_class; in __uio_register_device()
951 idev->dev.parent = parent; in __uio_register_device()
952 idev->dev.release = uio_device_release; in __uio_register_device()
953 dev_set_drvdata(&idev->dev, idev); in __uio_register_device()
955 ret = dev_set_name(&idev->dev, "uio%d", idev->minor); in __uio_register_device()
959 ret = device_add(&idev->dev); in __uio_register_device()
963 ret = uio_dev_add_attributes(idev); in __uio_register_device()
967 info->uio_dev = idev; in __uio_register_device()
979 info->irq_flags, info->name, idev); in __uio_register_device()
989 uio_dev_del_attributes(idev); in __uio_register_device()
991 device_del(&idev->dev); in __uio_register_device()
993 uio_free_minor(idev->minor); in __uio_register_device()
994 put_device(&idev->dev); in __uio_register_device()
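__uio_register_device() is normally reached through the uio_register_device() macro, which passes THIS_MODULE as owner. A sketch of a hypothetical platform driver probe that fills in a struct uio_info and registers it; the driver name, register address and size are placeholders:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/uio_driver.h>

static int my_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;
	int irq;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	info->name = "my-uio";		/* reported by name_show() */
	info->version = "0.1";		/* reported by version_show() */

	/* One physical region, exported as maps/map0; the address
	 * and size here are placeholders for illustration. */
	info->mem[0].name = "registers";
	info->mem[0].addr = 0x10000000;
	info->mem[0].size = 0x1000;
	info->mem[0].memtype = UIO_MEM_PHYS;

	info->irq = irq;		/* or UIO_IRQ_NONE / UIO_IRQ_CUSTOM */
	info->handler = my_uio_handler;	/* e.g. the handler sketched above */

	platform_set_drvdata(pdev, info);

	/* Expands to __uio_register_device(THIS_MODULE, &pdev->dev, info). */
	return uio_register_device(&pdev->dev, info);
}

Using devm_kzalloc() ties the uio_info allocation to the platform device's lifetime, so the remove path does not have to free it explicitly.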
1044 struct uio_device *idev; in uio_unregister_device() local
1050 idev = info->uio_dev; in uio_unregister_device()
1051 minor = idev->minor; in uio_unregister_device()
1053 mutex_lock(&idev->info_lock); in uio_unregister_device()
1054 uio_dev_del_attributes(idev); in uio_unregister_device()
1057 free_irq(info->irq, idev); in uio_unregister_device()
1059 idev->info = NULL; in uio_unregister_device()
1060 mutex_unlock(&idev->info_lock); in uio_unregister_device()
1062 wake_up_interruptible(&idev->wait); in uio_unregister_device()
1063 kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); in uio_unregister_device()
1065 device_unregister(&idev->dev); in uio_unregister_device()
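uio_unregister_device() clears idev->info under info_lock, wakes blocked readers with POLL_HUP and unregisters the device, so a driver's remove path only has to hand back the uio_info it registered. Continuing the hypothetical driver above (using the classic int-returning remove callback):

static int my_uio_remove(struct platform_device *pdev)
{
	struct uio_info *info = platform_get_drvdata(pdev);

	uio_unregister_device(info);
	return 0;	/* info itself was devm-allocated in probe */
}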