Lines matching refs:fh (uses of fh in drivers/media/v4l2-core/v4l2-event.c; the leading number is the line in that file)
27 static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) in __v4l2_event_dequeue() argument
33 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in __v4l2_event_dequeue()
35 if (list_empty(&fh->available)) { in __v4l2_event_dequeue()
36 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in __v4l2_event_dequeue()
40 WARN_ON(fh->navailable == 0); in __v4l2_event_dequeue()
42 kev = list_first_entry(&fh->available, struct v4l2_kevent, list); in __v4l2_event_dequeue()
44 fh->navailable--; in __v4l2_event_dequeue()
46 kev->event.pending = fh->navailable; in __v4l2_event_dequeue()
54 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in __v4l2_event_dequeue()
59 int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, in v4l2_event_dequeue() argument
65 return __v4l2_event_dequeue(fh, event); in v4l2_event_dequeue()
68 if (fh->vdev->lock) in v4l2_event_dequeue()
69 mutex_unlock(fh->vdev->lock); in v4l2_event_dequeue()
72 ret = wait_event_interruptible(fh->wait, in v4l2_event_dequeue()
73 fh->navailable != 0); in v4l2_event_dequeue()
77 ret = __v4l2_event_dequeue(fh, event); in v4l2_event_dequeue()
80 if (fh->vdev->lock) in v4l2_event_dequeue()
81 mutex_lock(fh->vdev->lock); in v4l2_event_dequeue()
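The two functions above back VIDIOC_DQEVENT: __v4l2_event_dequeue() pops one event under fh_lock, and v4l2_event_dequeue() sleeps on fh->wait for blocking callers, releasing vdev->lock while it waits. A minimal user-space sketch of reaching that blocking path; the /dev/video0 node and the V4L2_EVENT_EOS type are assumptions, the ioctls and structures are the standard V4L2 event uAPI:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int wait_for_eos(void)
{
        struct v4l2_event_subscription sub;
        struct v4l2_event ev;
        int fd = open("/dev/video0", O_RDWR);      /* assumed device node */

        if (fd < 0)
                return -1;

        memset(&sub, 0, sizeof(sub));
        sub.type = V4L2_EVENT_EOS;                 /* any subscribable type works */
        if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
                goto err;

        /* Blocking wait: the kernel side sleeps in v4l2_event_dequeue(). */
        if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
                goto err;

        printf("event type %u, %u more pending\n", ev.type, ev.pending);
        close(fd);
        return 0;
err:
        close(fd);
        return -1;
}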
89 struct v4l2_fh *fh, u32 type, u32 id) in v4l2_event_subscribed() argument
93 assert_spin_locked(&fh->vdev->fh_lock); in v4l2_event_subscribed()
95 list_for_each_entry(sev, &fh->subscribed, list) in v4l2_event_subscribed()
102 static void __v4l2_event_queue_fh(struct v4l2_fh *fh, in __v4l2_event_queue_fh() argument
110 sev = v4l2_event_subscribed(fh, ev->type, ev->id); in __v4l2_event_queue_fh()
115 fh->sequence++; in __v4l2_event_queue_fh()
124 fh->navailable--; in __v4l2_event_queue_fh()
144 kev->event.sequence = fh->sequence; in __v4l2_event_queue_fh()
146 list_add_tail(&kev->list, &fh->available); in __v4l2_event_queue_fh()
148 fh->navailable++; in __v4l2_event_queue_fh()
150 wake_up_all(&fh->wait); in __v4l2_event_queue_fh()
155 struct v4l2_fh *fh; in v4l2_event_queue() local
166 list_for_each_entry(fh, &vdev->fh_list, list) in v4l2_event_queue()
167 __v4l2_event_queue_fh(fh, ev, ts); in v4l2_event_queue()
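v4l2_event_queue() walks vdev->fh_list under fh_lock and hands the event to __v4l2_event_queue_fh() for every open handle. A hedged driver-side sketch of broadcasting an end-of-stream event; the my_signal_eos() wrapper is an assumed name, the helper and V4L2_EVENT_EOS are the stock API:

#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>

static void my_signal_eos(struct video_device *vfd)
{
        const struct v4l2_event ev = {
                .type = V4L2_EVENT_EOS,
        };

        /* Delivered to every fh that subscribed to V4L2_EVENT_EOS. */
        v4l2_event_queue(vfd, &ev);
}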
173 void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) in v4l2_event_queue_fh() argument
178 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_queue_fh()
179 __v4l2_event_queue_fh(fh, ev, ts); in v4l2_event_queue_fh()
180 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_queue_fh()
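v4l2_event_queue_fh() is the single-handle variant, used when an event concerns only one file handle. A sketch using a driver-private event type and the raw 64-byte payload area; the type offset and payload layout are illustrative assumptions:

#include <linux/string.h>
#include <linux/types.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

static void my_notify_fh(struct v4l2_fh *fh, u32 status)
{
        struct v4l2_event ev = {
                .type = V4L2_EVENT_PRIVATE_START + 1,   /* driver-private type */
        };

        memcpy(ev.u.data, &status, sizeof(status));     /* raw payload bytes */
        v4l2_event_queue_fh(fh, &ev);
}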
184 int v4l2_event_pending(struct v4l2_fh *fh) in v4l2_event_pending() argument
186 return fh->navailable; in v4l2_event_pending()
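v4l2_event_pending() simply reports fh->navailable; its usual caller is a driver poll handler. A sketch assuming the driver uses v4l2_fh_open(), so file->private_data holds the struct v4l2_fh; my_poll() is an assumed name:

#include <linux/poll.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

static __poll_t my_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        __poll_t res = 0;

        if (poll_requested_events(wait) & EPOLLPRI) {
                poll_wait(file, &fh->wait, wait);
                if (v4l2_event_pending(fh))
                        res |= EPOLLPRI;        /* an event is queued on this fh */
        }
        return res;
}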
192 struct v4l2_fh *fh; in v4l2_event_wake_all() local
200 list_for_each_entry(fh, &vdev->fh_list, list) in v4l2_event_wake_all()
201 wake_up_all(&fh->wait); in v4l2_event_wake_all()
209 struct v4l2_fh *fh = sev->fh; in __v4l2_event_unsubscribe() local
212 lockdep_assert_held(&fh->subscribe_lock); in __v4l2_event_unsubscribe()
213 assert_spin_locked(&fh->vdev->fh_lock); in __v4l2_event_unsubscribe()
218 fh->navailable--; in __v4l2_event_unsubscribe()
223 int v4l2_event_subscribe(struct v4l2_fh *fh, in v4l2_event_subscribe() argument
246 sev->fh = fh; in v4l2_event_subscribe()
250 mutex_lock(&fh->subscribe_lock); in v4l2_event_subscribe()
252 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_subscribe()
253 found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); in v4l2_event_subscribe()
255 list_add(&sev->list, &fh->subscribed); in v4l2_event_subscribe()
256 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_subscribe()
264 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_subscribe()
266 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_subscribe()
271 mutex_unlock(&fh->subscribe_lock); in v4l2_event_subscribe()
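v4l2_event_subscribe() allocates the per-subscription event ring (elems entries) and links it into fh->subscribed under fh_lock. A sketch of the vidioc_subscribe_event handler most drivers provide; the event types handled and the element count are assumptions, v4l2_ctrl_subscribe_event() is the stock control-event helper:

#include <linux/errno.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>

static int my_subscribe_event(struct v4l2_fh *fh,
                              const struct v4l2_event_subscription *sub)
{
        switch (sub->type) {
        case V4L2_EVENT_EOS:
                /* Keep up to two queued events per subscription, no ops. */
                return v4l2_event_subscribe(fh, sub, 2, NULL);
        case V4L2_EVENT_CTRL:
                return v4l2_ctrl_subscribe_event(fh, sub);
        default:
                return -EINVAL;
        }
}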
277 void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) in v4l2_event_unsubscribe_all() argument
286 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe_all()
287 if (!list_empty(&fh->subscribed)) { in v4l2_event_unsubscribe_all()
288 sev = list_first_entry(&fh->subscribed, in v4l2_event_unsubscribe_all()
293 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe_all()
295 v4l2_event_unsubscribe(fh, &sub); in v4l2_event_unsubscribe_all()
300 int v4l2_event_unsubscribe(struct v4l2_fh *fh, in v4l2_event_unsubscribe() argument
307 v4l2_event_unsubscribe_all(fh); in v4l2_event_unsubscribe()
311 mutex_lock(&fh->subscribe_lock); in v4l2_event_unsubscribe()
313 spin_lock_irqsave(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe()
315 sev = v4l2_event_subscribed(fh, sub->type, sub->id); in v4l2_event_unsubscribe()
319 spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); in v4l2_event_unsubscribe()
324 mutex_unlock(&fh->subscribe_lock); in v4l2_event_unsubscribe()
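v4l2_event_unsubscribe(), including the V4L2_EVENT_ALL path through v4l2_event_unsubscribe_all(), needs no driver-specific work, so it is normally wired straight into the ioctl ops table. A sketch; my_ioctl_ops is an assumed name and my_subscribe_event is the handler sketched above:

#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>

static const struct v4l2_ioctl_ops my_ioctl_ops = {
        .vidioc_subscribe_event   = my_subscribe_event,
        .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
        /* ... remaining ioctl handlers ... */
};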
332 int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh, in v4l2_event_subdev_unsubscribe() argument
335 return v4l2_event_unsubscribe(fh, sub); in v4l2_event_subdev_unsubscribe()
359 int v4l2_src_change_event_subscribe(struct v4l2_fh *fh, in v4l2_src_change_event_subscribe() argument
363 return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops); in v4l2_src_change_event_subscribe()
369 struct v4l2_fh *fh, struct v4l2_event_subscription *sub) in v4l2_src_change_event_subdev_subscribe() argument
371 return v4l2_src_change_event_subscribe(fh, sub); in v4l2_src_change_event_subdev_subscribe()
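v4l2_src_change_event_subscribe() is a thin wrapper that subscribes with the v4l2_event_src_ch_ops seen above, and the _subdev_ variants adapt the same calls to the sub-device op signatures. A sketch of both hookups; the my_* names are assumptions:

#include <linux/errno.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>

static int my_vidioc_subscribe(struct v4l2_fh *fh,
                               const struct v4l2_event_subscription *sub)
{
        if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
                return v4l2_src_change_event_subscribe(fh, sub);
        return -EINVAL;
}

static const struct v4l2_subdev_core_ops my_sd_core_ops = {
        .subscribe_event   = v4l2_src_change_event_subdev_subscribe,
        .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};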