Lines matching refs: ring — excerpt from the Thunderbolt NHI ring management code (drivers/thunderbolt/nhi.c). Source lines elided from the original listing are marked with /* ... */.
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
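To make the bit layout concrete, a worked example with an assumed hop count (the real value is read from the controller):

/*
 * Worked example (hop_count == 12 is assumed, not a hardware
 * constant): the TX ring on HopID 3 gets interrupt bit 3, while the
 * RX ring on the same HopID gets bit 3 + 12 = 15.
 * ring_interrupt_active() below turns such a bit index into a
 * register offset (bit / 32 * 4) plus a position inside that
 * register (bit & 31).
 */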
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
			/* Let the hardware clear the status bit on its own. */
			misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		/* ... read the allocation register and clear the old vector ... */
		if (active)
			ivr |= ring->vector << shift;
		/* ... write it back ... */
	}

	old = ioread32(ring->nhi->iobase + reg);
	/* ... compute new by setting or clearing mask in old ... */
	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);
	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}
static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}
static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}
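The two strides imply a fixed per-HopID register layout; the note below states that assumption explicitly:

/*
 * Layout implied by the helpers above (an inference, not taken from
 * a datasheet): ring N's descriptor registers (base address,
 * producer/consumer indices, size) occupy a 16-byte block at
 * REG_TX_RING_BASE/REG_RX_RING_BASE + 16 * N, and its options occupy
 * a 32-byte block at the matching OPTIONS base + 32 * N.
 */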
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits of this register are read-only and writes
	 * to them are ignored by the hardware, so the producer half
	 * can simply be written as zero.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above: the consumer half is read-only. */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}
static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}
static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}
static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
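This is the classic one-slot-open circular buffer: head is the producer index, tail the consumer index, and one descriptor is kept unused so that full and empty stay distinguishable. A ring of size N therefore carries at most N - 1 in-flight frames. A worked example:

/*
 * Worked example (values assumed for illustration): with
 * ring->size == 4, head == 3 and tail == 0, ring_full() sees
 * (3 + 1) % 4 == 0 == tail and reports full even though only three
 * descriptors are occupied.
 */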
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;

	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		/* ... fill in the frame's DMA address and descriptor flags ... */
		if (ring->is_tx) {
			/* ... copy size/eof/sof from the frame ... */
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* Drop the lock so callbacks can schedule new work. */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/* The callback may reenqueue or free the frame; drop it first. */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
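As a usage illustration, a consumer might post a receive buffer as in the sketch below. It assumes the public tb_ring_rx() wrapper from include/linux/thunderbolt.h; my_frame_done() and example_post_rx() are hypothetical names:

/* Hedged usage sketch; my_frame_done() is hypothetical, tb_ring_rx()
 * is the public wrapper around __tb_ring_enqueue(). */
static void my_frame_done(struct tb_ring *ring, struct ring_frame *frame,
			  bool canceled)
{
	/* Invoked from ring_work(); recycle or free the frame here. */
}

static int example_post_rx(struct tb_ring *rx, struct ring_frame *frame,
			   dma_addr_t buf)
{
	frame->buffer_phy = buf;	/* DMA-mapped data buffer */
	frame->callback = my_frame_done;
	return tb_ring_rx(rx, frame);	/* -ESHUTDOWN once the ring stops */
}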
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
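Putting the pieces together, a polling consumer might look roughly like the sketch below; example_start_poll() is a hypothetical callback registered as start_poll at allocation time:

/* Hedged sketch of the polling mode: the IRQ path masks the ring
 * interrupt and calls start_poll(); the consumer drains completions
 * and then re-enables the interrupt. example_start_poll() is
 * hypothetical. */
static void example_start_poll(void *data)
{
	struct tb_ring *ring = data;	/* registered as poll_data */
	struct ring_frame *frame;

	while ((frame = tb_ring_poll(ring)) != NULL) {
		/* process the completed frame */
	}
	tb_ring_poll_complete(ring);	/* unmask the ring interrupt */
}

In practice the callback usually just schedules deferred work (the Thunderbolt networking driver uses NAPI here), and the drain plus tb_ring_poll_complete() happen in that deferred context instead of in the IRQ handler.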
static void ring_clear_msix(const struct tb_ring *ring)
{
	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
		return;

	/* The interrupt status registers are cleared on read. */
	if (ring->is_tx)
		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
	else
		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
			 4 * (ring->nhi->hop_count / 32));
}
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	ring_clear_msix(ring);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	/* ... reserve a vector number from nhi->msix_ida ... */
	ring->vector = ret;

	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ret < 0)
		goto err_ida_remove;
	ring->irq = ret;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	if (ret)
		goto err_ida_remove;

	return 0;

err_ida_remove:
	ida_simple_remove(&nhi->msix_ida, ring->vector);
	return ret;
}
static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	int ret = 0;

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * A negative hop asks for automatic allocation: take
		 * the first free HopID of the matching direction.
		 */
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
			if (ring->is_tx && !nhi->tx_rings[i]) {
				ring->hop = i;
				break;
			}
			if (!ring->is_tx && !nhi->rx_rings[i]) {
				ring->hop = i;
				break;
			}
		}
	}

	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);
	return ret;
}
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     int e2e_tx_hop, u16 sof_mask,
				     u16 eof_mask, void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	/* ... */

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->e2e_tx_hop = e2e_tx_hop;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);
	return NULL;
}
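tb_ring_alloc() itself is static; drivers reach it through exported wrappers. A minimal sketch, assuming the tb_ring_alloc_tx()/tb_ring_alloc_rx() signatures from include/linux/thunderbolt.h of the same era; example_open_rings() and the size of 16 descriptors are illustrative choices:

static int example_open_rings(struct tb_nhi *nhi, struct tb_ring **tx,
			      struct tb_ring **rx)
{
	/* hop == -1 lets nhi_alloc_hop() pick a free HopID */
	*tx = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_FRAME);
	if (!*tx)
		return -ENOMEM;

	/* No E2E credits, accept any SOF/EOF PDF value, no polling */
	*rx = tb_ring_alloc_rx(nhi, -1, 16, RING_FLAG_FRAME, 0,
			       0xffff, 0xffff, NULL, NULL);
	if (!*rx) {
		tb_ring_free(*tx);
		return -ENOMEM;
	}
	return 0;
}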
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}

	/*
	 * Now that the ring valid bit is set we can configure E2E if
	 * enabled for the ring.
	 */
	if (ring->flags & RING_FLAG_E2E) {
		if (!ring->is_tx) {
			u32 hop;

			hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
			/* ... mask the hop field and merge it into flags ... */

			dev_dbg(&ring->nhi->pdev->dev,
				"enabling E2E for %s %d with TX HopID %d\n",
				RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
		} else {
			dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
				RING_TYPE(ring), ring->hop);
		}

		flags |= RING_FLAG_E2E_FLOW_CONTROL;
		ring_iowrite32options(ring, flags, 0);
	}

	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/* Schedule ring->work to invoke callbacks on all remaining frames. */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI so that
	 * nhi_interrupt_work() can no longer reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/* Wait for any pending ring->work to finish before freeing. */
	flush_work(&ring->work);
	kfree(ring);
}
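Teardown order matters: a ring should be stopped before it is freed, so outstanding frames get their callbacks with canceled set, and tb_ring_free() flushes ring->work before kfree(). A minimal sketch; example_close_ring() is a hypothetical name:

static void example_close_ring(struct tb_ring *ring)
{
	tb_ring_stop(ring);	/* completes queued frames as canceled */
	tb_ring_free(ring);	/* releases MSI-X, descriptor DMA, struct */
}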
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int bit, hop = -1;
	int type = 0;	/* current interrupt type: 0 TX, 1 RX, 2 RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status
	 * bitfields (TX, RX, RX overflow); the registers are cleared
	 * on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		/* ... read the next status dword as needed, advance hop/type,
		 * skip clear bits, and report RX overflows (type == 2) ... */
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX", hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}

	spin_unlock_irq(&nhi->lock);
}