Lines Matching refs:gpd

69   struct qmu_gpd *gpd) in gpd_virt_to_dma() argument
75 offset = gpd - gpd_head; in gpd_virt_to_dma()
79 return dma_base + (offset * sizeof(*gpd)); in gpd_virt_to_dma()
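The two matched lines above turn a GPD's position in the ring into its DMA address: the descriptor's index is its pointer distance from the ring head, scaled by the descriptor size and added to the ring's DMA base. A minimal sketch of how they could fit together, assuming the base pointers live in ring->start and ring->dma (both fields appear later in this listing); the ring parameter and the bounds check are assumptions, not matched lines:

static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
				  struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;		/* DMA address of the first GPD */
	struct qmu_gpd *gpd_head = ring->start;		/* virtual address of the same GPD */
	u32 offset = gpd - gpd_head;			/* index of this GPD in the ring */

	if (offset >= MAX_GPD_NUM)			/* assumed sanity check */
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}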
82 static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) in gpd_ring_init() argument
84 ring->start = gpd; in gpd_ring_init()
85 ring->enqueue = gpd; in gpd_ring_init()
86 ring->dequeue = gpd; in gpd_ring_init()
87 ring->end = gpd + MAX_GPD_NUM - 1; in gpd_ring_init()
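Assembled, gpd_ring_init() simply parks all three ring cursors on the first descriptor and records the last valid slot; this is the full body implied by the matched lines:

static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;			/* first descriptor in the ring */
	ring->enqueue = gpd;			/* next slot software will fill */
	ring->dequeue = gpd;			/* next slot software will reap */
	ring->end = gpd + MAX_GPD_NUM - 1;	/* last valid descriptor */
}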
93 struct qmu_gpd *gpd = ring->start; in reset_gpd_list() local
95 if (gpd) { in reset_gpd_list()
96 gpd->flag &= ~GPD_FLAGS_HWO; in reset_gpd_list()
97 gpd_ring_init(ring, gpd); in reset_gpd_list()
98 mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); in reset_gpd_list()
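reset_gpd_list() drops the hardware-ownership bit on the head GPD, rewinds the ring cursors, and flushes the change so the controller sees it. A sketch with the surrounding scaffolding filled in; reaching the ring through an endpoint's gpd_ring field is an assumption:

static void reset_gpd_list(struct mtu3_ep *mep)		/* parameter assumed */
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;	/* assumed field name */
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->flag &= ~GPD_FLAGS_HWO;	/* hand the GPD back to software */
		gpd_ring_init(ring, gpd);
		mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));
	}
}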
104 struct qmu_gpd *gpd; in mtu3_gpd_ring_alloc() local
108 gpd = memalign(DCACHELINE_SIZE, QMU_GPD_RING_SIZE); in mtu3_gpd_ring_alloc()
109 if (!gpd) in mtu3_gpd_ring_alloc()
112 memset(gpd, 0, QMU_GPD_RING_SIZE); in mtu3_gpd_ring_alloc()
113 ring->dma = (dma_addr_t)gpd; in mtu3_gpd_ring_alloc()
114 gpd_ring_init(ring, gpd); in mtu3_gpd_ring_alloc()
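Allocation pairs with the init helper above: the whole ring is one cache-line aligned block so per-GPD flush/invalidate calls never straddle unrelated data, and the virtual address doubles as the DMA address (a flat mapping). Sketch only; the endpoint parameter and the error/return values are assumptions:

static int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)	/* parameter assumed */
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;	/* assumed field name */
	struct qmu_gpd *gpd;

	gpd = memalign(DCACHELINE_SIZE, QMU_GPD_RING_SIZE);
	if (!gpd)
		return -ENOMEM;				/* assumed error code */

	memset(gpd, 0, QMU_GPD_RING_SIZE);
	ring->dma = (dma_addr_t)gpd;	/* flat mapping: virtual address == DMA address */
	gpd_ring_init(ring, gpd);

	return 0;
}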
185 struct qmu_gpd *gpd = ring->enqueue; in mtu3_prepare_tx_gpd() local
189 memset(gpd, 0, sizeof(*gpd)); in mtu3_prepare_tx_gpd()
191 gpd->buffer = cpu_to_le32((u32)req->dma); in mtu3_prepare_tx_gpd()
192 gpd->buf_len = cpu_to_le16(req->length); in mtu3_prepare_tx_gpd()
197 mep->epnum, gpd, enq); in mtu3_prepare_tx_gpd()
200 gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); in mtu3_prepare_tx_gpd()
201 mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd)); in mtu3_prepare_tx_gpd()
204 gpd->ext_flag |= GPD_EXT_FLAG_ZLP; in mtu3_prepare_tx_gpd()
206 gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO; in mtu3_prepare_tx_gpd()
208 mreq->gpd = gpd; in mtu3_prepare_tx_gpd()
213 mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); in mtu3_prepare_tx_gpd()
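Taken together, the TX lines describe one enqueue step: fill the current GPD with the request's DMA buffer and length, chain it to the next slot, optionally request a trailing zero-length packet, then set IOC|HWO and flush so the controller owns it. A sketch of that flow; advance_enq_gpd(), the req->zero test, and the mtu3_request/usb_request layout are assumptions drawn from context:

static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;	/* assumed layout */
	struct qmu_gpd *enq;

	memset(gpd, 0, sizeof(*gpd));
	gpd->buffer = cpu_to_le32((u32)req->dma);
	gpd->buf_len = cpu_to_le16(req->length);

	/* Move the enqueue cursor on and chain the current GPD to it */
	enq = advance_enq_gpd(ring);			/* assumed helper */
	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
	mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd));

	if (req->zero)					/* assumed trigger for the ZLP */
		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;

	gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO;	/* interrupt on completion, HW owns it */
	mreq->gpd = gpd;
	mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));

	return 0;
}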
222 struct qmu_gpd *gpd = ring->enqueue; in mtu3_prepare_rx_gpd() local
226 memset(gpd, 0, sizeof(*gpd)); in mtu3_prepare_rx_gpd()
228 gpd->buffer = cpu_to_le32((u32)req->dma); in mtu3_prepare_rx_gpd()
229 gpd->data_buf_len = cpu_to_le16(req->length); in mtu3_prepare_rx_gpd()
234 mep->epnum, gpd, enq); in mtu3_prepare_rx_gpd()
237 gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq)); in mtu3_prepare_rx_gpd()
238 mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd)); in mtu3_prepare_rx_gpd()
240 gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO; in mtu3_prepare_rx_gpd()
242 mreq->gpd = gpd; in mtu3_prepare_rx_gpd()
245 mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd)); in mtu3_prepare_rx_gpd()
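The RX path mirrors the TX setup almost line for line; the visible differences are that the buffer size is advertised through data_buf_len (the capacity the controller may fill) and there is no zero-length-packet flag for OUT transfers. A condensed sketch under the same assumptions as the TX example:

static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;	/* assumed layout */
	struct qmu_gpd *enq;

	memset(gpd, 0, sizeof(*gpd));
	gpd->buffer = cpu_to_le32((u32)req->dma);
	gpd->data_buf_len = cpu_to_le16(req->length);	/* capacity only; HW reports the count later */

	enq = advance_enq_gpd(ring);			/* assumed helper */
	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
	mtu3_flush_cache((uintptr_t)enq, sizeof(*gpd));

	gpd->flag |= GPD_FLAGS_IOC | GPD_FLAGS_HWO;
	mreq->gpd = gpd;
	mtu3_flush_cache((uintptr_t)gpd, sizeof(*gpd));

	return 0;
}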
351 struct qmu_gpd *gpd = ring->dequeue; in qmu_done_tx() local
360 mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); in qmu_done_tx()
363 __func__, epnum, gpd, gpd_current, ring->enqueue); in qmu_done_tx()
365 while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { in qmu_done_tx()
368 if (!mreq || mreq->gpd != gpd) { in qmu_done_tx()
374 req->actual = le16_to_cpu(gpd->buf_len); in qmu_done_tx()
377 gpd = advance_deq_gpd(ring); in qmu_done_tx()
378 mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); in qmu_done_tx()
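The qmu_done_tx() lines are the completion side: starting at the dequeue cursor, the driver invalidates each descriptor, stops at the GPD the hardware is currently on (or at one still marked HWO), reports the completed length from buf_len, and advances. In the sketch below, next_request(), advance_deq_gpd(), mtu3_req_complete(), and the way gpd_current is read back from the queue's current-pointer register are assumptions drawn from context, not matched lines:

static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;	/* assumed endpoint lookup */
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current;
	struct mtu3_request *mreq;
	struct usb_request *req;

	/* The QMU exposes the GPD it is processing; translate it back to a
	 * virtual pointer so it can terminate the walk (assumed helpers).
	 */
	gpd_current = gpd_dma_to_virt(ring, read_txq_cur_addr(mtu->mac_base, epnum));

	mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
		mreq = next_request(mep);		/* assumed helper */
		if (!mreq || mreq->gpd != gpd)
			break;				/* request list out of sync with the ring */

		req = &mreq->request;
		req->actual = le16_to_cpu(gpd->buf_len);
		mtu3_req_complete(mep, req, 0);		/* assumed completion hook */

		gpd = advance_deq_gpd(ring);
		mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));
	}
}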
390 struct qmu_gpd *gpd = ring->dequeue; in qmu_done_rx() local
398 mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); in qmu_done_rx()
401 __func__, epnum, gpd, gpd_current, ring->enqueue); in qmu_done_rx()
403 while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) { in qmu_done_rx()
406 if (!mreq || mreq->gpd != gpd) { in qmu_done_rx()
412 req->actual = le16_to_cpu(gpd->buf_len); in qmu_done_rx()
415 gpd = advance_deq_gpd(ring); in qmu_done_rx()
416 mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd)); in qmu_done_rx()
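qmu_done_rx() is the same walk for OUT endpoints; the noteworthy detail is that req->actual is again taken from buf_len, which the controller writes back with the number of bytes actually received (data_buf_len only carried the capacity at submit time). A sketch under the same assumptions as the TX completion example, with the current GPD read from the RX queue's pointer register:

static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;	/* assumed endpoint lookup */
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current;
	struct mtu3_request *mreq;
	struct usb_request *req;

	/* assumed helpers, as in the TX sketch */
	gpd_current = gpd_dma_to_virt(ring, read_rxq_cur_addr(mtu->mac_base, epnum));

	mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));

	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
		mreq = next_request(mep);		/* assumed helper */
		if (!mreq || mreq->gpd != gpd)
			break;

		req = &mreq->request;
		req->actual = le16_to_cpu(gpd->buf_len);	/* bytes the controller wrote back */
		mtu3_req_complete(mep, req, 0);			/* assumed completion hook */

		gpd = advance_deq_gpd(ring);
		mtu3_inval_cache((uintptr_t)gpd, sizeof(*gpd));
	}
}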