Lines matching refs:ctlr
(each entry: source line number, matching line, enclosing function; the functions are those of the TI CPDMA driver, drivers/net/ethernet/ti/davinci_cpdma.c)

112 	struct cpdma_ctlr		*ctlr;  member
173 #define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs)) argument
176 #define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs)) argument
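Note: these two macros are the driver's only register-access path; every dma_reg_read()/dma_reg_write() below funnels through them. An illustrative expansion, using the CPDMA_TXCONTROL offset that appears later in this listing:

    /* dma_reg_write(ctlr, CPDMA_TXCONTROL, 1) expands to: */
    writel(1, ctlr->dmaregs + CPDMA_TXCONTROL);

    /* dma_reg_read(ctlr, CPDMA_TXCONTROL) expands to: */
    readl(ctlr->dmaregs + CPDMA_TXCONTROL);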
190 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) in cpdma_desc_pool_destroy() argument
192 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_desc_pool_destroy()
202 dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap, in cpdma_desc_pool_destroy()
212 static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr) in cpdma_desc_pool_create() argument
214 struct cpdma_params *cpdma_params = &ctlr->params; in cpdma_desc_pool_create()
218 pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL); in cpdma_desc_pool_create()
221 ctlr->pool = pool; in cpdma_desc_pool_create()
240 pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size), in cpdma_desc_pool_create()
244 dev_err(ctlr->dev, "pool create failed %d\n", ret); in cpdma_desc_pool_create()
250 pool->iomap = devm_ioremap(ctlr->dev, pool->phys, in cpdma_desc_pool_create()
254 pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size, in cpdma_desc_pool_create()
266 dev_err(ctlr->dev, "pool add failed %d\n", ret); in cpdma_desc_pool_create()
273 cpdma_desc_pool_destroy(ctlr); in cpdma_desc_pool_create()
275 ctlr->pool = NULL; in cpdma_desc_pool_create()
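Note on the pool routines above (source lines 190-275): creation pairs a genalloc allocator with a single backing buffer, either dedicated descriptor RAM (ioremapped) or coherent system memory; only the latter needs the explicit dma_free_coherent() in cpdma_desc_pool_destroy(), since the devm_* allocations are released automatically. A simplified reconstruction of that pairing; the hw_addr field name and the error paths are assumptions, the remaining fields are the ones visible in the excerpts:

    /* Sketch: back the descriptor pool with on-chip RAM if a physical
     * address was provided, else with coherent DMA memory. */
    static int example_pool_backing(struct cpdma_ctlr *ctlr)
    {
            struct cpdma_desc_pool *pool = ctlr->pool;

            pool->gen_pool = devm_gen_pool_create(ctlr->dev,
                                                  ilog2(pool->desc_size),
                                                  -1, "cpdma");
            if (IS_ERR(pool->gen_pool))
                    return PTR_ERR(pool->gen_pool);

            if (pool->phys) {
                    /* dedicated descriptor RAM: map it, nothing to free */
                    pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
                                               pool->mem_size);
                    return pool->iomap ? 0 : -ENOMEM;
            }

            /* coherent system memory: the matching dma_free_coherent()
             * is the one visible in cpdma_desc_pool_destroy() above */
            pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
                                              &pool->hw_addr, GFP_KERNEL);
            return pool->cpumap ? 0 : -ENOMEM;
    }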
306 static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) in _cpdma_control_set() argument
311 if (!ctlr->params.has_ext_regs) in _cpdma_control_set()
314 if (ctlr->state != CPDMA_STATE_ACTIVE) in _cpdma_control_set()
323 val = dma_reg_read(ctlr, info->reg); in _cpdma_control_set()
326 dma_reg_write(ctlr, info->reg, val); in _cpdma_control_set()
331 static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control) in _cpdma_control_get() argument
336 if (!ctlr->params.has_ext_regs) in _cpdma_control_get()
339 if (ctlr->state != CPDMA_STATE_ACTIVE) in _cpdma_control_get()
348 ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask; in _cpdma_control_get()
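Note on _cpdma_control_set()/_cpdma_control_get() (source lines 306-348): both guard on has_ext_regs and the ACTIVE state, then perform a shift-and-mask access driven by a per-control table entry. A minimal sketch of the write side, assuming the table entry carries (reg, shift, mask) as the reads above imply:

    /* Assumed shape of the per-control table entry. */
    struct cpdma_control_info {
            u32 reg;
            u32 shift, mask;
    };

    /* Read-modify-write of one bit field inside a DMA register. */
    static void example_control_rmw(struct cpdma_ctlr *ctlr,
                                    const struct cpdma_control_info *info,
                                    int value)
    {
            u32 val = dma_reg_read(ctlr, info->reg);

            val &= ~(info->mask << info->shift);        /* clear the field */
            val |= (value & info->mask) << info->shift; /* write new value */
            dma_reg_write(ctlr, info->reg, val);
    }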
357 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_set_chan_shaper() local
366 dma_reg_write(ctlr, rate_reg, chan->rate_factor); in cpdma_chan_set_chan_shaper()
368 rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM); in cpdma_chan_set_chan_shaper()
371 ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask); in cpdma_chan_set_chan_shaper()
377 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_on() local
378 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_chan_on()
386 if (ctlr->state != CPDMA_STATE_ACTIVE) { in cpdma_chan_on()
390 dma_reg_write(ctlr, chan->int_set, chan->mask); in cpdma_chan_on()
409 struct cpdma_ctlr *ctlr = ch->ctlr; in cpdma_chan_fit_rate() local
417 chan = ctlr->channels[i]; in cpdma_chan_fit_rate()
440 dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n", in cpdma_chan_fit_rate()
445 static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr, in cpdma_chan_set_factors() argument
460 freq = ctlr->params.bus_freq_mhz * 1000 * 32; in cpdma_chan_set_factors()
462 dev_err(ctlr->dev, "The bus frequency is not set\n"); in cpdma_chan_set_factors()
506 dma_reg_write(ctlr, rate_reg, ch->rate_factor); in cpdma_chan_set_factors()
512 struct cpdma_ctlr *ctlr; in cpdma_ctlr_create() local
514 ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL); in cpdma_ctlr_create()
515 if (!ctlr) in cpdma_ctlr_create()
518 ctlr->state = CPDMA_STATE_IDLE; in cpdma_ctlr_create()
519 ctlr->params = *params; in cpdma_ctlr_create()
520 ctlr->dev = params->dev; in cpdma_ctlr_create()
521 ctlr->chan_num = 0; in cpdma_ctlr_create()
522 spin_lock_init(&ctlr->lock); in cpdma_ctlr_create()
524 if (cpdma_desc_pool_create(ctlr)) in cpdma_ctlr_create()
527 ctlr->num_tx_desc = ctlr->pool->num_desc / 2; in cpdma_ctlr_create()
528 ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc; in cpdma_ctlr_create()
530 if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) in cpdma_ctlr_create()
531 ctlr->num_chan = CPDMA_MAX_CHANNELS; in cpdma_ctlr_create()
532 return ctlr; in cpdma_ctlr_create()
535 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) in cpdma_ctlr_start() argument
541 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_start()
542 if (ctlr->state != CPDMA_STATE_IDLE) { in cpdma_ctlr_start()
543 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_start()
547 if (ctlr->params.has_soft_reset) { in cpdma_ctlr_start()
550 dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); in cpdma_ctlr_start()
552 if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) in cpdma_ctlr_start()
560 for (i = 0; i < ctlr->num_chan; i++) { in cpdma_ctlr_start()
561 writel(0, ctlr->params.txhdp + 4 * i); in cpdma_ctlr_start()
562 writel(0, ctlr->params.rxhdp + 4 * i); in cpdma_ctlr_start()
563 writel(0, ctlr->params.txcp + 4 * i); in cpdma_ctlr_start()
564 writel(0, ctlr->params.rxcp + 4 * i); in cpdma_ctlr_start()
567 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_start()
568 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_start()
570 dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); in cpdma_ctlr_start()
571 dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); in cpdma_ctlr_start()
573 ctlr->state = CPDMA_STATE_ACTIVE; in cpdma_ctlr_start()
576 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_ctlr_start()
577 chan = ctlr->channels[i]; in cpdma_ctlr_start()
588 _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode); in cpdma_ctlr_start()
589 _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0); in cpdma_ctlr_start()
591 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_start()
595 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) in cpdma_ctlr_stop() argument
600 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_stop()
601 if (ctlr->state != CPDMA_STATE_ACTIVE) { in cpdma_ctlr_stop()
602 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_stop()
606 ctlr->state = CPDMA_STATE_TEARDOWN; in cpdma_ctlr_stop()
607 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_stop()
609 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_ctlr_stop()
610 if (ctlr->channels[i]) in cpdma_ctlr_stop()
611 cpdma_chan_stop(ctlr->channels[i]); in cpdma_ctlr_stop()
614 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_stop()
615 dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_stop()
616 dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); in cpdma_ctlr_stop()
618 dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); in cpdma_ctlr_stop()
619 dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); in cpdma_ctlr_stop()
621 ctlr->state = CPDMA_STATE_IDLE; in cpdma_ctlr_stop()
623 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_stop()
627 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) in cpdma_ctlr_destroy() argument
631 if (!ctlr) in cpdma_ctlr_destroy()
634 if (ctlr->state != CPDMA_STATE_IDLE) in cpdma_ctlr_destroy()
635 cpdma_ctlr_stop(ctlr); in cpdma_ctlr_destroy()
637 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) in cpdma_ctlr_destroy()
638 cpdma_chan_destroy(ctlr->channels[i]); in cpdma_ctlr_destroy()
640 cpdma_desc_pool_destroy(ctlr); in cpdma_ctlr_destroy()
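Note: source lines 512-642 together form the controller lifecycle. cpdma_ctlr_create() leaves a devm-allocated controller in CPDMA_STATE_IDLE with the descriptor pool split half RX / half TX; cpdma_ctlr_start() optionally soft-resets, zeroes the per-channel head/completion pointers, and masks all channel interrupts; cpdma_ctlr_destroy() stops the controller first if it is still active. A caller-side sketch of that sequence; the register offsets and clock rate are placeholders, the cpdma_params field names are those referenced in this listing:

    static struct cpdma_ctlr *example_bringup(struct device *dev,
                                              void __iomem *regs)
    {
            struct cpdma_params params = {
                    .dev            = dev,
                    .dmaregs        = regs,
                    .txhdp          = regs + 0x600, /* placeholder offsets */
                    .rxhdp          = regs + 0x620,
                    .txcp           = regs + 0x640,
                    .rxcp           = regs + 0x660,
                    .num_chan       = 8,
                    .bus_freq_mhz   = 125,
                    .has_soft_reset = true,
            };
            struct cpdma_ctlr *ctlr;

            ctlr = cpdma_ctlr_create(&params); /* NULL on failure */
            if (!ctlr)
                    return NULL;

            if (cpdma_ctlr_start(ctlr)) {      /* reset, enable TX/RX */
                    cpdma_ctlr_destroy(ctlr);
                    return NULL;
            }
            return ctlr;
            /* teardown: cpdma_ctlr_destroy() alone suffices, since it
             * calls cpdma_ctlr_stop() when the state is not IDLE */
    }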
644 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) in cpdma_ctlr_int_ctrl() argument
649 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_ctlr_int_ctrl()
650 if (ctlr->state != CPDMA_STATE_ACTIVE) { in cpdma_ctlr_int_ctrl()
651 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_int_ctrl()
655 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_ctlr_int_ctrl()
656 if (ctlr->channels[i]) in cpdma_ctlr_int_ctrl()
657 cpdma_chan_int_ctrl(ctlr->channels[i], enable); in cpdma_ctlr_int_ctrl()
660 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_ctlr_int_ctrl()
664 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value) in cpdma_ctlr_eoi() argument
666 dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value); in cpdma_ctlr_eoi()
669 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr) in cpdma_ctrl_rxchs_state() argument
671 return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED); in cpdma_ctrl_rxchs_state()
674 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr) in cpdma_ctrl_txchs_state() argument
676 return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED); in cpdma_ctrl_txchs_state()
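Note: cpdma_ctlr_int_ctrl(), cpdma_ctlr_eoi() and the two *chs_state() readers (source lines 644-676) are the interrupt-side surface: mask or unmask every channel, write the end-of-interrupt vector, and read the masked RX/TX interrupt status. A hedged sketch of how a driver typically wires them into an IRQ handler plus NAPI poll; CPDMA_EOI_RX and cpdma_chan_process() are declared alongside these functions in the driver's header, while the private structure and the wiring are illustrative:

    struct example_priv {
            struct cpdma_ctlr *dma;
            struct cpdma_chan *rx_chan[8];
            struct cpdma_chan *txch;
            struct napi_struct napi;
    };

    static irqreturn_t example_rx_irq(int irq, void *dev_id)
    {
            struct example_priv *priv = dev_id;

            cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);  /* ack the vector */
            cpdma_ctlr_int_ctrl(priv->dma, false);    /* mask all channels */
            napi_schedule(&priv->napi);
            return IRQ_HANDLED;
    }

    static int example_rx_poll(struct napi_struct *napi, int budget)
    {
            struct example_priv *priv =
                    container_of(napi, struct example_priv, napi);
            u32 pending = cpdma_ctrl_rxchs_state(priv->dma);
            int done = 0;

            while (pending && done < budget) {
                    int ch = fls(pending) - 1;

                    done += cpdma_chan_process(priv->rx_chan[ch],
                                               budget - done);
                    pending &= ~BIT(ch);
            }

            if (done < budget) {
                    napi_complete_done(napi, done);
                    cpdma_ctlr_int_ctrl(priv->dma, true); /* unmask */
            }
            return done;
    }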
679 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr, in cpdma_chan_set_descs() argument
700 chan = ctlr->channels[i]; in cpdma_chan_set_descs()
725 static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr) in cpdma_chan_split_pool() argument
734 if (!ctlr->chan_num) in cpdma_chan_split_pool()
737 for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { in cpdma_chan_split_pool()
738 chan = ctlr->channels[i]; in cpdma_chan_split_pool()
756 tx_desc_num = ctlr->num_tx_desc; in cpdma_chan_split_pool()
757 rx_desc_num = ctlr->num_rx_desc; in cpdma_chan_split_pool()
768 cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc); in cpdma_chan_split_pool()
769 cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc); in cpdma_chan_split_pool()
786 struct cpdma_ctlr *ctlr = ch->ctlr; in cpdma_chan_set_weight() local
790 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_set_weight()
794 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_set_weight()
801 ret = cpdma_chan_split_pool(ctlr); in cpdma_chan_set_weight()
802 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_set_weight()
810 u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr) in cpdma_chan_get_min_rate() argument
814 divident = ctlr->params.bus_freq_mhz * 32 * 1000; in cpdma_chan_get_min_rate()
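Note on cpdma_chan_get_min_rate() (source lines 810-814): the smallest enforceable shaper rate is the bus clock spread across the widest rate-limit count ("divident" is the source's own spelling). A worked sketch; CPDMA_MAX_RLIM_CNT does not appear in this listing, so its use as the divisor is an assumption:

    /* e.g. bus_freq_mhz = 125: 125 * 32 * 1000 = 4,000,000, divided by
     * the assumed counter range, gives the minimum programmable rate. */
    static u32 example_min_rate(struct cpdma_ctlr *ctlr)
    {
            u32 divident = ctlr->params.bus_freq_mhz * 32 * 1000;
            u32 divisor = 1 + CPDMA_MAX_RLIM_CNT; /* assumed divisor */

            return DIV_ROUND_UP(divident, divisor);
    }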
828 struct cpdma_ctlr *ctlr; in cpdma_chan_set_rate() local
838 ctlr = ch->ctlr; in cpdma_chan_set_rate()
839 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_set_rate()
846 ret = cpdma_chan_set_factors(ctlr, ch); in cpdma_chan_set_rate()
853 _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask); in cpdma_chan_set_rate()
854 _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode); in cpdma_chan_set_rate()
855 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_set_rate()
860 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_set_rate()
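Note on cpdma_chan_set_rate() (source lines 828-860): under ctlr->lock it recomputes the channel's rate factor, then updates the CPDMA_TX_RLIM mask and the fixed-priority mode together. A hedged caller sketch that validates against the bus-derived minimum first; cpdma_chan_set_weight() (source lines 786-802) follows the same lock-then-resplit shape for descriptor weights:

    static int example_set_tx_rate(struct cpdma_ctlr *ctlr,
                                   struct cpdma_chan *ch, u32 rate)
    {
            /* 0 disables shaping; anything below the bus-derived
             * minimum cannot be represented by the rate factor */
            if (rate && rate < cpdma_chan_get_min_rate(ctlr))
                    return -EINVAL;

            return cpdma_chan_set_rate(ch, rate);
    }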
876 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, in cpdma_chan_create() argument
885 if (__chan_linear(chan_num) >= ctlr->num_chan) in cpdma_chan_create()
888 chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL); in cpdma_chan_create()
892 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_create()
893 if (ctlr->channels[chan_num]) { in cpdma_chan_create()
894 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_create()
895 devm_kfree(ctlr->dev, chan); in cpdma_chan_create()
899 chan->ctlr = ctlr; in cpdma_chan_create()
907 chan->hdp = ctlr->params.rxhdp + offset; in cpdma_chan_create()
908 chan->cp = ctlr->params.rxcp + offset; in cpdma_chan_create()
909 chan->rxfree = ctlr->params.rxfree + offset; in cpdma_chan_create()
915 chan->hdp = ctlr->params.txhdp + offset; in cpdma_chan_create()
916 chan->cp = ctlr->params.txcp + offset; in cpdma_chan_create()
926 ctlr->channels[chan_num] = chan; in cpdma_chan_create()
927 ctlr->chan_num++; in cpdma_chan_create()
929 cpdma_chan_split_pool(ctlr); in cpdma_chan_create()
931 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_create()
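Note on cpdma_chan_create() (source lines 876-931): a channel slot is claimed under ctlr->lock, its hdp/cp (and, for RX, rxfree) register pointers are derived from the channel number, and the descriptor pool is re-split across the updated chan_num. A hedged caller sketch; the (token, len, status) handler signature and the tx_chan_num() numbering helper come from the driver's header:

    /* TX completion: cpdma has already unmapped the buffer (see
     * __cpdma_chan_free() below), so only the skb remains to drop. */
    static void example_tx_done(void *token, int len, int status)
    {
            struct sk_buff *skb = token;

            dev_consume_skb_any(skb);
    }

    static struct cpdma_chan *example_open_tx(struct cpdma_ctlr *ctlr)
    {
            struct cpdma_chan *txch;

            txch = cpdma_chan_create(ctlr, tx_chan_num(0),
                                     example_tx_done, 0 /* not RX */);
            if (IS_ERR(txch))
                    return txch;

            if (cpdma_chan_start(txch)) {
                    cpdma_chan_destroy(txch);
                    return ERR_PTR(-EBUSY);
            }
            return txch;
    }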
949 struct cpdma_ctlr *ctlr; in cpdma_chan_destroy() local
954 ctlr = chan->ctlr; in cpdma_chan_destroy()
956 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_destroy()
959 ctlr->channels[chan->chan_num] = NULL; in cpdma_chan_destroy()
960 ctlr->chan_num--; in cpdma_chan_destroy()
961 devm_kfree(ctlr->dev, chan); in cpdma_chan_destroy()
962 cpdma_chan_split_pool(ctlr); in cpdma_chan_destroy()
964 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_destroy()
983 struct cpdma_ctlr *ctlr = chan->ctlr; in __cpdma_chan_submit() local
985 struct cpdma_desc_pool *pool = ctlr->pool; in __cpdma_chan_submit()
1019 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_submit_si() local
1031 desc = cpdma_desc_alloc(ctlr->pool); in cpdma_chan_submit_si()
1037 if (len < ctlr->params.min_packet_size) { in cpdma_chan_submit_si()
1038 len = ctlr->params.min_packet_size; in cpdma_chan_submit_si()
1047 dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir); in cpdma_chan_submit_si()
1049 buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir); in cpdma_chan_submit_si()
1050 ret = dma_mapping_error(ctlr->dev, buffer); in cpdma_chan_submit_si()
1052 cpdma_desc_free(ctlr->pool, desc, 1); in cpdma_chan_submit_si()
1181 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_check_free_tx_desc() local
1182 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_check_free_tx_desc()
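Note on the submit path (source lines 983-1182): cpdma_chan_submit_si() allocates a descriptor from the pool, pads short frames up to min_packet_size, maps the buffer with dma_map_single(), and frees the descriptor again if mapping fails; cpdma_check_free_tx_desc() then tells the caller whether another frame would still fit. A hedged ndo_start_xmit-style sketch built on the public cpdma_chan_submit() wrapper, reusing the example_priv structure from the interrupt sketch above:

    static netdev_tx_t example_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);
            int ret;

            ret = cpdma_chan_submit(priv->txch, skb, skb->data,
                                    skb->len, 0 /* not directed */);
            if (ret) {
                    /* descriptor pool exhausted: back-pressure the stack */
                    netif_stop_queue(ndev);
                    return NETDEV_TX_BUSY;
            }

            /* stop the queue before the pool actually runs dry */
            if (!cpdma_check_free_tx_desc(priv->txch))
                    netif_stop_queue(ndev);

            return NETDEV_TX_OK;
    }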
1197 struct cpdma_ctlr *ctlr = chan->ctlr; in __cpdma_chan_free() local
1198 struct cpdma_desc_pool *pool = ctlr->pool; in __cpdma_chan_free()
1209 dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen, in __cpdma_chan_free()
1212 dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); in __cpdma_chan_free()
1221 struct cpdma_ctlr *ctlr = chan->ctlr; in __cpdma_chan_process() local
1225 struct cpdma_desc_pool *pool = ctlr->pool; in __cpdma_chan_process()
1295 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_start() local
1299 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_chan_start()
1301 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_chan_start()
1314 struct cpdma_ctlr *ctlr = chan->ctlr; in cpdma_chan_stop() local
1315 struct cpdma_desc_pool *pool = ctlr->pool; in cpdma_chan_stop()
1327 dma_reg_write(ctlr, chan->int_clear, chan->mask); in cpdma_chan_stop()
1330 dma_reg_write(ctlr, chan->td, chan_linear(chan)); in cpdma_chan_stop()
1384 dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear, in cpdma_chan_int_ctrl()
1391 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control) in cpdma_control_get() argument
1396 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_control_get()
1397 ret = _cpdma_control_get(ctlr, control); in cpdma_control_get()
1398 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_control_get()
1403 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) in cpdma_control_set() argument
1408 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_control_set()
1409 ret = _cpdma_control_set(ctlr, control, value); in cpdma_control_set()
1410 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_control_set()
1415 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr) in cpdma_get_num_rx_descs() argument
1417 return ctlr->num_rx_desc; in cpdma_get_num_rx_descs()
1420 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr) in cpdma_get_num_tx_descs() argument
1422 return ctlr->num_tx_desc; in cpdma_get_num_tx_descs()
1425 int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc) in cpdma_set_num_rx_descs() argument
1430 spin_lock_irqsave(&ctlr->lock, flags); in cpdma_set_num_rx_descs()
1432 temp = ctlr->num_rx_desc; in cpdma_set_num_rx_descs()
1433 ctlr->num_rx_desc = num_rx_desc; in cpdma_set_num_rx_descs()
1434 ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; in cpdma_set_num_rx_descs()
1435 ret = cpdma_chan_split_pool(ctlr); in cpdma_set_num_rx_descs()
1437 ctlr->num_rx_desc = temp; in cpdma_set_num_rx_descs()
1438 ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc; in cpdma_set_num_rx_descs()
1441 spin_unlock_irqrestore(&ctlr->lock, flags); in cpdma_set_num_rx_descs()
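Note on the descriptor accounting (source lines 1415-1441, together with cpdma_chan_split_pool() at 725-769): num_rx_desc and num_tx_desc always partition pool->num_desc, and cpdma_set_num_rx_descs() rolls the partition back if re-splitting across channels fails. A hedged sketch of reshaping the split, e.g. from an ethtool ring-size handler:

    static int example_resize_rx(struct cpdma_ctlr *ctlr, int rx_wanted)
    {
            int total = cpdma_get_num_rx_descs(ctlr) +
                        cpdma_get_num_tx_descs(ctlr);

            /* keep at least one descriptor on the TX side */
            if (rx_wanted < 1 || rx_wanted >= total)
                    return -EINVAL;

            /* re-splits the pool under ctlr->lock; restores the old
             * split automatically on failure */
            return cpdma_set_num_rx_descs(ctlr, rx_wanted);
    }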