
Searched refs: async_tx (Results 1 – 25 of 33), sorted by relevance


/linux/drivers/dma/
mmp_pdma.c
378 desc->async_tx.phys = pdesc; in mmp_pdma_alloc_descriptor()
488 new->async_tx.cookie = 0; in mmp_pdma_prep_memcpy()
489 async_tx_ack(&new->async_tx); in mmp_pdma_prep_memcpy()
516 return &first->async_tx; in mmp_pdma_prep_memcpy()
573 new->async_tx.cookie = 0; in mmp_pdma_prep_slave_sg()
574 async_tx_ack(&new->async_tx); in mmp_pdma_prep_slave_sg()
587 first->async_tx.flags = flags; in mmp_pdma_prep_slave_sg()
596 return &first->async_tx; in mmp_pdma_prep_slave_sg()
661 new->async_tx.cookie = 0; in mmp_pdma_prep_dma_cyclic()
662 async_tx_ack(&new->async_tx); in mmp_pdma_prep_dma_cyclic()
[all …]
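The mmp_pdma hits above trace the common shape of a dmaengine prep routine: every descriptor chained after the first gets cookie 0 and is pre-acked via async_tx_ack(), the caller's flags land on the first descriptor, and &first->async_tx is what the client receives. Below is a minimal sketch of that shape, not mmp_pdma's actual code; the xxx_* names, the 8 KiB chunk limit, and the bare kzalloc() are illustrative assumptions (real drivers carve descriptors from a pool and program hardware fields in the loop).

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/sizes.h>

#define XXX_MAX_XFER	SZ_8K		/* assumed per-descriptor limit */

struct xxx_desc {			/* hypothetical software descriptor */
	struct dma_async_tx_descriptor async_tx;
	struct xxx_desc *next;
};

static struct dma_async_tx_descriptor *
xxx_prep_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct xxx_desc *first = NULL, *prev = NULL, *new;
	size_t copy;

	while (len) {
		new = kzalloc(sizeof(*new), GFP_NOWAIT);
		if (!new)
			return NULL;	/* a real driver unwinds the chain here */
		dma_async_tx_descriptor_init(&new->async_tx, dchan);

		copy = min_t(size_t, len, XXX_MAX_XFER);
		/* ... fill the hardware descriptor for dst/src/copy ... */

		new->async_tx.cookie = 0;	/* chain members carry no cookie */
		async_tx_ack(&new->async_tx);	/* and are never acked by the client */

		if (!first)
			first = new;
		else
			prev->next = new;
		prev = new;

		dst += copy;
		src += copy;
		len -= copy;
	}

	/* Only the head descriptor is visible to the client: it takes the
	 * caller's DMA_CTRL_* flags (handing ack control back to the client)
	 * and a cookie of -EBUSY until tx_submit() assigns the real one. */
	first->async_tx.flags = flags;
	first->async_tx.cookie = -EBUSY;
	return &first->async_tx;
}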
iop-adma.c
122 async_tx_test_ack(&iter->async_tx)); in __iop_adma_slot_cleanup()
124 prefetch(&_iter->async_tx); in __iop_adma_slot_cleanup()
280 prefetch(&_iter->async_tx); in iop_adma_alloc_slots()
316 async_tx_ack(&iter->async_tx); in iop_adma_alloc_slots()
320 iter->async_tx.cookie = 0; in iop_adma_alloc_slots()
498 sw_desc->async_tx.flags = flags; in iop_adma_prep_dma_interrupt()
529 sw_desc->async_tx.flags = flags; in iop_adma_prep_dma_memcpy()
561 sw_desc->async_tx.flags = flags; in iop_adma_prep_dma_xor()
596 sw_desc->async_tx.flags = flags; in iop_adma_prep_dma_xor_val()
648 sw_desc->async_tx.flags = flags; in iop_adma_prep_dma_pq()
[all …]
mv_xor_v2.c
183 struct dma_async_tx_descriptor async_tx; member
314 __func__, sw_desc, &sw_desc->async_tx); in mv_xor_v2_tx_submit()
355 if (async_tx_test_ack(&sw_desc->async_tx)) { in mv_xor_v2_prep_sw_desc()
395 sw_desc->async_tx.flags = flags; in mv_xor_v2_prep_dma_memcpy()
424 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_memcpy()
451 sw_desc->async_tx.flags = flags; in mv_xor_v2_prep_dma_xor()
483 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_xor()
513 return &sw_desc->async_tx; in mv_xor_v2_prep_dma_interrupt()
578 if (next_pending_sw_desc->async_tx.cookie > 0) { in mv_xor_v2_tasklet()
588 &next_pending_sw_desc->async_tx, NULL); in mv_xor_v2_tasklet()
[all …]
mv_xor.c
196 BUG_ON(desc->async_tx.cookie < 0); in mv_desc_run_tx_complete_actions()
198 if (desc->async_tx.cookie > 0) { in mv_desc_run_tx_complete_actions()
199 cookie = desc->async_tx.cookie; in mv_desc_run_tx_complete_actions()
201 dma_descriptor_unmap(&desc->async_tx); in mv_desc_run_tx_complete_actions()
209 dma_run_dependencies(&desc->async_tx); in mv_desc_run_tx_complete_actions()
365 async_tx_ack(&iter->async_tx); in mv_chan_alloc_slot()
366 iter->async_tx.cookie = -EBUSY; in mv_chan_alloc_slot()
392 __func__, sw_desc, &sw_desc->async_tx); in mv_xor_tx_submit()
408 &old_chain_tail->async_tx.phys); in mv_xor_tx_submit()
579 sw_desc->async_tx.flags = flags; in mv_xor_prep_dma_xor()
[all …]
fsldma.c
428 cookie = dma_cookie_assign(&child->async_tx); in fsl_dma_tx_submit()
471 desc->async_tx.tx_submit = fsl_dma_tx_submit; in fsl_dma_alloc_descriptor()
472 desc->async_tx.phys = pdesc; in fsl_dma_alloc_descriptor()
493 if (async_tx_test_ack(&desc->async_tx)) in fsldma_clean_completed_descriptor()
547 if (!async_tx_test_ack(&desc->async_tx)) { in fsldma_clean_running_descriptor()
619 set_cdar(chan, desc->async_tx.phys); in fsl_chan_xfer_ld_queue()
658 if (desc->async_tx.phys == curr_phys) { in fsldma_cleanup_descriptors()
795 new->async_tx.cookie = 0; in fsl_dma_prep_memcpy()
796 async_tx_ack(&new->async_tx); in fsl_dma_prep_memcpy()
808 new->async_tx.cookie = -EBUSY; in fsl_dma_prep_memcpy()
[all …]
fsl_raid.c
137 dma_cookie_complete(&desc->async_tx); in fsl_re_desc_done()
138 dma_descriptor_unmap(&desc->async_tx); in fsl_re_desc_done()
139 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in fsl_re_desc_done()
149 if (async_tx_test_ack(&desc->async_tx)) in fsl_re_cleanup_descs()
254 desc->async_tx.tx_submit = fsl_re_tx_submit; in fsl_re_init_desc()
255 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); in fsl_re_init_desc()
287 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
304 desc->async_tx.flags = flags; in fsl_re_chan_alloc_desc()
382 return &desc->async_tx; in fsl_re_prep_dma_genq()
516 return &desc->async_tx; in fsl_re_prep_dma_pq()
[all …]
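fsl_re_desc_done() above is the canonical completion sequence, shared with mv_desc_run_tx_complete_actions() in mv_xor.c: complete the cookie, unmap whatever the core mapped for the transfer, invoke the client callback, and, in drivers that support chained offloads, start any dependent transactions. A hedged sketch follows, reusing the hypothetical xxx_desc from the earlier example; dma_cookie_complete(), dma_descriptor_unmap() and dmaengine_desc_get_callback_invoke() come from the in-tree drivers/dma/dmaengine.h helper header.

#include <linux/dmaengine.h>
#include "dmaengine.h"		/* in-tree drivers/dma helper header */

static void xxx_desc_done(struct xxx_desc *desc)
{
	/* Publish completion so dma_cookie_status() reports DMA_COMPLETE. */
	dma_cookie_complete(&desc->async_tx);

	/* Undo the dma_map_*() work the core attached to this descriptor. */
	dma_descriptor_unmap(&desc->async_tx);

	/* Invoke the client's callback, if any; drivers usually run this
	 * from a tasklet rather than hard-IRQ context. */
	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);

	/* Kick transactions that named this one as their dependency. */
	dma_run_dependencies(&desc->async_tx);
}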
altera-msgdma.c
161 struct dma_async_tx_descriptor async_tx; member
202 #define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
371 async_tx_ack(&first->async_tx); in msgdma_prep_memcpy()
372 first->async_tx.flags = flags; in msgdma_prep_memcpy()
374 return &first->async_tx; in msgdma_prep_memcpy()
456 first->async_tx.flags = flags; in msgdma_prep_slave_sg()
458 return &first->async_tx; in msgdma_prep_slave_sg()
592 dmaengine_desc_get_callback(&desc->async_tx, &cb); in msgdma_chan_desc_cleanup()
617 dma_cookie_complete(&desc->async_tx); in msgdma_complete_descriptor()
670 dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan); in msgdma_alloc_chan_resources()
[all …]
nbpfaxi.c
152 struct dma_async_tx_descriptor async_tx; member
648 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
655 if (desc->async_tx.cookie == cookie) { in nbpf_tx_status()
724 desc->async_tx.tx_submit = nbpf_tx_submit; in nbpf_desc_page_alloc()
858 __func__, desc, desc->async_tx.cookie); in nbpf_chan_idle()
956 desc->async_tx.flags = flags; in nbpf_prep_sg()
957 desc->async_tx.cookie = -EBUSY; in nbpf_prep_sg()
986 return &desc->async_tx; in nbpf_prep_sg()
1132 } else if (async_tx_test_ack(&desc->async_tx)) { in nbpf_chan_tasklet()
1154 dma_cookie_complete(&desc->async_tx); in nbpf_chan_tasklet()
[all …]
fsldma.h
104 struct dma_async_tx_descriptor async_tx; member
192 #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
mv_xor.h
147 struct dma_async_tx_descriptor async_tx; member
fsl_raid.h
294 struct dma_async_tx_descriptor async_tx; member
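The three `member` hits above (fsldma.h:104, mv_xor.h:147, fsl_raid.h:294) are the same idiom: the driver's software descriptor embeds the generic struct dma_async_tx_descriptor, and a container_of() wrapper converts the generic pointer the dmaengine core passes around back into the driver's type. Sketched with hypothetical xxx_* names:

#include <linux/dmaengine.h>
#include <linux/list.h>

struct xxx_desc_sw {
	u32 hw[8];			/* device-visible descriptor (assumed layout) */
	struct list_head node;		/* driver bookkeeping */
	struct dma_async_tx_descriptor async_tx;	/* what the core sees */
};

/* Same shape as tx_to_fsl_desc() at fsldma.h:192 and
 * tx_to_desc() at altera-msgdma.c:202. */
#define tx_to_xxx_desc(tx) container_of(tx, struct xxx_desc_sw, async_tx)

The core only ever hands back &desc->async_tx, to tx_submit(), status queries, and dependency handling, so container_of() is the one road back to the driver's bookkeeping.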
/linux/drivers/dma/sh/
shdma-base.c
92 chunk->async_tx.cookie > 0 || in shdma_tx_submit()
98 chunk->async_tx.callback = callback; in shdma_tx_submit()
102 chunk->async_tx.callback = NULL; in shdma_tx_submit()
389 async_tx_ack(&desc->async_tx); in __ld_cleanup()
524 new->async_tx.cookie = -EBUSY; in shdma_add_desc()
528 new->async_tx.cookie = -EINVAL; in shdma_add_desc()
534 new->async_tx.cookie); in shdma_add_desc()
537 new->async_tx.flags = flags; in shdma_add_desc()
620 new->async_tx.cookie = -ENOSPC; in shdma_prep_sg()
627 return &first->async_tx; in shdma_prep_sg()
[all …]
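shdma_tx_submit() here and fsl_dma_tx_submit() above show the submit half of the lifecycle: under the channel lock, assign the next cookie and move the chain onto the pending list; the -EBUSY placeholder set at prep time is replaced here. A sketch building on the xxx_desc_sw/tx_to_xxx_desc definitions above; the channel wrapper type and its list are assumptions.

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "dmaengine.h"		/* dma_cookie_assign() */

struct xxx_chan {			/* hypothetical channel wrapper */
	struct dma_chan common;
	spinlock_t lock;
	struct list_head pending;
};

#define to_xxx_chan(c) container_of(c, struct xxx_chan, common)

static dma_cookie_t xxx_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xxx_desc_sw *desc = tx_to_xxx_desc(tx);
	struct xxx_chan *chan = to_xxx_chan(tx->chan);
	unsigned long irqflags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, irqflags);
	cookie = dma_cookie_assign(tx);	/* monotonically increasing per channel */
	list_add_tail(&desc->node, &chan->pending);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	return cookie;		/* hardware starts only at issue_pending() */
}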
rcar-dmac.c
73 struct dma_async_tx_descriptor async_tx; member
440 else if (desc->async_tx.callback) in rcar_dmac_chan_start_xfer()
599 if (async_tx_test_ack(&desc->async_tx)) { in rcar_dmac_desc_recycle_acked()
950 desc->async_tx.flags = dma_flags; in rcar_dmac_chan_prep_sg()
951 desc->async_tx.cookie = -EBUSY; in rcar_dmac_chan_prep_sg()
1047 return &desc->async_tx; in rcar_dmac_chan_prep_sg()
1351 if (cookie != desc->async_tx.cookie) { in rcar_dmac_chan_get_residue()
1353 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1357 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
1361 if (cookie == desc->async_tx.cookie) in rcar_dmac_chan_get_residue()
[all …]
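rcar_dmac_chan_get_residue() above walks the queued, pending and running descriptors comparing cookies; the tx_status entry point wrapping it follows the standard cookie-status-plus-residue shape. A sketch, where xxx_get_residue() stands in for that driver-specific walk and is hypothetical:

#include <linux/dmaengine.h>
#include "dmaengine.h"		/* dma_cookie_status(), dma_set_residue() */

static unsigned int xxx_get_residue(struct dma_chan *chan, dma_cookie_t cookie);

static enum dma_status xxx_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status status;

	/* The core compares the cookie against the channel's counters. */
	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	/* Still in flight: report how many bytes remain on this cookie. */
	dma_set_residue(txstate, xxx_get_residue(chan, cookie));
	return status;
}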
shdma.h
57 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
/linux/drivers/dma/xilinx/
xilinx_dma.c
1469 head_desc->async_tx.phys); in xilinx_cdma_start_transfer()
1537 head_desc->async_tx.phys); in xilinx_dma_start_transfer()
1618 head_desc->async_tx.phys); in xilinx_mcdma_start_transfer()
2027 async_tx_ack(&desc->async_tx); in xilinx_vdma_dma_prep_interleaved()
2065 desc->async_tx.phys = segment->phys; in xilinx_vdma_dma_prep_interleaved()
2067 return &desc->async_tx; in xilinx_vdma_dma_prep_interleaved()
2120 desc->async_tx.phys = segment->phys; in xilinx_cdma_prep_memcpy()
2123 return &desc->async_tx; in xilinx_cdma_prep_memcpy()
2222 return &desc->async_tx; in xilinx_dma_prep_slave_sg()
2329 return &desc->async_tx; in xilinx_dma_prep_dma_cyclic()
[all …]
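The xilinx start_transfer hits use async_tx.phys, the DMA address of the hardware descriptor recorded at prep time (compare mmp_pdma.c:378 and fsldma.c:472 above), as the value programmed into the controller's descriptor-chain register. A sketch with an invented register layout, reusing the hypothetical xxx_desc_sw type:

#include <linux/dmaengine.h>
#include <linux/io.h>

#define XXX_REG_DESC	0x08	/* assumed descriptor-pointer register */

static void xxx_start_transfer(void __iomem *regs, struct xxx_desc_sw *head)
{
	/* Point the engine at the first hardware descriptor in the chain. */
	writel(lower_32_bits(head->async_tx.phys), regs + XXX_REG_DESC);
	/* ... a driver-specific run/go bit would be set after this ... */
}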
zynqmp_dma.c
145 async_tx)
182 struct dma_async_tx_descriptor async_tx; member
483 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); in zynqmp_dma_alloc_chan_resources()
484 desc->async_tx.tx_submit = zynqmp_dma_tx_submit; in zynqmp_dma_alloc_chan_resources()
610 dmaengine_desc_get_callback(&desc->async_tx, &cb); in zynqmp_dma_chan_desc_cleanup()
637 dma_cookie_complete(&desc->async_tx); in zynqmp_dma_complete_descriptor()
850 async_tx_ack(&first->async_tx); in zynqmp_dma_prep_memcpy()
851 first->async_tx.flags = flags; in zynqmp_dma_prep_memcpy()
852 return &first->async_tx; in zynqmp_dma_prep_memcpy()
/linux/drivers/dma/ppc4xx/
adma.c
1467 if (desc->async_tx.cookie > 0) { in ppc440spe_adma_run_tx_complete_actions()
1468 cookie = desc->async_tx.cookie; in ppc440spe_adma_run_tx_complete_actions()
1469 desc->async_tx.cookie = 0; in ppc440spe_adma_run_tx_complete_actions()
1563 prefetch(&_iter->async_tx); in __ppc440spe_adma_slot_cleanup()
1709 prefetch(&_iter->async_tx); in ppc440spe_adma_alloc_slots()
1732 iter->async_tx.cookie = 0; in ppc440spe_adma_alloc_slots()
1965 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_interrupt()
2006 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_memcpy()
2051 sw_desc->async_tx.flags = flags; in ppc440spe_adma_prep_dma_xor()
2156 sw_desc->async_tx.flags = flags; in ppc440spe_dma01_prep_mult()
[all …]
adma.h
23 container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
150 struct dma_async_tx_descriptor async_tx; member
/linux/Documentation/crypto/
async-tx-api.rst
31 The async_tx API provides methods for describing a chain of asynchronous
106 async_tx call will implicitly set the acknowledged state.
191 See include/linux/async_tx.h for more information on the flags. See the
202 accommodate assumptions made by applications using the async_tx API:
263 include/linux/async_tx.h:
264 core header file for the async_tx api
265 crypto/async_tx/async_tx.c:
266 async_tx interface to dmaengine and common code
267 crypto/async_tx/async_memcpy.c:
269 crypto/async_tx/async_xor.c:
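The async-tx-api.rst hits above describe the client-facing side: build a chain of offloaded operations, pass flags such as ASYNC_TX_ACK per the acknowledged-state rules the document mentions, and flush with async_tx_issue_pending_all(). A hedged usage sketch; the xxx_* wrapper is invented, while the async_tx calls themselves are the documented API from include/linux/async_tx.h:

#include <linux/async_tx.h>
#include <linux/completion.h>

static void xxx_copy_done(void *param)
{
	complete(param);	/* param is the struct completion below */
}

static void xxx_copy_page(struct page *dst, struct page *src,
			  struct completion *done)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* ASYNC_TX_ACK: nothing is chained after this operation, so the
	 * engine may reclaim the descriptor as soon as it completes. */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
			  xxx_copy_done, done, NULL);
	tx = async_memcpy(dst, src, 0, 0, PAGE_SIZE, &submit);

	/* tx is NULL when no channel was available and the copy already
	 * ran synchronously; the callback fires either way. */
	if (tx)
		async_tx_issue_pending_all();
}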
/linux/crypto/async_tx/
Makefile
2 obj-$(CONFIG_ASYNC_CORE) += async_tx.o
/linux/include/linux/platform_data/
dma-iop32x.h
92 struct dma_async_tx_descriptor async_tx; member
/linux/drivers/dma/sf-pdma/
sf-pdma.h
87 struct dma_async_tx_descriptor *async_tx; member
sf-pdma.c
111 desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); in sf_pdma_prep_dma_memcpy()
118 return desc->async_tx; in sf_pdma_prep_dma_memcpy()
314 dmaengine_desc_get_callback_invoke(desc->async_tx, NULL); in sf_pdma_errbh_tasklet()
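sf-pdma is the outlier in this list: async_tx is a pointer member rather than an embedded struct, because the driver sits on the virt-dma helper layer and lets vchan_tx_prep() own the generic descriptor inside struct virt_dma_desc. Sketched with hypothetical xxx_* names; vchan_tx_prep() is the real in-tree helper from drivers/dma/virt-dma.h:

#include "virt-dma.h"		/* in-tree drivers/dma helper layer */

struct xxx_vdesc {
	struct virt_dma_desc vdesc;		  /* embeds the generic tx */
	struct dma_async_tx_descriptor *async_tx; /* cached back-pointer */
};

static struct dma_async_tx_descriptor *
xxx_vchan_prep(struct virt_dma_chan *vc, struct xxx_vdesc *desc,
	       unsigned long flags)
{
	/* virt-dma initializes vdesc.tx and hooks its own tx_submit. */
	desc->async_tx = vchan_tx_prep(vc, &desc->vdesc, flags);
	return desc->async_tx;
}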
/linux/include/linux/
shdma-base.h
48 struct dma_async_tx_descriptor async_tx; member
/linux/Documentation/driver-api/dmaengine/
client.rst
7 .. note:: For DMA Engine usage in async_tx please see:
139 Although the async_tx API specifies that completion callback
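client.rst, referenced above, covers the plain dmaengine client flow (as opposed to the async_tx offload API): prep a descriptor, attach the completion callback, submit, then kick the channel with dma_async_issue_pending(). A sketch under the assumption that dst/src are already DMA-mapped; the xxx_* names are invented, while the dmaengine calls are the documented client API:

#include <linux/dmaengine.h>
#include <linux/completion.h>

static void xxx_memcpy_done(void *arg)
{
	complete(arg);
}

static int xxx_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
			    dma_addr_t src, size_t len,
			    struct completion *done)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = xxx_memcpy_done;
	tx->callback_param = done;

	cookie = dmaengine_submit(tx);	/* runs the driver's tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* nothing moves until this call */
	return 0;
}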

Completed in 72 milliseconds
