Lines matching refs:dma_map in net/xdp/xsk_buff_pool.c

These are the helpers with which an AF_XDP buffer pool (struct xsk_buff_pool) creates, shares, and tears down the DMA mapping of its umem pages for a given netdev.

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
        struct xsk_dma_map *dma_map;

        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
                if (dma_map->netdev == pool->netdev)
                        return dma_map;
        }

        return NULL;
}
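xp_find_dma_map() walks the umem's list of existing per-device mappings and matches on the netdev, so every pool bound to the same device ends up sharing one mapping. For context, the bookkeeping structure it searches for is declared in include/net/xsk_buff_pool.h; modulo version drift it looks like this:

struct xsk_dma_map {
        dma_addr_t *dma_pages;
        struct device *dev;
        struct net_device *netdev;
        refcount_t users;
        struct list_head list;  /* anchored in umem->xsk_dma_list */
        u32 dma_pages_cnt;
        bool dma_need_sync;
};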
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
                                             u32 nr_pages, struct xdp_umem *umem)
{
        struct xsk_dma_map *dma_map;

        dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
        if (!dma_map)
                return NULL;

        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
        if (!dma_map->dma_pages) {
                kfree(dma_map);
                return NULL;
        }

        dma_map->netdev = netdev;
        dma_map->dev = dev;
        dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1);
        list_add(&dma_map->list, &umem->xsk_dma_list);
        return dma_map;
}
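xp_create_dma_map() registers the new mapping on the umem with a user count of one; later callers for the same netdev find it through xp_find_dma_map() and only bump that count (see xp_dma_map() below). A minimal user-space sketch of this create-once, share-afterwards pattern, with purely illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct mock_map {                       /* stands in for struct xsk_dma_map */
        int netdev_id;                  /* dma_map->netdev */
        int users;                      /* refcount_t users */
        struct mock_map *next;          /* umem->xsk_dma_list */
};

static struct mock_map *find_or_create(struct mock_map **head, int netdev_id)
{
        struct mock_map *m;

        for (m = *head; m; m = m->next) {
                if (m->netdev_id == netdev_id) {
                        m->users++;     /* reuse path: refcount_inc() */
                        return m;
                }
        }

        m = calloc(1, sizeof(*m));      /* first user: kzalloc() + refcount_set(, 1) */
        if (!m)
                return NULL;
        m->netdev_id = netdev_id;
        m->users = 1;
        m->next = *head;
        *head = m;
        return m;
}

int main(void)
{
        struct mock_map *head = NULL;
        struct mock_map *a = find_or_create(&head, 7);
        struct mock_map *b = find_or_create(&head, 7);

        printf("shared: %d users: %d\n", a == b, a ? a->users : 0);
        free(a);
        return 0;
}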
static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
        list_del(&dma_map->list);
        kvfree(dma_map->dma_pages);
        kfree(dma_map);
}
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
        dma_addr_t *dma;
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
                }
        }

        xp_destroy_dma_map(dma_map);
}
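__xp_dma_unmap() undoes one streaming mapping per umem page and then frees the bookkeeping once no pool references it. For reference, the kernel DMA API pair used here (include/linux/dma-mapping.h) is:

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                              size_t offset, size_t size,
                              enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                          enum dma_data_direction dir, unsigned long attrs);

Unmap must see the same size, direction, and attrs that were used for the mapping, which is why the caller's attrs are threaded all the way through.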
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
        struct xsk_dma_map *dma_map;

        if (pool->dma_pages_cnt == 0)
                return;

        dma_map = xp_find_dma_map(pool);
        if (!dma_map) {
                WARN(1, "Could not find dma_map for device");
                return;
        }

        if (!refcount_dec_and_test(&dma_map->users))
                return;

        __xp_dma_unmap(dma_map, attrs);
        kvfree(pool->dma_pages);
        pool->dma_pages_cnt = 0;
        pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);
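xp_dma_unmap() is the teardown entry point used on the driver side; the shared mapping goes away only when the last user drops its reference. Drivers normally reach it through a thin wrapper in include/net/xdp_sock_drv.h, roughly (paraphrased, exact form varies by kernel version):

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
                                      unsigned long attrs)
{
        xp_dma_unmap(pool, attrs);
}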
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                else
                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
        }
}
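xp_check_dma_contiguity() relies on the mapped addresses being page aligned, which leaves bit 0 of each dma_pages[] entry free to cache a "next page is physically contiguous" flag. The flag is defined in include/net/xsk_buff_pool.h:

#define XSK_NEXT_PG_CONTIG_SHIFT        0
#define XSK_NEXT_PG_CONTIG_MASK         BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

With the flag cached, the unaligned-chunk path can tell in O(1) whether a frame that straddles a page boundary is still one contiguous DMA region; every consumer of dma_pages[] has to mask the bit off before using the address.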
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
        if (!pool->dma_pages)
                return -ENOMEM;

        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
        pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages));

        return 0;
}
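xp_init_dma_info() gives each pool its own dev, counts, and copy of the dma_pages[] array so hot-path lookups never dereference the shared struct xsk_dma_map. For example, the raw address-to-DMA translation in the same file reads that copy directly and strips the contiguity bit (quoted from memory, modulo version drift):

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return (pool->dma_pages[addr >> PAGE_SHIFT] &
                ~XSK_NEXT_PG_CONTIG_MASK) +
                (addr & ~PAGE_MASK);
}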
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
{
        struct xsk_dma_map *dma_map;
        dma_addr_t dma;
        int err;
        u32 i;

        dma_map = xp_find_dma_map(pool);
        if (dma_map) {
                err = xp_init_dma_info(pool, dma_map);
                if (err)
                        return err;

                refcount_inc(&dma_map->users);
                return 0;
        }

        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma)) {
                        __xp_dma_unmap(dma_map, attrs);
                        return -ENOMEM;
                }
                if (dma_need_sync(dev, dma))
                        dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }

        if (pool->unaligned)
                xp_check_dma_contiguity(dma_map);

        for (i = 0; i < pool->heads_cnt; i++) {
                struct xdp_buff_xsk *xskb = &pool->heads[i];

                xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
        }

        err = xp_init_dma_info(pool, dma_map);
        if (err) {
                __xp_dma_unmap(dma_map, attrs);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(xp_dma_map);
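xp_dma_map() is what runs when a driver enables zero-copy on a queue. As with teardown, drivers call it through a wrapper in include/net/xdp_sock_drv.h that supplies the umem's pinned pages, roughly (paraphrased, check your kernel version):

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
                                   struct device *dev, unsigned long attrs)
{
        struct xdp_umem *umem = pool->umem;

        return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

Note the two paths above: on reuse, the function only copies the existing page array into the pool and bumps the user count; the per-page mapping loop runs once per (umem, device) pair.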