/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
 * Copyright (c) 2017 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"

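/*
 * Map a userspace doorbell record into the kernel. Pinned pages are
 * cached per ucontext on context->page_list, so doorbell records that
 * live in the same user page share a single pinned umem.
 */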
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db)
{
	unsigned long page_addr = virt & PAGE_MASK;
	struct hns_roce_user_db_page *page;
	unsigned int offset;
	int ret = 0;

	mutex_lock(&context->page_mutex);

	list_for_each_entry(page, &context->page_list, list)
		if (page->user_virt == page_addr)
			goto found;

	page = kmalloc(sizeof(*page), GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

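	/*
	 * The initial reference belongs to context->page_list; every
	 * doorbell mapped from this page takes an extra reference at
	 * the "found" label below.
	 */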
	refcount_set(&page->refcount, 1);
	page->user_virt = page_addr;
	page->umem = ib_umem_get(context->ibucontext.device, page_addr,
				 PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		ret = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->page_list);

found:
	offset = virt - page_addr;
	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
	db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
	db->u.user_page = page;
	refcount_inc(&page->refcount);

out:
	mutex_unlock(&context->page_mutex);

	return ret;
}

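/*
 * Drop one doorbell's reference on its pinned user page. The page is
 * unpinned and freed only when refcount_dec_if_one() finds that the
 * page_list reference is the last one left, i.e. no other doorbell
 * still maps this page.
 */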
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db)
{
	mutex_lock(&context->page_mutex);

	refcount_dec(&db->u.user_page->refcount);
	if (refcount_dec_if_one(&db->u.user_page->refcount)) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->page_mutex);
}

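/*
 * A pgdir is one DMA-coherent page carved into HNS_ROCE_DB_PER_PAGE
 * doorbell records, tracked by a two-order buddy bitmap: order 1
 * covers free record pairs, order 0 covers single records left over
 * from a split. Only the order-1 bitmap starts full.
 */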
static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
					struct device *dma_device)
{
	struct hns_roce_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1,
		    HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					 &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
					struct hns_roce_db *db, int order)
{
	unsigned long o;
	unsigned long i;

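	/* Look for a free bit at the requested order, else split from above. */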
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
		if (i < HNS_ROCE_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

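	/* Split an order-1 pair: hand the buddy record to the order-0 map. */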
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir	= pgdir;
	db->index	= i;
	db->db_record	= pgdir->page + db->index;
	db->dma		= pgdir->db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
	db->order	= order;

	return 0;
}

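/*
 * Allocate a kernel doorbell record, reusing a free slot from an
 * existing pgdir when possible and falling back to a fresh page.
 */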
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order)
{
	struct hns_roce_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&hr_dev->pgdir_mutex);

	list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
		if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &hr_dev->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&hr_dev->pgdir_mutex);

	return ret;
}

void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
	unsigned long o;
	unsigned long i;

	mutex_lock(&hr_dev->pgdir_mutex);

	o = db->order;
	i = db->index;

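	/* If the buddy record is also free, merge back into an order-1 pair. */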
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

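	/* Release the whole page once every record pair is free again. */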
	if (bitmap_full(db->u.pgdir->order1,
			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&hr_dev->pgdir_mutex);
}