// SPDX-License-Identifier: GPL-2.0-or-later
/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/module.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

static LIST_HEAD(virtio_crypto_table);
static uint32_t num_devices;

/* The table_lock protects the above global list and num_devices */
static DEFINE_MUTEX(table_lock);

#define VIRTIO_CRYPTO_MAX_DEVICES 32

/*
 * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
 * framework.
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Function adds virtio crypto device to the global list.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
{
        struct list_head *itr;

        mutex_lock(&table_lock);
        if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
                pr_info("virtio_crypto: only support up to %d devices\n",
                        VIRTIO_CRYPTO_MAX_DEVICES);
                mutex_unlock(&table_lock);
                return -EFAULT;
        }

        list_for_each(itr, &virtio_crypto_table) {
                struct virtio_crypto *ptr =
                        list_entry(itr, struct virtio_crypto, list);

                if (ptr == vcrypto_dev) {
                        mutex_unlock(&table_lock);
                        return -EEXIST;
                }
        }
        atomic_set(&vcrypto_dev->ref_count, 0);
        list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
        vcrypto_dev->dev_id = num_devices++;
        mutex_unlock(&table_lock);
        return 0;
}
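
/*
 * Illustrative sketch (assumed caller, not defined in this file): a
 * device-specific probe path would typically register the device with
 * the manager before exposing it, and unregister it again on failure
 * or removal, e.g.:
 *
 *	err = virtcrypto_devmgr_add_dev(vcrypto);
 *	if (err)
 *		goto free_dev;
 *	...
 *	virtcrypto_devmgr_rm_dev(vcrypto);
 */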

struct list_head *virtcrypto_devmgr_get_head(void)
{
        return &virtio_crypto_table;
}

/*
 * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
 * framework.
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Function removes virtio crypto device from the acceleration framework.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
{
        mutex_lock(&table_lock);
        list_del(&vcrypto_dev->list);
        num_devices--;
        mutex_unlock(&table_lock);
}

/*
 * virtcrypto_devmgr_get_first()
 *
 * Function returns the first virtio crypto device from the acceleration
 * framework.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_devmgr_get_first(void)
{
        struct virtio_crypto *dev = NULL;

        mutex_lock(&table_lock);
        if (!list_empty(&virtio_crypto_table))
                dev = list_first_entry(&virtio_crypto_table,
                                       struct virtio_crypto,
                                       list);
        mutex_unlock(&table_lock);
        return dev;
}
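
/*
 * Note that virtcrypto_devmgr_get_first() does not take a reference on
 * the returned device; a caller that keeps the pointer around would be
 * expected to pair it with virtcrypto_dev_get()/virtcrypto_dev_put()
 * below (illustrative expectation, not enforced here).
 */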

/*
 * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
{
        return atomic_read(&vcrypto_dev->ref_count) != 0;
}

/*
 * virtcrypto_dev_get() - Increment vcrypto_dev reference count
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Increment the vcrypto_dev refcount and, if this is the first reference
 * taken in the current period of use, increment the module refcount too.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount could not
 * be bumped.
 */
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
{
        if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
                if (!try_module_get(vcrypto_dev->owner))
                        return -EFAULT;
        return 0;
}
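
/*
 * Every successful virtcrypto_dev_get() must be balanced by a
 * virtcrypto_dev_put(); the first reference of a busy period pins
 * vcrypto_dev->owner so the backing module cannot be unloaded while the
 * device is in use. A typical pattern (illustrative sketch only):
 *
 *	ret = virtcrypto_dev_get(vcrypto);
 *	if (ret)
 *		return ret;
 *	...submit requests...
 *	virtcrypto_dev_put(vcrypto);
 */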

/*
 * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Decrement the vcrypto_dev refcount and, if this drops the last reference
 * of the current period of use, decrement the module refcount too.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
{
        if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
                module_put(vcrypto_dev->owner);
}

/*
 * virtcrypto_dev_started() - Check whether device has started
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise
 */
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
{
        return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
}

/*
 * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
 * @node: Node id the driver works.
 * @service: Crypto service that needs to be supported by the dev
 * @algo: The algorithm number that needs to be supported by the dev
 *
 * Function returns the least-used virtio crypto device on the node that
 * supports the given crypto service and algorithm.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
                                              uint32_t algo)
{
        struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
        unsigned long best = ~0;
        unsigned long ctr;

        mutex_lock(&table_lock);
        list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {

                if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
                     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
                    virtcrypto_dev_started(tmp_dev) &&
                    virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
                        ctr = atomic_read(&tmp_dev->ref_count);
                        if (best > ctr) {
                                vcrypto_dev = tmp_dev;
                                best = ctr;
                        }
                }
        }

        if (!vcrypto_dev) {
                pr_info("virtio_crypto: Could not find a device on node %d\n",
                        node);
                /* Get any started device */
                list_for_each_entry(tmp_dev,
                                    virtcrypto_devmgr_get_head(), list) {
                        if (virtcrypto_dev_started(tmp_dev) &&
                            virtcrypto_algo_is_supported(tmp_dev,
                                                         service, algo)) {
                                vcrypto_dev = tmp_dev;
                                break;
                        }
                }
        }
        mutex_unlock(&table_lock);
        if (!vcrypto_dev)
                return NULL;

        virtcrypto_dev_get(vcrypto_dev);
        return vcrypto_dev;
}
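
/*
 * Illustrative usage (assumed caller, not defined in this file): a
 * session-creation path would typically pick the least-used device near
 * the caller's node and drop the reference once it is done, e.g.:
 *
 *	vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *	...create session, submit requests...
 *	virtcrypto_dev_put(vcrypto);
 *
 * The reference taken here must always be released with
 * virtcrypto_dev_put().
 */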

/*
 * virtcrypto_dev_start() - Start virtio crypto device
 * @vcrypto: Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto device
 * is ready to be used.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 on success, -EFAULT when failing to register algorithms
 */
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
        if (virtio_crypto_algs_register(vcrypto)) {
                pr_err("virtio_crypto: Failed to register crypto algs\n");
                return -EFAULT;
        }

        return 0;
}

/*
 * virtcrypto_dev_stop() - Stop virtio crypto device
 * @vcrypto: Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto device
 * is no longer available for use.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
        virtio_crypto_algs_unregister(vcrypto);
}

/*
 * virtcrypto_algo_is_supported()
 * @vcrypto: Pointer to virtio crypto device.
 * @service: The bit number of the service to validate.
 *           See VIRTIO_CRYPTO_SERVICE_*
 * @algo: The bit number of the algorithm to validate.
 *
 * Validate if the virtio crypto device supports a service and algorithm.
 *
 * Return: true if the device supports the service and algorithm.
 */
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
                                  uint32_t service,
                                  uint32_t algo)
{
        uint32_t service_mask = 1u << service;
        uint32_t algo_mask = 0;
        bool low = true;

        if (algo > 31) {
                algo -= 32;
                low = false;
        }

        if (!(vcrypto->crypto_services & service_mask))
                return false;

        switch (service) {
        case VIRTIO_CRYPTO_SERVICE_CIPHER:
                if (low)
                        algo_mask = vcrypto->cipher_algo_l;
                else
                        algo_mask = vcrypto->cipher_algo_h;
                break;

        case VIRTIO_CRYPTO_SERVICE_HASH:
                algo_mask = vcrypto->hash_algo;
                break;

        case VIRTIO_CRYPTO_SERVICE_MAC:
                if (low)
                        algo_mask = vcrypto->mac_algo_l;
                else
                        algo_mask = vcrypto->mac_algo_h;
                break;

        case VIRTIO_CRYPTO_SERVICE_AEAD:
                algo_mask = vcrypto->aead_algo;
                break;
        }

        if (!(algo_mask & (1u << algo)))
                return false;

        return true;
}
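
/*
 * Example (illustrative): a caller could check whether a device
 * advertises AES-CBC under the CIPHER service before selecting it:
 *
 *	if (!virtcrypto_algo_is_supported(vcrypto, VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC))
 *		return -EOPNOTSUPP;
 *
 * Algorithm numbers 32..63 are looked up in the corresponding *_algo_h
 * mask, as handled above.
 */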