// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);

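/**
 * ib_umem_dmabuf_map_pages - map the dma-buf backing a umem
 * @umem_dmabuf: umem to map
 *
 * Maps the attachment, clips the returned sg list in place to the
 * umem's [address, address + length) range, and waits on the
 * exporter's exclusive fence so the page contents are up to date.
 * The caller must hold the dma-buf reservation lock.
 */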
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct dma_fence *fence;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
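	/*
	 * Remember (and shrink) the first and last entries that overlap
	 * [start, end) so the clipping can be undone later by
	 * ib_umem_dmabuf_unmap_pages(), and count the overlapping entries.
	 */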
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up to date. Wait for the exporter to finish
	 * the migration.
	 */
	fence = dma_resv_excl_fence(umem_dmabuf->attach->dmabuf->resv);
	if (fence)
		return dma_fence_wait(fence, false);

	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

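/**
 * ib_umem_dmabuf_unmap_pages - unmap the dma-buf backing a umem
 * @umem_dmabuf: umem to unmap
 *
 * Restores the sg entries clipped by ib_umem_dmabuf_map_pages() before
 * handing the sg table back to the exporter. The caller must hold the
 * dma-buf reservation lock.
 */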
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

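/**
 * ib_umem_dmabuf_get - create a umem backed by a dma-buf
 * @device: IB device the umem belongs to
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the region
 * @ops: dynamic importer ops; move_notify is mandatory
 *
 * Takes a reference on the dma-buf and attaches to it as a dynamic
 * importer. The pages are not mapped here; the caller does that under
 * the reservation lock with ib_umem_dmabuf_map_pages().
 */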
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

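	/*
	 * Attach as a dynamic importer: the exporter may move the buffer
	 * at any time and will tell us about it via ops->move_notify.
	 */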
	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

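/*
 * A pinned dma-buf is never moved by the exporter, so move_notify is
 * not expected to be called; warn if it ever is.
 */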
static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
	.allow_peer2peer = true,
	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

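/**
 * ib_umem_dmabuf_get_pinned - create a umem backed by a pinned dma-buf
 * @device: IB device the umem belongs to
 * @offset: offset of the region within the dma-buf, in bytes
 * @size: length of the region, in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the region
 *
 * For callers that cannot handle move_notify: the dma-buf is pinned
 * and mapped up front, so the mapping stays valid for the lifetime
 * of the umem.
 */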
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
					 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

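	/*
	 * Pin and map under the reservation lock; once pinned the buffer
	 * cannot move, so the mapping stays valid after the lock is
	 * dropped.
	 */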
	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

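/**
 * ib_umem_dmabuf_release - release a dma-buf umem
 * @umem_dmabuf: umem to release
 *
 * Unmaps (and unpins, if pinned) under the reservation lock, then
 * detaches from and drops the reference on the dma-buf.
 */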
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	if (umem_dmabuf->pinned)
		dma_buf_unpin(umem_dmabuf->attach);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}