// SPDX-License-Identifier: GPL-2.0
/*
 * File operations for Coda.
 * Original version: (C) 1996 Peter Braam
 * Rewritten for Linux 2.1: (C) 1997 Carnegie Mellon University
 *
 * Carnegie Mellon encourages users of this code to contribute improvements
 * to the Coda project. Contact Peter Braam <coda@cs.cmu.edu>.
 */

#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

#include <linux/coda.h>
#include "coda_psdev.h"
#include "coda_linux.h"
#include "coda_int.h"

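/*
 * Per-mapping wrapper around the host file's vm_operations_struct.  The
 * host's vm_ops are copied and their open/close methods redirected so the
 * coda file stays pinned while VMAs still reference the mapping; refcnt
 * counts those VMAs.
 */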
struct coda_vm_ops {
	refcount_t refcnt;
	struct file *coda_file;
	const struct vm_operations_struct *host_vm_ops;
	struct vm_operations_struct vm_ops;
};

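/*
 * Read from the cached container file.  Venus is informed of the access
 * intent before the transfer and notified again when it completes.
 */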
static ssize_t
coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_READ);
	if (ret)
		goto finish_read;

	ret = vfs_iter_read(cfi->cfi_container, to, &iocb->ki_pos, 0);

finish_read:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_READ_FINISH);
	return ret;
}

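/*
 * Write through to the container file under the coda inode lock, then
 * propagate the resulting size, block count and timestamps back to the
 * coda inode.  Venus is informed of the write intent before the transfer
 * and notified again when it completes.
 */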
static ssize_t
coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *coda_file = iocb->ki_filp;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	loff_t ki_pos = iocb->ki_pos;
	size_t count = iov_iter_count(to);
	ssize_t ret;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ki_pos, CODA_ACCESS_TYPE_WRITE);
	if (ret)
		goto finish_write;

	file_start_write(host_file);
	inode_lock(coda_inode);
	ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
	coda_inode->i_size = file_inode(host_file)->i_size;
	coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
	coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
	inode_unlock(coda_inode);
	file_end_write(host_file);

finish_write:
	venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
			    &cfi->cfi_access_intent,
			    count, ki_pos, CODA_ACCESS_TYPE_WRITE_FINISH);
	return ret;
}

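/*
 * Another VMA now uses this mapping: take an extra reference on the
 * wrapper and forward to the host filesystem's ->open, if any.
 */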
static void
coda_vm_open(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	refcount_inc(&cvm_ops->refcnt);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
		cvm_ops->host_vm_ops->open(vma);
}

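/*
 * Forward ->close to the host filesystem; when the last VMA using this
 * wrapper goes away, restore the host vm_ops, release the coda file
 * reference held on behalf of the mapping and free the wrapper.
 */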
static void
coda_vm_close(struct vm_area_struct *vma)
{
	struct coda_vm_ops *cvm_ops =
		container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);

	if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
		cvm_ops->host_vm_ops->close(vma);

	if (refcount_dec_and_test(&cvm_ops->refcnt)) {
		vma->vm_ops = cvm_ops->host_vm_ops;
		fput(cvm_ops->coda_file);
		kfree(cvm_ops);
	}
}

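/*
 * Map the container (host) file in place of the coda file.  The coda
 * inode's address space is redirected to the host inode's, further mmaps
 * are refused if userspace has changed the container file on us, and the
 * host's vm_operations are wrapped so that open/close keep the coda file
 * alive for as long as the mapping exists.
 */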
static int
coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
{
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi = coda_ftoc(coda_file);
	struct file *host_file = cfi->cfi_container;
	struct inode *host_inode = file_inode(host_file);
	struct coda_inode_info *cii;
	struct coda_vm_ops *cvm_ops;
	loff_t ppos;
	size_t count;
	int ret;

	if (!host_file->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(coda_file != vma->vm_file))
		return -EIO;

	count = vma->vm_end - vma->vm_start;
	ppos = vma->vm_pgoff * PAGE_SIZE;

	ret = venus_access_intent(coda_inode->i_sb, coda_i2f(coda_inode),
				  &cfi->cfi_access_intent,
				  count, ppos, CODA_ACCESS_TYPE_MMAP);
	if (ret)
		return ret;

	cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
	if (!cvm_ops)
		return -ENOMEM;

	cii = ITOC(coda_inode);
	spin_lock(&cii->c_lock);
	coda_file->f_mapping = host_file->f_mapping;
	if (coda_inode->i_mapping == &coda_inode->i_data)
		coda_inode->i_mapping = host_inode->i_mapping;

	/* only allow additional mmaps as long as userspace isn't changing
	 * the container file on us! */
	else if (coda_inode->i_mapping != host_inode->i_mapping) {
		spin_unlock(&cii->c_lock);
		kfree(cvm_ops);
		return -EBUSY;
	}

	/* keep track of how often the coda_inode/host_file has been mmapped */
	cii->c_mapcount++;
	cfi->cfi_mapcount++;
	spin_unlock(&cii->c_lock);

	vma->vm_file = get_file(host_file);
	ret = call_mmap(vma->vm_file, vma);

	if (ret) {
		/* if call_mmap fails, our caller will put host_file so we
		 * should drop the reference to the coda_file that we got.
		 */
		fput(coda_file);
		kfree(cvm_ops);
	} else {
		/* here we add redirects for the open/close vm_operations */
		cvm_ops->host_vm_ops = vma->vm_ops;
		if (vma->vm_ops)
			cvm_ops->vm_ops = *vma->vm_ops;

		cvm_ops->vm_ops.open = coda_vm_open;
		cvm_ops->vm_ops.close = coda_vm_close;
		cvm_ops->coda_file = coda_file;
		refcount_set(&cvm_ops->refcnt, 1);

		vma->vm_ops = &cvm_ops->vm_ops;
	}
	return ret;
}

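/*
 * Ask Venus to open the object; the upcall returns an open file for the
 * locally cached container, which is stashed in the per-file info and
 * backs all subsequent I/O on the coda file.
 */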
int coda_open(struct inode *coda_inode, struct file *coda_file)
{
	struct file *host_file = NULL;
	int error;
	unsigned short flags = coda_file->f_flags & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;

	cfi = kmalloc(sizeof(struct coda_file_info), GFP_KERNEL);
	if (!cfi)
		return -ENOMEM;

	error = venus_open(coda_inode->i_sb, coda_i2f(coda_inode), coda_flags,
			   &host_file);
	if (!host_file)
		error = -EIO;

	if (error) {
		kfree(cfi);
		return error;
	}

	host_file->f_flags |= coda_file->f_flags & (O_APPEND | O_SYNC);

	cfi->cfi_magic = CODA_MAGIC;
	cfi->cfi_mapcount = 0;
	cfi->cfi_container = host_file;
	/* assume access intents are supported unless we hear otherwise */
	cfi->cfi_access_intent = true;

	BUG_ON(coda_file->private_data != NULL);
	coda_file->private_data = cfi;
	return 0;
}

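/*
 * Notify Venus that the file is closed, undo the address space
 * redirection set up by coda_file_mmap if no other openers still have it
 * mapped, and drop the reference on the container file.
 */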
int coda_release(struct inode *coda_inode, struct file *coda_file)
{
	unsigned short flags = (coda_file->f_flags) & (~O_EXCL);
	unsigned short coda_flags = coda_flags_to_cflags(flags);
	struct coda_file_info *cfi;
	struct coda_inode_info *cii;
	struct inode *host_inode;

	cfi = coda_ftoc(coda_file);

	venus_close(coda_inode->i_sb, coda_i2f(coda_inode),
			  coda_flags, coda_file->f_cred->fsuid);

	host_inode = file_inode(cfi->cfi_container);
	cii = ITOC(coda_inode);

	/* did we mmap this file? */
	spin_lock(&cii->c_lock);
	if (coda_inode->i_mapping == &host_inode->i_data) {
		cii->c_mapcount -= cfi->cfi_mapcount;
		if (!cii->c_mapcount)
			coda_inode->i_mapping = &coda_inode->i_data;
	}
	spin_unlock(&cii->c_lock);

	fput(cfi->cfi_container);
	kfree(coda_file->private_data);
	coda_file->private_data = NULL;

	/* VFS fput ignores the return value from file_operations->release, so
	 * there is no use returning an error here */
	return 0;
}

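/*
 * Flush dirty pages and fsync the container file; for a full (non-data)
 * fsync the request is also passed up to Venus.
 */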
int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync)
{
	struct file *host_file;
	struct inode *coda_inode = file_inode(coda_file);
	struct coda_file_info *cfi;
	int err;

	if (!(S_ISREG(coda_inode->i_mode) || S_ISDIR(coda_inode->i_mode) ||
	      S_ISLNK(coda_inode->i_mode)))
		return -EINVAL;

	err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end);
	if (err)
		return err;
	inode_lock(coda_inode);

	cfi = coda_ftoc(coda_file);
	host_file = cfi->cfi_container;

	err = vfs_fsync(host_file, datasync);
	if (!err && !datasync)
		err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode));
	inode_unlock(coda_inode);

	return err;
}

const struct file_operations coda_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= coda_file_read_iter,
	.write_iter	= coda_file_write_iter,
	.mmap		= coda_file_mmap,
	.open		= coda_open,
	.release	= coda_release,
	.fsync		= coda_fsync,
	.splice_read	= generic_file_splice_read,
};