// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/slab.h>
#include "internal.h"

LIST_HEAD(fscache_cache_list);
DECLARE_RWSEM(fscache_addremove_sem);
DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq);
EXPORT_SYMBOL(fscache_cache_cleared_wq);

static LIST_HEAD(fscache_cache_tag_list);

/*
 * look up a cache tag
 */
struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name)
{
	struct fscache_cache_tag *tag, *xtag;

	/* firstly check for the existence of the tag under read lock */
	down_read(&fscache_addremove_sem);

	list_for_each_entry(tag, &fscache_cache_tag_list, link) {
		if (strcmp(tag->name, name) == 0) {
			atomic_inc(&tag->usage);
			up_read(&fscache_addremove_sem);
			return tag;
		}
	}

	up_read(&fscache_addremove_sem);

	/* the tag does not exist - create a candidate */
	xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL);
	if (!xtag)
		/* return an error pointer if out of memory */
		return ERR_PTR(-ENOMEM);

	atomic_set(&xtag->usage, 1);
	strcpy(xtag->name, name);

	/* write lock, search again and add if still not present */
	down_write(&fscache_addremove_sem);

	list_for_each_entry(tag, &fscache_cache_tag_list, link) {
		if (strcmp(tag->name, name) == 0) {
			atomic_inc(&tag->usage);
			up_write(&fscache_addremove_sem);
			kfree(xtag);
			return tag;
		}
	}

	list_add_tail(&xtag->link, &fscache_cache_tag_list);
	up_write(&fscache_addremove_sem);
	return xtag;
}

/*
 * release a reference to a cache tag
 */
void __fscache_release_cache_tag(struct fscache_cache_tag *tag)
{
	if (tag != ERR_PTR(-ENOMEM)) {
		down_write(&fscache_addremove_sem);

		if (atomic_dec_and_test(&tag->usage))
			list_del_init(&tag->link);
		else
			tag = NULL;

		up_write(&fscache_addremove_sem);

		kfree(tag);
	}
}
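
/*
 * A minimal usage sketch for the pair of functions above.  It assumes the
 * fscache_lookup_cache_tag()/fscache_release_cache_tag() wrappers declared
 * in <linux/fscache.h>; "mynetfs_cache" is a made-up tag name:
 *
 *	struct fscache_cache_tag *tag;
 *
 *	tag = fscache_lookup_cache_tag("mynetfs_cache");
 *	if (!IS_ERR_OR_NULL(tag)) {
 *		... remember the tag as a cache preference ...
 *		fscache_release_cache_tag(tag);
 *	}
 *
 * Note that releasing an ERR_PTR(-ENOMEM) tag is tolerated by the code
 * above, but a NULL tag must not be passed in.
 */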

/*
 * select a cache in which to store an object
 * - the cache addremove semaphore must be at least read-locked by the caller
 * - the object will never be an index
 */
struct fscache_cache *fscache_select_cache_for_object(
	struct fscache_cookie *cookie)
{
	struct fscache_cache_tag *tag;
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("");

	if (list_empty(&fscache_cache_list)) {
		_leave(" = NULL [no cache]");
		return NULL;
	}

	/* we check the parent to determine the cache to use */
	spin_lock(&cookie->lock);

	/* the first in the parent's backing list should be the preferred
	 * cache */
	if (!hlist_empty(&cookie->backing_objects)) {
		object = hlist_entry(cookie->backing_objects.first,
				     struct fscache_object, cookie_link);

		cache = object->cache;
		if (fscache_object_is_dying(object) ||
		    test_bit(FSCACHE_IOERROR, &cache->flags))
			cache = NULL;

		spin_unlock(&cookie->lock);
		_leave(" = %s [parent]", cache ? cache->tag->name : "NULL");
		return cache;
	}

	/* the parent is unbacked */
	if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
		/* cookie not an index and is unbacked */
		spin_unlock(&cookie->lock);
		_leave(" = NULL [cookie ub,ni]");
		return NULL;
	}

	spin_unlock(&cookie->lock);

	if (!cookie->def->select_cache)
		goto no_preference;

	/* ask the netfs for its preference */
	tag = cookie->def->select_cache(cookie->parent->netfs_data,
					cookie->netfs_data);
	if (!tag)
		goto no_preference;

	if (tag == ERR_PTR(-ENOMEM)) {
		_leave(" = NULL [nomem tag]");
		return NULL;
	}

	if (!tag->cache) {
		_leave(" = NULL [unbacked tag]");
		return NULL;
	}

	if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
		return NULL;

	_leave(" = %s [specific]", tag->name);
	return tag->cache;

no_preference:
	/* netfs has no preference - just select first cache */
	cache = list_entry(fscache_cache_list.next,
			   struct fscache_cache, link);
	_leave(" = %s [first]", cache->tag->name);
	return cache;
}
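
/*
 * Sketch of the netfs-side counterpart to the tag preference consulted
 * above.  This assumes the ->select_cache() hook in struct fscache_cookie_def
 * is handed the parent's and the cookie's netfs private data; the "mynetfs"
 * names are hypothetical:
 *
 *	static struct fscache_cache_tag *mynetfs_select_cache(
 *		const void *parent_netfs_data, const void *cookie_netfs_data)
 *	{
 *		const struct mynetfs_server *server = parent_netfs_data;
 *
 *		return server->preferred_cache_tag;	(NULL = no preference)
 *	}
 */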

/**
 * fscache_init_cache - Initialise a cache record
 * @cache: The cache record to be initialised
 * @ops: The cache operations to be installed in that record
 * @idfmt: Format string to define identifier
 * @...: sprintf-style arguments
 *
 * Initialise a record of a cache and fill in the name.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_init_cache(struct fscache_cache *cache,
			const struct fscache_cache_ops *ops,
			const char *idfmt,
			...)
{
	va_list va;

	memset(cache, 0, sizeof(*cache));

	cache->ops = ops;

	va_start(va, idfmt);
	vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
	va_end(va);

	INIT_WORK(&cache->op_gc, fscache_operation_gc);
	INIT_LIST_HEAD(&cache->link);
	INIT_LIST_HEAD(&cache->object_list);
	INIT_LIST_HEAD(&cache->op_gc_list);
	spin_lock_init(&cache->object_list_lock);
	spin_lock_init(&cache->op_gc_list_lock);
}
EXPORT_SYMBOL(fscache_init_cache);
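
/*
 * Sketch of how a cache backend might use fscache_init_cache(); the "myfs"
 * names are hypothetical and not taken from any real backend.  The record is
 * zeroed by the call itself, so no prior initialisation is needed:
 *
 *	fscache_init_cache(&myfs->cache, &myfs_cache_ops,
 *			   "myfs-%s", myfs->device_name);
 */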

/**
 * fscache_add_cache - Declare a cache as being open for business
 * @cache: The record describing the cache
 * @ifsdef: The record of the cache object describing the top-level index
 * @tagname: The tag describing this cache
 *
 * Add a cache to the system, making it available for netfses to use.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
int fscache_add_cache(struct fscache_cache *cache,
		      struct fscache_object *ifsdef,
		      const char *tagname)
{
	struct fscache_cache_tag *tag;

	BUG_ON(!ifsdef);
	BUG_ON(!cache->ops);
	ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);

	cache->flags = 0;
	ifsdef->event_mask =
		((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
		~(1 << FSCACHE_OBJECT_EV_CLEARED);
	__set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);

	if (!tagname)
		tagname = cache->identifier;

	BUG_ON(!tagname[0]);

	_enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);

	/* we use the cache tag to uniquely identify caches */
	tag = __fscache_lookup_cache_tag(tagname);
	if (IS_ERR(tag))
		goto nomem;

	if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags))
		goto tag_in_use;

	cache->kobj = kobject_create_and_add(tagname, fscache_root);
	if (!cache->kobj)
		goto error;

	ifsdef->cache = cache;
	cache->fsdef = ifsdef;

	down_write(&fscache_addremove_sem);

	tag->cache = cache;
	cache->tag = tag;

	/* add the cache to the list */
	list_add(&cache->link, &fscache_cache_list);

	/* add the cache's netfs definition index object to the cache's
	 * list */
	spin_lock(&cache->object_list_lock);
	list_add_tail(&ifsdef->cache_link, &cache->object_list);
	spin_unlock(&cache->object_list_lock);

	/* add the cache's netfs definition index object to the top level index
	 * cookie as a known backing object */
	spin_lock(&fscache_fsdef_index.lock);

	hlist_add_head(&ifsdef->cookie_link,
		       &fscache_fsdef_index.backing_objects);

	refcount_inc(&fscache_fsdef_index.ref);

	/* done */
	spin_unlock(&fscache_fsdef_index.lock);
	up_write(&fscache_addremove_sem);

	pr_notice("Cache \"%s\" added (type %s)\n",
		  cache->tag->name, cache->ops->name);
	kobject_uevent(cache->kobj, KOBJ_ADD);

	_leave(" = 0 [%s]", cache->identifier);
	return 0;

tag_in_use:
	pr_err("Cache tag '%s' already in use\n", tagname);
	__fscache_release_cache_tag(tag);
	_leave(" = -EEXIST");
	return -EEXIST;

error:
	__fscache_release_cache_tag(tag);
	_leave(" = -EINVAL");
	return -EINVAL;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(fscache_add_cache);
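
/*
 * Sketch of the registration step a backend might perform once
 * fscache_init_cache() has been called and the object for its top-level
 * index exists ("myfs" names and the error label are hypothetical):
 *
 *	ret = fscache_add_cache(&myfs->cache, &myfs->fsdef_object, NULL);
 *	if (ret < 0)
 *		goto error_add_cache;	(-EEXIST, -EINVAL or -ENOMEM)
 *
 * Passing a NULL tagname makes the cache identifier double as the tag.
 */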

/**
 * fscache_io_error - Note a cache I/O error
 * @cache: The record describing the cache
 *
 * Note that an I/O error occurred in a cache and that it should no longer be
 * used for anything.  This also reports the error into the kernel log.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_io_error(struct fscache_cache *cache)
{
	if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags))
		pr_err("Cache '%s' stopped due to I/O error\n",
		       cache->ops->name);
}
EXPORT_SYMBOL(fscache_io_error);
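
/*
 * Sketch of when a backend might call fscache_io_error() ("myfs" names are
 * hypothetical): on an unrecoverable error from the backing store, mark the
 * whole cache as dead so that no further I/O is attempted against it:
 *
 *	if (backing_store_error) {
 *		fscache_io_error(&myfs->cache);
 *		return backing_store_error;
 *	}
 *
 * Repeated calls are harmless: the test_and_set_bit() above ensures the
 * error is only logged once.
 */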

/*
 * request withdrawal of all the objects in a cache
 * - all the objects being withdrawn are moved onto the supplied list
 */
static void fscache_withdraw_all_objects(struct fscache_cache *cache,
					 struct list_head *dying_objects)
{
	struct fscache_object *object;

	while (!list_empty(&cache->object_list)) {
		spin_lock(&cache->object_list_lock);

		if (!list_empty(&cache->object_list)) {
			object = list_entry(cache->object_list.next,
					    struct fscache_object, cache_link);
			list_move_tail(&object->cache_link, dying_objects);

			_debug("withdraw %x", object->cookie->debug_id);

			/* This must be done under object_list_lock to prevent
			 * a race with fscache_drop_object().
			 */
			fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
		}

		spin_unlock(&cache->object_list_lock);
		cond_resched();
	}
}

/**
 * fscache_withdraw_cache - Withdraw a cache from the active service
 * @cache: The record describing the cache
 *
 * Withdraw a cache from service, unbinding all its cache objects from the
 * netfs cookies they're currently representing.
 *
 * See Documentation/filesystems/caching/backend-api.rst for a complete
 * description.
 */
void fscache_withdraw_cache(struct fscache_cache *cache)
{
	LIST_HEAD(dying_objects);

	_enter("");

	pr_notice("Withdrawing cache \"%s\"\n",
		  cache->tag->name);

	/* make the cache unavailable for cookie acquisition */
	if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
		BUG();

	down_write(&fscache_addremove_sem);
	list_del_init(&cache->link);
	cache->tag->cache = NULL;
	up_write(&fscache_addremove_sem);

	/* make sure all pages pinned by operations on behalf of the netfs are
	 * written to disk */
	fscache_stat(&fscache_n_cop_sync_cache);
	cache->ops->sync_cache(cache);
	fscache_stat_d(&fscache_n_cop_sync_cache);

	/* dissociate all the netfs pages backed by this cache from the block
	 * mappings in the cache */
	fscache_stat(&fscache_n_cop_dissociate_pages);
	cache->ops->dissociate_pages(cache);
	fscache_stat_d(&fscache_n_cop_dissociate_pages);

	/* we now have to destroy all the active objects pertaining to this
	 * cache - which we do by passing them off to the thread pool to be
	 * disposed of */
	_debug("destroy");

	fscache_withdraw_all_objects(cache, &dying_objects);

	/* wait for all extant objects to finish their outstanding operations
	 * and go away */
	_debug("wait for finish");
	wait_event(fscache_cache_cleared_wq,
		   atomic_read(&cache->object_count) == 0);
	_debug("wait for clearance");
	wait_event(fscache_cache_cleared_wq,
		   list_empty(&cache->object_list));
	_debug("cleared");
	ASSERT(list_empty(&dying_objects));

	kobject_put(cache->kobj);

	clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
	fscache_release_cache_tag(cache->tag);
	cache->tag = NULL;

	_leave("");
}
EXPORT_SYMBOL(fscache_withdraw_cache);
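
/*
 * Sketch of the teardown side ("myfs" names are hypothetical): a backend
 * withdraws its cache when the backing store is being unbound, after which
 * it may free its own cache description:
 *
 *	fscache_withdraw_cache(&myfs->cache);	(blocks until all objects die)
 *	kfree(myfs);
 *
 * The call releases the cache's kobject and tag itself, so the backend only
 * needs to clean up its own state afterwards.
 */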