fs/nfs/fscache.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* NFS filesystem cache interface
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/xarray.h>
#include <linux/fscache.h>
#include <linux/netfs.h>

#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFS_MAX_KEY_LEN 1000

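/*
 * Append a component to the cache key as ",<hex>", or just "," if the value
 * is zero.  Returns false if the key has already overrun NFS_MAX_KEY_LEN.
 */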
static bool nfs_append_int(char *key, int *_len, unsigned long long x)
{
	if (*_len > NFS_MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*_len)++] = ',';
	else
		*_len += sprintf(key + *_len, ",%llx", x);
	return true;
}

/*
 * Build the per-client part of the cache volume key from the NFS version,
 * minor version and server address.
 * - A volume key is always generated per client; filehandle cookies are then
 *   acquired on a per-superblock basis, depending on the mount flags.
 */
static bool nfs_fscache_get_client_key(struct nfs_client *clp,
				       char *key, int *_len)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
	const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;

	*_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
			  ",%u.%u,%x",
			  clp->rpc_ops->version,
			  clp->cl_minorversion,
			  clp->cl_addr.ss_family);

	switch (clp->cl_addr.ss_family) {
	case AF_INET:
		if (!nfs_append_int(key, _len, sin->sin_port) ||
		    !nfs_append_int(key, _len, sin->sin_addr.s_addr))
			return false;
		return true;

	case AF_INET6:
		if (!nfs_append_int(key, _len, sin6->sin6_port) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
		    !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
			return false;
		return true;

	default:
		printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
		       clp->cl_addr.ss_family);
		return false;
	}
}

/*
 * Get the cache cookie for an NFS superblock.
 *
 * The default uniquifier is just an empty string, but it may be overridden
 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
 * superblock across an automount point of some nature.
 */
int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
	struct fscache_volume *vcookie;
	struct nfs_server *nfss = NFS_SB(sb);
	unsigned int len = 3;
	char *key;

	if (uniq) {
		nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
		if (!nfss->fscache_uniq)
			return -ENOMEM;
	}

	key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	memcpy(key, "nfs", 3);
	if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
	    !nfs_append_int(key, &len, nfss->fsid.major) ||
	    !nfs_append_int(key, &len, nfss->fsid.minor) ||
	    !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
	    !nfs_append_int(key, &len, nfss->flags) ||
	    !nfs_append_int(key, &len, nfss->rsize) ||
	    !nfs_append_int(key, &len, nfss->wsize) ||
	    !nfs_append_int(key, &len, nfss->acregmin) ||
	    !nfs_append_int(key, &len, nfss->acregmax) ||
	    !nfs_append_int(key, &len, nfss->acdirmin) ||
	    !nfs_append_int(key, &len, nfss->acdirmax) ||
	    !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
		goto out;

	if (ulen > 0) {
		if (ulen > NFS_MAX_KEY_LEN - len)
			goto out;
		key[len++] = ',';
		memcpy(key + len, uniq, ulen);
		len += ulen;
	}
	key[len] = 0;

	/* create a cache index for looking up filehandles */
	vcookie = fscache_acquire_volume(key,
					 NULL, /* preferred_cache */
					 NULL, 0 /* coherency_data */);
	if (IS_ERR(vcookie)) {
		if (vcookie != ERR_PTR(-EBUSY)) {
			kfree(key);
			return PTR_ERR(vcookie);
		}
		pr_err("NFS: Cache volume key already in use (%s)\n", key);
		vcookie = NULL;
	}
	nfss->fscache = vcookie;

out:
	kfree(key);
	return 0;
}

/*
 * Release a per-superblock cookie.
 */
void nfs_fscache_release_super_cookie(struct super_block *sb)
{
	struct nfs_server *nfss = NFS_SB(sb);

	fscache_relinquish_volume(nfss->fscache, NULL, false);
	nfss->fscache = NULL;
	kfree(nfss->fscache_uniq);
}

/*
 * Initialise the per-inode cache cookie pointer for an NFS inode.
 */
void nfs_fscache_init_inode(struct inode *inode)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct nfs_server *nfss = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);

	netfs_inode(inode)->cache = NULL;
	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
		return;

	nfs_fscache_update_auxdata(&auxdata, inode);

	netfs_inode(inode)->cache = fscache_acquire_cookie(
					       nfss->fscache,
					       0,
					       nfsi->fh.data, /* index_key */
					       nfsi->fh.size,
					       &auxdata,      /* aux_data */
					       sizeof(auxdata),
					       i_size_read(inode));

	if (netfs_inode(inode)->cache)
		mapping_set_release_always(inode->i_mapping);
}

/*
 * Release a per-inode cookie.
 */
void nfs_fscache_clear_inode(struct inode *inode)
{
	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
	netfs_inode(inode)->cache = NULL;
}

/*
 * Enable or disable caching, as appropriate, for a file that is being opened.
 * The cookie is allocated when the inode is initialised, but is not enabled at
 * that time.  Enablement is deferred to file-open time to avoid stat() and
 * access() thrashing the cache.
 *
 * For now, with NFS, only regular files that are open read-only will be able
 * to use the cache.
 *
 * We enable the cache for an inode if we open it read-only and it isn't
 * currently open for writing.  We disable the cache if the inode is open
 * write-only.
 *
 * The caller uses the file struct to pin i_writecount on the inode before
 * calling us when a file is opened for writing, so we can make use of that.
 *
 * Note that this may be invoked multiple times in parallel by parallel
 * nfs_open() functions.
 */
void nfs_fscache_open_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	bool open_for_write = inode_is_open_for_write(inode);

	if (!fscache_cookie_valid(cookie))
		return;

	fscache_use_cookie(cookie, open_for_write);
	if (open_for_write) {
		nfs_fscache_update_auxdata(&auxdata, inode);
		fscache_invalidate(cookie, &auxdata, i_size_read(inode),
				   FSCACHE_INVAL_DIO_WRITE);
	}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);

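/*
 * Unuse the fscache cookie when a file is closed, passing back the current
 * auxiliary (coherency) data and file size.
 */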
void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
	struct nfs_fscache_inode_auxdata auxdata;
	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
	loff_t i_size = i_size_read(inode);

	nfs_fscache_update_auxdata(&auxdata, inode);
	fscache_unuse_cookie(cookie, &auxdata, &i_size);
}

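/*
 * Read a folio through netfs, or return -ENOBUFS to fall back to the normal
 * NFS read path if the inode has no cache cookie.
 */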
int nfs_netfs_read_folio(struct file *file, struct folio *folio)
{
	if (!netfs_inode(folio_inode(folio))->cache)
		return -ENOBUFS;

	return netfs_read_folio(file, folio);
}

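/*
 * Perform readahead through netfs, or return -ENOBUFS to fall back to the
 * normal NFS readahead path if the inode has no cache cookie.
 */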
int nfs_netfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;

	if (!netfs_inode(inode)->cache)
		return -ENOBUFS;

	netfs_readahead(ractl);
	return 0;
}

static atomic_t nfs_netfs_debug_id;
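/*
 * Initialise a netfs I/O request: pin the file's open context for the
 * duration of the request and assign a debug ID.
 */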
static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
	__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);

	return 0;
}

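/*
 * Release the open context pinned by nfs_netfs_init_request().
 */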
static void nfs_netfs_free_request(struct netfs_io_request *rreq)
{
	put_nfs_open_context(rreq->netfs_priv);
}

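/*
 * Allocate the refcounted record that links a netfs subrequest to the NFS
 * read RPCs issued on its behalf.
 */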
static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data *netfs;

	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
	if (!netfs)
		return NULL;
	netfs->sreq = sreq;
	refcount_set(&netfs->refcount, 1);
	return netfs;
}

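/*
 * Clamp a netfs subrequest to the server's negotiated rsize.
 */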
static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
{
	size_t	rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;

	sreq->len = min(sreq->len, rsize);
	return true;
}

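/*
 * Issue NFS reads for a netfs subrequest: walk the pagecache pages covering
 * the remaining byte range and add each folio to a read pageio descriptor,
 * which is then submitted.  Errors are recorded and reported on completion.
 */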
static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
{
	struct nfs_netfs_io_data	*netfs;
	struct nfs_pageio_descriptor	pgio;
	struct inode *inode = sreq->rreq->inode;
	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
	struct page *page;
	unsigned long idx;
	int err;
	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
	pgoff_t last = ((sreq->start + sreq->len -
			 sreq->transferred - 1) >> PAGE_SHIFT);

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	netfs = nfs_netfs_alloc(sreq);
	if (!netfs)
		return netfs_subreq_terminated(sreq, -ENOMEM, false);

	pgio.pg_netfs = netfs; /* used in completion */

	xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs  */
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
	}
out:
	nfs_pageio_complete_read(&pgio);
	nfs_netfs_put(netfs);
}

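/*
 * An NFS read RPC is being dispatched for netfs; take a reference on the
 * netfs I/O record until the matching completion.
 */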
void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data        *netfs = hdr->netfs;

	if (!netfs)
		return;

	nfs_netfs_get(netfs);
}

int nfs_netfs_folio_unlock(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;

	/*
	 * If fscache is enabled, netfs will unlock pages.
	 */
	if (netfs_inode(inode)->cache)
		return 0;

	return 1;
}

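/*
 * An NFS read RPC issued for netfs has completed: record the error or the
 * byte count transferred, mark the subrequest for tail clearing at EOF
 * (except for direct I/O), and drop the reference taken at dispatch.
 */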
void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_netfs_io_data        *netfs = hdr->netfs;
	struct netfs_io_subrequest      *sreq;

	if (!netfs)
		return;

	sreq = netfs->sreq;
	if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
	    sreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);

	if (hdr->error)
		netfs->error = hdr->error;
	else
		atomic64_add(hdr->res.count, &netfs->transferred);

	nfs_netfs_put(netfs);
	hdr->netfs = NULL;
}

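/*
 * Operations by which the netfs library drives NFS reads.
 */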
const struct netfs_request_ops nfs_netfs_ops = {
	.init_request		= nfs_netfs_init_request,
	.free_request		= nfs_netfs_free_request,
	.issue_read		= nfs_netfs_issue_read,
	.clamp_length		= nfs_netfs_clamp_length
};