Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 582792 Details for
Bug 814626
Kernel panic with 5.8 nfs clients
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly; please enable it.
[patch]
fix oops due to overrunning server's page array
TMP (text/plain), 8.98 KB, created by
J. Bruce Fields
on 2012-05-07 22:24:32 UTC
(
hide
)
Description:
fix oops due to overrunning server's page array
Filename:
MIME Type:
Creator:
J. Bruce Fields
Created:
2012-05-07 22:24:32 UTC
Size:
8.98 KB
patch
obsolete
>commit 4aafa2883a7925565aa4af20b0230262aa0d8a1e >Author: J. Bruce Fields <bfields@redhat.com> >Date: Mon May 7 16:00:23 2012 -0400 > > svcrpc: fix oops due to overrunning server's page array > > The rq_pages array has 1MB/PAGE_SIZE+2 elements. The loop in svc_recv > attempts to allocate sv_bufsz/PAGE_SIZE+2 pages. But the NFS server is > setting sv_bufsiz to over a megabyte, with the result that svc_recv may > attempt to allocate sv_bufsz/PAGE_SIZE+3 pages and run past the end of > the array, overwriting rq_respages. > > This was fixed upstream by this patch, with the following changelog, > which doesn't note the bug described above. > > Note to preserve kabi, instead of replacing the sv_bufsz field, we > retain it and append only one new field (sv_max_mesg). To prevent > gratuitous divergence from upstream we define a macro allowing us to > refer to sv_max_mesg as sv_max_payload. > > Upstream changelog: > > There is some confusion about the meaning of 'bufsz' for a sunrpc server. > In some cases it is the largest message that can be sent or received. In > other cases it is the largest 'payload' that can be included in a NFS > message. > > In either case, it is not possible for both the request and the reply to be > this large. One of the request or reply may only be one page long, which > fits nicely with NFS. > > So we remove 'bufsz' and replace it with two numbers: 'max_payload' and > 'max_mesg'. Max_payload is the size that the server requests. It is used > by the server to check the max size allowed on a particular connection: > depending on the protocol a lower limit might be used. > > max_mesg is the largest single message that can be sent or received. It is > calculated as the max_payload, rounded up to a multiple of PAGE_SIZE, and > with PAGE_SIZE added to overhead. Only one of the request and reply may be > this size. The other must be at most one page. > > Cc: Greg Banks <gnb@sgi.com> > Cc: "J. 
Bruce Fields" <bfields@fieldses.org> > Signed-off-by: Neil Brown <neilb@suse.de> > Signed-off-by: Andrew Morton <akpm@osdl.org> > Signed-off-by: Linus Torvalds <torvalds@osdl.org> > > Bugzilla: 814626 > Upstream: c6b0a9f87b82f25fa35206ec04b5160372eabab4 > >diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c >index 71b785f..5514111 100644 >--- a/fs/nfsd/nfssvc.c >+++ b/fs/nfsd/nfssvc.c >@@ -230,7 +230,7 @@ int nfsd_create_serv(void) > } > > atomic_set(&nfsd_busy, 0); >- nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE - NFSSVC_MAXBLKSIZE + nfsd_max_blksize); >+ nfsd_serv = svc_create(&nfsd_program, nfsd_max_blksize); > if (nfsd_serv == NULL) > err = -ENOMEM; > svc_shutdown(nfsd_serv, nfsd_last_thread); >diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h >index 7bea29d..b61e8f3 100644 >--- a/include/linux/sunrpc/svc.h >+++ b/include/linux/sunrpc/svc.h >@@ -35,7 +35,7 @@ struct svc_serv { > struct svc_stat * sv_stats; /* RPC statistics */ > spinlock_t sv_lock; > unsigned int sv_nrthreads; /* # of server threads */ >- unsigned int sv_bufsz; /* datagram buffer size */ >+ unsigned int sv_bufsz; > unsigned int sv_xdrsize; /* XDR buffer size */ > > struct list_head sv_permsocks; /* all permanent sockets */ >@@ -51,6 +51,8 @@ struct svc_serv { > unsigned int sv_maxconn; /* max connections allowed or > * '0' causing max to be based > * on number of threads. */ >+#define sv_max_payload sv_max_mesg /* datagram payload size */ >+ unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */ > #endif > }; > >diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c >index 8bf9d71..81ada60 100644 >--- a/net/sunrpc/svc.c >+++ b/net/sunrpc/svc.c >@@ -39,7 +39,10 @@ svc_create(struct svc_program *prog, unsigned int bufsize) > serv->sv_program = prog; > serv->sv_nrthreads = 1; > serv->sv_stats = prog->pg_stats; >- serv->sv_bufsz = bufsize? bufsize : 4096; >+ if (bufsize > RPCSVC_MAXPAYLOAD) >+ bufsize = RPCSVC_MAXPAYLOAD; >+ serv->sv_max_payload = bufsize? 
bufsize : 4096; >+ serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); > xdrsize = 0; > while (prog) { > prog->pg_lovers = prog->pg_nvers-1; >@@ -119,9 +122,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) > int pages; > int arghi; > >- if (size > RPCSVC_MAXPAYLOAD) >- size = RPCSVC_MAXPAYLOAD; >- pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE; >+ pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. >+ * We assume one is at most one page >+ */ > arghi = 0; > BUG_ON(pages > RPCSVC_MAXPAGES); > while (pages) { >@@ -163,7 +166,7 @@ svc_create_thread(svc_thread_fn func, struct svc_serv *serv) > > if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) > || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL)) >- || !svc_init_buffer(rqstp, serv->sv_bufsz)) >+ || !svc_init_buffer(rqstp, serv->sv_max_mesg)) > goto out_thread; > > serv->sv_nrthreads++; >@@ -502,8 +505,8 @@ u32 svc_max_payload(const struct svc_rqst *rqstp) > > if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM) > max = RPCSVC_MAXPAYLOAD_UDP; >- if (rqstp->rq_server->sv_bufsz < max) >- max = rqstp->rq_server->sv_bufsz; >+ if (rqstp->rq_server->sv_max_payload < max) >+ max = rqstp->rq_server->sv_max_payload; > return max; > } > EXPORT_SYMBOL_GPL(svc_max_payload); >diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c >index a0edff9..b840ea5 100644 >--- a/net/sunrpc/svcsock.c >+++ b/net/sunrpc/svcsock.c >@@ -200,13 +200,13 @@ svc_sock_enqueue(struct svc_sock *svsk) > } > > set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); >- if (((svsk->sk_reserved + serv->sv_bufsz)*2 >+ if (((svsk->sk_reserved + serv->sv_max_mesg)*2 > > svc_sock_wspace(svsk)) > && !test_bit(SK_CLOSE, &svsk->sk_flags) > && !test_bit(SK_CONN, &svsk->sk_flags)) { > /* Don't enqueue while not enough space for reply */ > dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n", >- svsk->sk_sk, svsk->sk_reserved+serv->sv_bufsz, >+ svsk->sk_sk, 
svsk->sk_reserved+serv->sv_max_mesg, > svc_sock_wspace(svsk)); > goto out_unlock; > } >@@ -231,7 +231,7 @@ svc_sock_enqueue(struct svc_sock *svsk) > rqstp, rqstp->rq_sock); > rqstp->rq_sock = svsk; > svsk->sk_inuse++; >- rqstp->rq_reserved = serv->sv_bufsz; >+ rqstp->rq_reserved = serv->sv_max_mesg; > svsk->sk_reserved += rqstp->rq_reserved; > wake_up(&rqstp->rq_wait); > } else { >@@ -649,8 +649,8 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) > * for one reply per thread. > */ > svc_sock_setbufsize(svsk->sk_sock, >- (serv->sv_nrthreads+3) * serv->sv_bufsz, >- (serv->sv_nrthreads+3) * serv->sv_bufsz); >+ (serv->sv_nrthreads+3) * serv->sv_max_mesg, >+ (serv->sv_nrthreads+3) * serv->sv_max_mesg); > > if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) { > svc_sock_received(svsk); >@@ -759,8 +759,8 @@ svc_udp_init(struct svc_sock *svsk) > * svc_udp_recvfrom will re-adjust if necessary > */ > svc_sock_setbufsize(svsk->sk_sock, >- 3 * svsk->sk_server->sv_bufsz, >- 3 * svsk->sk_server->sv_bufsz); >+ 3 * svsk->sk_server->sv_max_mesg, >+ 3 * svsk->sk_server->sv_max_mesg); > > set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */ > set_bit(SK_CHNGBUF, &svsk->sk_flags); >@@ -1009,8 +1009,8 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp) > * as soon a a complete request arrives. 
> */ > svc_sock_setbufsize(svsk->sk_sock, >- (serv->sv_nrthreads+3) * serv->sv_bufsz, >- 3 * serv->sv_bufsz); >+ (serv->sv_nrthreads+3) * serv->sv_max_mesg, >+ 3 * serv->sv_max_mesg); > > clear_bit(SK_DATA, &svsk->sk_flags); > >@@ -1048,7 +1048,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp) > } > svsk->sk_reclen &= 0x7fffffff; > dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); >- if (svsk->sk_reclen > serv->sv_bufsz) { >+ if (svsk->sk_reclen > serv->sv_max_mesg) { > printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n", > (unsigned long) svsk->sk_reclen); > goto err_delete; >@@ -1187,8 +1187,8 @@ svc_tcp_init(struct svc_sock *svsk) > * svc_tcp_recvfrom will re-adjust if necessary > */ > svc_sock_setbufsize(svsk->sk_sock, >- 3 * svsk->sk_server->sv_bufsz, >- 3 * svsk->sk_server->sv_bufsz); >+ 3 * svsk->sk_server->sv_max_mesg, >+ 3 * svsk->sk_server->sv_max_mesg); > > set_bit(SK_CHNGBUF, &svsk->sk_flags); > set_bit(SK_DATA, &svsk->sk_flags); >@@ -1246,7 +1246,7 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout) > > > /* now allocate needed pages. If we get a failure, sleep briefly */ >- pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; >+ pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; > for (i=0; i < pages ; i++) > while (rqstp->rq_pages[i] == NULL) { > struct page *p = alloc_page(GFP_KERNEL); >@@ -1292,7 +1292,7 @@ svc_recv(struct svc_serv *serv, struct svc_rqst *rqstp, long timeout) > } else if ((svsk = svc_sock_dequeue(serv)) != NULL) { > rqstp->rq_sock = svsk; > svsk->sk_inuse++; >- rqstp->rq_reserved = serv->sv_bufsz; >+ rqstp->rq_reserved = serv->sv_max_mesg; > svsk->sk_reserved += rqstp->rq_reserved; > } else { > /* No data pending. Go to sleep */
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 814626
:
578932
|
581493
|
582792
|
583282
|
584796