--- /dev/null
+From 2a596fc9d974bb040eda9ab70bf8756fcaaa6afe Mon Sep 17 00:00:00 2001
+From: Jonathan Liu <net147@gmail.com>
+Date: Mon, 10 Jul 2017 16:55:04 +1000
+Subject: drm/sun4i: Implement drm_driver lastclose to restore fbdev console
+
+From: Jonathan Liu <net147@gmail.com>
+
+commit 2a596fc9d974bb040eda9ab70bf8756fcaaa6afe upstream.
+
+The drm_driver lastclose callback is called when the last userspace
+DRM client has closed. Call drm_fbdev_cma_restore_mode to restore
+the fbdev console; otherwise the fbdev console will stop working.
+
+Fixes: 9026e0d122ac ("drm: Add Allwinner A10 Display Engine support")
+Tested-by: Olliver Schinagl <oliver@schinagl.nl>
+Reviewed-by: Chen-Yu Tsai <wens@csie.org>
+Signed-off-by: Jonathan Liu <net147@gmail.com>
+Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
+[net147@gmail.com: Backport to 4.9, minor context change]
+Signed-off-by: Jonathan Liu <net147@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/sun4i/sun4i_drv.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -47,6 +47,13 @@ static void sun4i_drv_disable_vblank(str
+ sun4i_tcon_enable_vblank(tcon, false);
+ }
+
++static void sun4i_drv_lastclose(struct drm_device *dev)
++{
++ struct sun4i_drv *drv = dev->dev_private;
++
++ drm_fbdev_cma_restore_mode(drv->fbdev);
++}
++
+ static const struct file_operations sun4i_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+@@ -65,6 +72,7 @@ static struct drm_driver sun4i_drv_drive
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+
+ /* Generic Operations */
++ .lastclose = sun4i_drv_lastclose,
+ .fops = &sun4i_drv_fops,
+ .name = "sun4i-drm",
+ .desc = "Allwinner sun4i Display Engine",
--- /dev/null
+From 5b0ef650bd0f820e922fcc42f1985d4621ae19cf Mon Sep 17 00:00:00 2001
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Mon, 21 Aug 2017 18:26:20 -0700
+Subject: IB/{qib, hfi1}: Avoid flow control testing for RDMA write operation
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+commit 5b0ef650bd0f820e922fcc42f1985d4621ae19cf upstream.
+
+Section 9.7.7.2.5 of the 1.3 IBTA spec clearly says that receive
+credits should never apply to RDMA write.
+
+qib and hfi1 were nevertheless applying them. The following situation
+will result in a QP hang:
+- A prior SEND or RDMA_WRITE with immediate consumed the last
+  credit for a QP using RC receive buffer credits
+- The prior op is acked, so no further acks are coming
+- The peer ULP fails to post a receive for some reason
+- An RDMA write sees that the credits are exhausted and waits
+- The peer ULP posts receive buffers
+- The ULP posts a send or RDMA write that will now hang
+
+The fix is to avoid the credit test for the RDMA write operation.
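+
+A condensed sketch of the control-flow change, stitched together from
+the two hfi1 hunks below (illustrative only, not the full
+hfi1_make_rc_req()):
+
+	case IB_WR_RDMA_WRITE:
+		if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+			qp->s_lsn++;
+		/* a plain write consumes no receive buffer on the peer */
+		goto no_flow_control;
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		/* only the immediate variant needs a receive credit */
+		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+		    cmp_psn(wqe->ssn, qp->s_lsn + 1) > 0) {
+			qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+			goto bail;	/* wait for the peer to post buffers */
+		}
+	no_flow_control:
+		/* build the RETH and queue the request as before */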
+
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Doug Ledford <dledford@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/hw/hfi1/rc.c | 3 ++-
+ drivers/infiniband/hw/qib/qib_rc.c | 4 ++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -551,7 +551,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp,
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+- /* FALLTHROUGH */
++ goto no_flow_control;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+@@ -559,6 +559,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp,
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
++no_flow_control:
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr,
+ &ohdr->u.rc.reth);
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -357,7 +357,7 @@ int qib_make_rc_req(struct rvt_qp *qp, u
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+- /* FALLTHROUGH */
++ goto no_flow_control;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+@@ -365,7 +365,7 @@ int qib_make_rc_req(struct rvt_qp *qp, u
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+-
++no_flow_control:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->rdma_wr.remote_addr);
+ ohdr->u.rc.reth.rkey =
--- /dev/null
+From 4855e4a7f29d6d10b0b9c84e189c770c9a94e91e Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Mon, 12 Dec 2016 16:42:08 -0800
+Subject: mm: prevent double decrease of nr_reserved_highatomic
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit 4855e4a7f29d6d10b0b9c84e189c770c9a94e91e upstream.
+
+There is a race between page freeing and unreserving a highatomic pageblock.
+
+ CPU 0                                CPU 1
+
+ free_hot_cold_page
+   mt = get_pfnblock_migratetype
+   set_pcppage_migratetype(page, mt)
+                                      unreserve_highatomic_pageblock
+                                      spin_lock_irqsave(&zone->lock)
+                                      move_freepages_block
+                                      set_pageblock_migratetype(page)
+                                      spin_unlock_irqrestore(&zone->lock)
+ free_pcppages_bulk
+   __free_one_page(mt) <- mt is stale
+
+Because of this race, a page on CPU 0 can end up on a non-highatomic
+free list, since the pageblock's type has been changed. The highatomic
+unreserve logic can then decrease the reserved count for the same
+pageblock several times, leaving nr_reserved_highatomic out of sync
+with the actual number of reserved pageblocks.
+
+So, this patch checks whether the pageblock is still highatomic and
+decreases the count only if it is.
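+
+A condensed sketch of the guarded decrement (from the hunk below; the
+walk over the zone's reserved pageblocks is elided):
+
+	/* only pageblocks still marked highatomic hold a reservation */
+	if (get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC)
+		/* clamp so a racy repeat cannot underflow the counter */
+		zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
+						    zone->nr_reserved_highatomic);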
+
+Link: http://lkml.kernel.org/r/1476259429-18279-3-git-send-email-minchan@kernel.org
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Sangseok Lee <sangseok.lee@lge.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Miles Chen <miles.chen@mediatek.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2100,13 +2100,25 @@ static void unreserve_highatomic_pageblo
+ continue;
+
+ /*
+- * It should never happen but changes to locking could
+- * inadvertently allow a per-cpu drain to add pages
+- * to MIGRATE_HIGHATOMIC while unreserving so be safe
+- * and watch for underflows.
++ * In the page freeing path, the migratetype change is racy, so
++ * we can encounter several free pages of a pageblock
++ * in this loop although we changed the pageblock type
++ * from highatomic to ac->migratetype. So we should
++ * adjust the count only once.
+ */
+- zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
+- zone->nr_reserved_highatomic);
++ if (get_pageblock_migratetype(page) ==
++ MIGRATE_HIGHATOMIC) {
++ /*
++ * It should never happen but changes to
++ * locking could inadvertently allow a per-cpu
++ * drain to add pages to MIGRATE_HIGHATOMIC
++ * while unreserving so be safe and watch for
++ * underflows.
++ */
++ zone->nr_reserved_highatomic -= min(
++ pageblock_nr_pages,
++ zone->nr_reserved_highatomic);
++ }
+
+ /*
+ * Convert to ac->migratetype and avoid the normal
--- /dev/null
+From ed6473ddc704a2005b9900ca08e236ebb2d8540a Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Wed, 26 Apr 2017 11:55:27 -0400
+Subject: NFSv4: Fix callback server shutdown
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit ed6473ddc704a2005b9900ca08e236ebb2d8540a upstream.
+
+We want to use kthread_stop() in order to ensure the threads are
+shut down before we tear down the nfs_callback_info in nfs_callback_down.
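+
+kthread_stop() can only work if the threads poll for the stop condition
+rather than sleep waiting for a signal; a condensed sketch of the
+resulting loop shape (taken from the nfs4_callback_svc hunks below):
+
+	while (!kthread_freezable_should_stop(NULL)) {
+		if (signal_pending(current))
+			flush_signals(current);
+		/* listen for and process one callback request */
+	}
+	/* reached only after kthread_stop() has been requested */
+	svc_exit_thread(rqstp);
+	module_put_and_exit(0);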
+
+Tested-and-reviewed-by: Kinglong Mee <kinglongmee@gmail.com>
+Reported-by: Kinglong Mee <kinglongmee@gmail.com>
+Fixes: bb6aeba736ba9 ("NFSv4.x: Switch to using svc_set_num_threads()...")
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Cc: Jan Hudoba <kernel@jahu.sk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/callback.c | 24 ++++++++++++++++--------
+ include/linux/sunrpc/svc.h | 1 +
+ net/sunrpc/svc.c | 38 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 55 insertions(+), 8 deletions(-)
+
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -75,7 +75,10 @@ nfs4_callback_svc(void *vrqstp)
+
+ set_freezable();
+
+- while (!kthread_should_stop()) {
++ while (!kthread_freezable_should_stop(NULL)) {
++
++ if (signal_pending(current))
++ flush_signals(current);
+ /*
+ * Listen for a request on the socket
+ */
+@@ -84,6 +87,8 @@ nfs4_callback_svc(void *vrqstp)
+ continue;
+ svc_process(rqstp);
+ }
++ svc_exit_thread(rqstp);
++ module_put_and_exit(0);
+ return 0;
+ }
+
+@@ -102,9 +107,10 @@ nfs41_callback_svc(void *vrqstp)
+
+ set_freezable();
+
+- while (!kthread_should_stop()) {
+- if (try_to_freeze())
+- continue;
++ while (!kthread_freezable_should_stop(NULL)) {
++
++ if (signal_pending(current))
++ flush_signals(current);
+
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+@@ -120,11 +126,13 @@ nfs41_callback_svc(void *vrqstp)
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+- schedule();
++ if (!kthread_should_stop())
++ schedule();
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+- flush_signals(current);
+ }
++ svc_exit_thread(rqstp);
++ module_put_and_exit(0);
+ return 0;
+ }
+
+@@ -220,14 +228,14 @@ err_bind:
+ static struct svc_serv_ops nfs40_cb_sv_ops = {
+ .svo_function = nfs4_callback_svc,
+ .svo_enqueue_xprt = svc_xprt_do_enqueue,
+- .svo_setup = svc_set_num_threads,
++ .svo_setup = svc_set_num_threads_sync,
+ .svo_module = THIS_MODULE,
+ };
+ #if defined(CONFIG_NFS_V4_1)
+ static struct svc_serv_ops nfs41_cb_sv_ops = {
+ .svo_function = nfs41_callback_svc,
+ .svo_enqueue_xprt = svc_xprt_do_enqueue,
+- .svo_setup = svc_set_num_threads,
++ .svo_setup = svc_set_num_threads_sync,
+ .svo_module = THIS_MODULE,
+ };
+
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -470,6 +470,7 @@ void svc_pool_map_put(void);
+ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
+ struct svc_serv_ops *);
+ int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
++int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
+ int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+ void svc_destroy(struct svc_serv *);
+ void svc_shutdown_net(struct svc_serv *, struct net *);
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -795,6 +795,44 @@ svc_set_num_threads(struct svc_serv *ser
+ }
+ EXPORT_SYMBOL_GPL(svc_set_num_threads);
+
++/* destroy old threads */
++static int
++svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ struct task_struct *task;
++ unsigned int state = serv->sv_nrthreads-1;
++
++ /* destroy old threads */
++ do {
++ task = choose_victim(serv, pool, &state);
++ if (task == NULL)
++ break;
++ kthread_stop(task);
++ nrservs++;
++ } while (nrservs < 0);
++ return 0;
++}
++
++int
++svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ if (pool == NULL) {
++ /* The -1 assumes caller has done a svc_get() */
++ nrservs -= (serv->sv_nrthreads-1);
++ } else {
++ spin_lock_bh(&pool->sp_lock);
++ nrservs -= pool->sp_nrthreads;
++ spin_unlock_bh(&pool->sp_lock);
++ }
++
++ if (nrservs > 0)
++ return svc_start_kthreads(serv, pool, nrservs);
++ if (nrservs < 0)
++ return svc_stop_kthreads(serv, pool, nrservs);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
++
+ /*
+ * Called from a server thread as it's exiting. Caller must hold the "service
+ * mutex" for the service.
--- /dev/null
+From b5accbb0dfae36d8d36cd882096943c98d5ede15 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 22 Jun 2017 15:31:13 +0200
+Subject: orangefs: Don't clear SGID when inheriting ACLs
+
+From: Jan Kara <jack@suse.cz>
+
+commit b5accbb0dfae36d8d36cd882096943c98d5ede15 upstream.
+
+When a new directory 'DIR1' is created in a directory 'DIR0' with the
+SGID bit set, DIR1 is expected to have the SGID bit set (and its owning
+group equal to the owning group of 'DIR0'). However, when 'DIR0' also
+has some default ACLs that 'DIR1' inherits, setting these ACLs results
+in the SGID bit on 'DIR1' getting cleared if the user is not a member
+of the owning group.
+
+Fix the problem by creating an __orangefs_set_acl() function that does
+not call posix_acl_update_mode() and using it when inheriting ACLs.
+That prevents the SGID bit from being cleared; the mode has already
+been properly set by posix_acl_create() anyway.
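+
+A condensed sketch of the resulting split (declarations and error
+handling elided; see the diff below for the real code):
+
+	int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+	{
+		if (type == ACL_TYPE_ACCESS && acl) {
+			/* explicit ACL change: may legitimately drop SGID */
+			error = posix_acl_update_mode(inode, &mode, &acl);
+			...
+		}
+		return __orangefs_set_acl(inode, acl, type);
+	}
+
+	/* inheritance path in orangefs_init_acl(): the mode was already
+	 * set up by posix_acl_create(), so bypass the mode update */
+	error = __orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);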
+
+Fixes: 073931017b49 ("posix_acl: Clear SGID bit when setting file permissions")
+CC: stable@vger.kernel.org
+CC: Mike Marshall <hubcap@omnibond.com>
+CC: pvfs2-developers@beowulf-underground.org
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Mike Marshall <hubcap@omnibond.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/orangefs/acl.c | 48 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 28 insertions(+), 20 deletions(-)
+
+--- a/fs/orangefs/acl.c
++++ b/fs/orangefs/acl.c
+@@ -61,9 +61,9 @@ struct posix_acl *orangefs_get_acl(struc
+ return acl;
+ }
+
+-int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++static int __orangefs_set_acl(struct inode *inode, struct posix_acl *acl,
++ int type)
+ {
+- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ int error = 0;
+ void *value = NULL;
+ size_t size = 0;
+@@ -72,22 +72,6 @@ int orangefs_set_acl(struct inode *inode
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name = XATTR_NAME_POSIX_ACL_ACCESS;
+- if (acl) {
+- umode_t mode;
+-
+- error = posix_acl_update_mode(inode, &mode, &acl);
+- if (error) {
+- gossip_err("%s: posix_acl_update_mode err: %d\n",
+- __func__,
+- error);
+- return error;
+- }
+-
+- if (inode->i_mode != mode)
+- SetModeFlag(orangefs_inode);
+- inode->i_mode = mode;
+- mark_inode_dirty_sync(inode);
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ name = XATTR_NAME_POSIX_ACL_DEFAULT;
+@@ -132,6 +116,29 @@ out:
+ return error;
+ }
+
++int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++{
++ int error;
++
++ if (type == ACL_TYPE_ACCESS && acl) {
++ umode_t mode;
++
++ error = posix_acl_update_mode(inode, &mode, &acl);
++ if (error) {
++ gossip_err("%s: posix_acl_update_mode err: %d\n",
++ __func__,
++ error);
++ return error;
++ }
++
++ if (inode->i_mode != mode)
++ SetModeFlag(ORANGEFS_I(inode));
++ inode->i_mode = mode;
++ mark_inode_dirty_sync(inode);
++ }
++ return __orangefs_set_acl(inode, acl, type);
++}
++
+ int orangefs_init_acl(struct inode *inode, struct inode *dir)
+ {
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+@@ -146,13 +153,14 @@ int orangefs_init_acl(struct inode *inod
+ return error;
+
+ if (default_acl) {
+- error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++ error = __orangefs_set_acl(inode, default_acl,
++ ACL_TYPE_DEFAULT);
+ posix_acl_release(default_acl);
+ }
+
+ if (acl) {
+ if (!error)
+- error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
++ error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ posix_acl_release(acl);
+ }
+
--- /dev/null
+From 9e0d87680d689f1758185851c3da6eafb16e71e1 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Wed, 26 Apr 2017 11:55:26 -0400
+Subject: SUNRPC: Refactor svc_set_num_threads()
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit 9e0d87680d689f1758185851c3da6eafb16e71e1 upstream.
+
+Refactor to separate out the functions of starting and stopping threads
+so that they can be used in other helpers.
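+
+After the refactor, svc_set_num_threads() reduces to computing the
+thread-count delta and dispatching (see the final hunk below;
+current_nr_threads stands in for the serv- or pool-wide bookkeeping):
+
+	nrservs -= current_nr_threads;
+	if (nrservs > 0)
+		return svc_start_kthreads(serv, pool, nrservs);
+	if (nrservs < 0)
+		return svc_signal_kthreads(serv, pool, nrservs);
+	return 0;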
+
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Tested-and-reviewed-by: Kinglong Mee <kinglongmee@gmail.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Cc: Jan Hudoba <kernel@jahu.sk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/svc.c | 96 +++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 58 insertions(+), 38 deletions(-)
+
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -702,59 +702,32 @@ found_pool:
+ return task;
+ }
+
+-/*
+- * Create or destroy enough new threads to make the number
+- * of threads the given number. If `pool' is non-NULL, applies
+- * only to threads in that pool, otherwise round-robins between
+- * all pools. Caller must ensure that mutual exclusion between this and
+- * server startup or shutdown.
+- *
+- * Destroying threads relies on the service threads filling in
+- * rqstp->rq_task, which only the nfs ones do. Assumes the serv
+- * has been created using svc_create_pooled().
+- *
+- * Based on code that used to be in nfsd_svc() but tweaked
+- * to be pool-aware.
+- */
+-int
+-svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++/* create new threads */
++static int
++svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ {
+ struct svc_rqst *rqstp;
+ struct task_struct *task;
+ struct svc_pool *chosen_pool;
+- int error = 0;
+ unsigned int state = serv->sv_nrthreads-1;
+ int node;
+
+- if (pool == NULL) {
+- /* The -1 assumes caller has done a svc_get() */
+- nrservs -= (serv->sv_nrthreads-1);
+- } else {
+- spin_lock_bh(&pool->sp_lock);
+- nrservs -= pool->sp_nrthreads;
+- spin_unlock_bh(&pool->sp_lock);
+- }
+-
+- /* create new threads */
+- while (nrservs > 0) {
++ do {
+ nrservs--;
+ chosen_pool = choose_pool(serv, pool, &state);
+
+ node = svc_pool_map_get_node(chosen_pool->sp_id);
+ rqstp = svc_prepare_thread(serv, chosen_pool, node);
+- if (IS_ERR(rqstp)) {
+- error = PTR_ERR(rqstp);
+- break;
+- }
++ if (IS_ERR(rqstp))
++ return PTR_ERR(rqstp);
+
+ __module_get(serv->sv_ops->svo_module);
+ task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
+ node, "%s", serv->sv_name);
+ if (IS_ERR(task)) {
+- error = PTR_ERR(task);
+ module_put(serv->sv_ops->svo_module);
+ svc_exit_thread(rqstp);
+- break;
++ return PTR_ERR(task);
+ }
+
+ rqstp->rq_task = task;
+@@ -763,15 +736,62 @@ svc_set_num_threads(struct svc_serv *ser
+
+ svc_sock_update_bufs(serv);
+ wake_up_process(task);
+- }
++ } while (nrservs > 0);
++
++ return 0;
++}
++
++
++/* destroy old threads */
++static int
++svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ struct task_struct *task;
++ unsigned int state = serv->sv_nrthreads-1;
++
+ /* destroy old threads */
+- while (nrservs < 0 &&
+- (task = choose_victim(serv, pool, &state)) != NULL) {
++ do {
++ task = choose_victim(serv, pool, &state);
++ if (task == NULL)
++ break;
+ send_sig(SIGINT, task, 1);
+ nrservs++;
++ } while (nrservs < 0);
++
++ return 0;
++}
++
++/*
++ * Create or destroy enough new threads to make the number
++ * of threads the given number. If `pool' is non-NULL, applies
++ * only to threads in that pool, otherwise round-robins between
++ * all pools. Caller must ensure that mutual exclusion between this and
++ * server startup or shutdown.
++ *
++ * Destroying threads relies on the service threads filling in
++ * rqstp->rq_task, which only the nfs ones do. Assumes the serv
++ * has been created using svc_create_pooled().
++ *
++ * Based on code that used to be in nfsd_svc() but tweaked
++ * to be pool-aware.
++ */
++int
++svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ if (pool == NULL) {
++ /* The -1 assumes caller has done a svc_get() */
++ nrservs -= (serv->sv_nrthreads-1);
++ } else {
++ spin_lock_bh(&pool->sp_lock);
++ nrservs -= pool->sp_nrthreads;
++ spin_unlock_bh(&pool->sp_lock);
+ }
+
+- return error;
++ if (nrservs > 0)
++ return svc_start_kthreads(serv, pool, nrservs);
++ if (nrservs < 0)
++ return svc_signal_kthreads(serv, pool, nrservs);
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(svc_set_num_threads);
+