git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Feb 2019 15:01:31 +0000 (16:01 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 13 Feb 2019 15:01:31 +0000 (16:01 +0100)
added patches:
hid-debug-fix-the-ring-buffer-implementation.patch
nfsd4-catch-some-false-session-retries.patch
nfsd4-fix-cached-replies-to-solo-sequence-compounds.patch

queue-4.9/hid-debug-fix-the-ring-buffer-implementation.patch [new file with mode: 0644]
queue-4.9/nfsd4-catch-some-false-session-retries.patch [new file with mode: 0644]
queue-4.9/nfsd4-fix-cached-replies-to-solo-sequence-compounds.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/hid-debug-fix-the-ring-buffer-implementation.patch b/queue-4.9/hid-debug-fix-the-ring-buffer-implementation.patch
new file mode 100644 (file)
index 0000000..cab8f18
--- /dev/null
@@ -0,0 +1,273 @@
+From 13054abbaa4f1fd4e6f3b4b63439ec033b4c8035 Mon Sep 17 00:00:00 2001
+From: Vladis Dronov <vdronov@redhat.com>
+Date: Tue, 29 Jan 2019 11:58:35 +0100
+Subject: HID: debug: fix the ring buffer implementation
+
+From: Vladis Dronov <vdronov@redhat.com>
+
+commit 13054abbaa4f1fd4e6f3b4b63439ec033b4c8035 upstream.
+
+The ring buffer implementation in hid_debug_event() and hid_debug_events_read()
+is strange, allowing data to be lost or corrupted. After commit 717adfdaf147
+("HID: debug: check length before copy_to_user()") it is possible to enter
+an infinite loop in hid_debug_events_read() by providing 0 as count, which
+locks up a system. Fix this by rewriting the ring buffer implementation
+with kfifo and simplifying the code.
+
+This fixes CVE-2019-3819.
+
+v2: fix an execution logic and add a comment
+v3: use __set_current_state() instead of set_current_state()
+
+Backport to v4.9: some tree-wide patches are missing in v4.9 so
+cherry-pick relevant pieces from:
+ * 6396bb22151 ("treewide: kzalloc() -> kcalloc()")
+ * a9a08845e9ac ("vfs: do bulk POLL* -> EPOLL* replacement")
+ * 174cd4b1e5fb ("sched/headers: Prepare to move signal wakeup & sigpending
+   methods from <linux/sched.h> into <linux/sched/signal.h>")
+
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=1669187
+Cc: stable@vger.kernel.org # v4.18+
+Fixes: cd667ce24796 ("HID: use debugfs for events/reports dumping")
+Fixes: 717adfdaf147 ("HID: debug: check length before copy_to_user()")
+Signed-off-by: Vladis Dronov <vdronov@redhat.com>
+Reviewed-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-debug.c   |  122 ++++++++++++++++++----------------------------
+ include/linux/hid-debug.h |    9 +--
+ 2 files changed, 52 insertions(+), 79 deletions(-)
+
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -30,6 +30,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
++#include <linux/kfifo.h>
+ #include <linux/sched.h>
+ #include <linux/export.h>
+ #include <linux/slab.h>
+@@ -455,7 +456,7 @@ static char *resolv_usage_page(unsigned
+       char *buf = NULL;
+       if (!f) {
+-              buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
++              buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+               if (!buf)
+                       return ERR_PTR(-ENOMEM);
+       }
+@@ -659,17 +660,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
+ /* enqueue string to 'events' ring buffer */
+ void hid_debug_event(struct hid_device *hdev, char *buf)
+ {
+-      unsigned i;
+       struct hid_debug_list *list;
+       unsigned long flags;
+       spin_lock_irqsave(&hdev->debug_list_lock, flags);
+-      list_for_each_entry(list, &hdev->debug_list, node) {
+-              for (i = 0; buf[i]; i++)
+-                      list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
+-                              buf[i];
+-              list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
+-        }
++      list_for_each_entry(list, &hdev->debug_list, node)
++              kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
+       spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
+       wake_up_interruptible(&hdev->debug_wait);
+@@ -720,8 +716,7 @@ void hid_dump_input(struct hid_device *h
+       hid_debug_event(hdev, buf);
+       kfree(buf);
+-        wake_up_interruptible(&hdev->debug_wait);
+-
++      wake_up_interruptible(&hdev->debug_wait);
+ }
+ EXPORT_SYMBOL_GPL(hid_dump_input);
+@@ -1086,8 +1081,8 @@ static int hid_debug_events_open(struct
+               goto out;
+       }
+-      if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
+-              err = -ENOMEM;
++      err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
++      if (err) {
+               kfree(list);
+               goto out;
+       }
+@@ -1107,77 +1102,57 @@ static ssize_t hid_debug_events_read(str
+               size_t count, loff_t *ppos)
+ {
+       struct hid_debug_list *list = file->private_data;
+-      int ret = 0, len;
++      int ret = 0, copied;
+       DECLARE_WAITQUEUE(wait, current);
+       mutex_lock(&list->read_mutex);
+-      while (ret == 0) {
+-              if (list->head == list->tail) {
+-                      add_wait_queue(&list->hdev->debug_wait, &wait);
+-                      set_current_state(TASK_INTERRUPTIBLE);
+-
+-                      while (list->head == list->tail) {
+-                              if (file->f_flags & O_NONBLOCK) {
+-                                      ret = -EAGAIN;
+-                                      break;
+-                              }
+-                              if (signal_pending(current)) {
+-                                      ret = -ERESTARTSYS;
+-                                      break;
+-                              }
+-
+-                              if (!list->hdev || !list->hdev->debug) {
+-                                      ret = -EIO;
+-                                      set_current_state(TASK_RUNNING);
+-                                      goto out;
+-                              }
+-
+-                              /* allow O_NONBLOCK from other threads */
+-                              mutex_unlock(&list->read_mutex);
+-                              schedule();
+-                              mutex_lock(&list->read_mutex);
+-                              set_current_state(TASK_INTERRUPTIBLE);
++      if (kfifo_is_empty(&list->hid_debug_fifo)) {
++              add_wait_queue(&list->hdev->debug_wait, &wait);
++              set_current_state(TASK_INTERRUPTIBLE);
++
++              while (kfifo_is_empty(&list->hid_debug_fifo)) {
++                      if (file->f_flags & O_NONBLOCK) {
++                              ret = -EAGAIN;
++                              break;
+                       }
+-                      set_current_state(TASK_RUNNING);
+-                      remove_wait_queue(&list->hdev->debug_wait, &wait);
+-              }
+-
+-              if (ret)
+-                      goto out;
+-
+-              /* pass the ringbuffer contents to userspace */
+-copy_rest:
+-              if (list->tail == list->head)
+-                      goto out;
+-              if (list->tail > list->head) {
+-                      len = list->tail - list->head;
+-                      if (len > count)
+-                              len = count;
+-
+-                      if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
+-                              ret = -EFAULT;
+-                              goto out;
++                      if (signal_pending(current)) {
++                              ret = -ERESTARTSYS;
++                              break;
+                       }
+-                      ret += len;
+-                      list->head += len;
+-              } else {
+-                      len = HID_DEBUG_BUFSIZE - list->head;
+-                      if (len > count)
+-                              len = count;
+-                      if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
+-                              ret = -EFAULT;
++                      /* if list->hdev is NULL we cannot remove_wait_queue().
++                       * if list->hdev->debug is 0 then hid_debug_unregister()
++                       * was already called and list->hdev is being destroyed.
++                       * if we add remove_wait_queue() here we can hit a race.
++                       */
++                      if (!list->hdev || !list->hdev->debug) {
++                              ret = -EIO;
++                              set_current_state(TASK_RUNNING);
+                               goto out;
+                       }
+-                      list->head = 0;
+-                      ret += len;
+-                      count -= len;
+-                      if (count > 0)
+-                              goto copy_rest;
++
++                      /* allow O_NONBLOCK from other threads */
++                      mutex_unlock(&list->read_mutex);
++                      schedule();
++                      mutex_lock(&list->read_mutex);
++                      set_current_state(TASK_INTERRUPTIBLE);
+               }
++              __set_current_state(TASK_RUNNING);
++              remove_wait_queue(&list->hdev->debug_wait, &wait);
++
++              if (ret)
++                      goto out;
+       }
++
++      /* pass the fifo content to userspace, locking is not needed with only
++       * one concurrent reader and one concurrent writer
++       */
++      ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
++      if (ret)
++              goto out;
++      ret = copied;
+ out:
+       mutex_unlock(&list->read_mutex);
+       return ret;
+@@ -1188,7 +1163,7 @@ static unsigned int hid_debug_events_pol
+       struct hid_debug_list *list = file->private_data;
+       poll_wait(file, &list->hdev->debug_wait, wait);
+-      if (list->head != list->tail)
++      if (!kfifo_is_empty(&list->hid_debug_fifo))
+               return POLLIN | POLLRDNORM;
+       if (!list->hdev->debug)
+               return POLLERR | POLLHUP;
+@@ -1203,7 +1178,7 @@ static int hid_debug_events_release(stru
+       spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
+       list_del(&list->node);
+       spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
+-      kfree(list->hid_debug_buf);
++      kfifo_free(&list->hid_debug_fifo);
+       kfree(list);
+       return 0;
+@@ -1254,4 +1229,3 @@ void hid_debug_exit(void)
+ {
+       debugfs_remove_recursive(hid_debug_root);
+ }
+-
+--- a/include/linux/hid-debug.h
++++ b/include/linux/hid-debug.h
+@@ -24,7 +24,10 @@
+ #ifdef CONFIG_DEBUG_FS
++#include <linux/kfifo.h>
++
+ #define HID_DEBUG_BUFSIZE 512
++#define HID_DEBUG_FIFOSIZE 512
+ void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
+ void hid_dump_report(struct hid_device *, int , u8 *, int);
+@@ -37,11 +40,8 @@ void hid_debug_init(void);
+ void hid_debug_exit(void);
+ void hid_debug_event(struct hid_device *, char *);
+-
+ struct hid_debug_list {
+-      char *hid_debug_buf;
+-      int head;
+-      int tail;
++      DECLARE_KFIFO_PTR(hid_debug_fifo, char);
+       struct fasync_struct *fasync;
+       struct hid_device *hdev;
+       struct list_head node;
+@@ -64,4 +64,3 @@ struct hid_debug_list {
+ #endif
+ #endif
+-
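The essence of the fix above is replacing the hand-rolled head/tail buffer with a
kfifo that a single writer fills in atomic context and a single reader drains with
kfifo_to_user(). The following is a minimal standalone sketch of that pattern; the
demo_* identifiers and DEMO_FIFOSIZE are illustrative only and appear neither in the
patch nor in the kernel tree.

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#define DEMO_FIFOSIZE 512

struct demo_list {
	DECLARE_KFIFO_PTR(fifo, char);	/* byte fifo, storage allocated at open time */
};

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

/* open: allocate the fifo storage */
static int demo_open(struct demo_list *list)
{
	return kfifo_alloc(&list->fifo, DEMO_FIFOSIZE, GFP_KERNEL);
}

/* writer: may run in atomic context, so only kfifo_in() under the lock */
static void demo_event(struct demo_list *list, const char *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	kfifo_in(&list->fifo, buf, strlen(buf));	/* drops bytes that do not fit */
	spin_unlock_irqrestore(&demo_lock, flags);

	wake_up_interruptible(&demo_wait);
}

/* reader: copy whatever is queued straight to userspace */
static ssize_t demo_read(struct demo_list *list, char __user *buffer, size_t count)
{
	unsigned int copied;
	int ret;

	if (kfifo_is_empty(&list->fifo))
		return -EAGAIN;		/* the real code sleeps on the waitqueue here */

	ret = kfifo_to_user(&list->fifo, buffer, count, &copied);
	return ret ? ret : copied;
}

/* release: free the fifo storage */
static void demo_release(struct demo_list *list)
{
	kfifo_free(&list->fifo);
}

Compared with the open-coded head/tail arithmetic, kfifo_in() never overwrites unread
data (it simply drops what does not fit) and kfifo_to_user() handles wrap-around and
the user copy in one call, so a read with count == 0 copies nothing and returns 0
instead of spinning.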
diff --git a/queue-4.9/nfsd4-catch-some-false-session-retries.patch b/queue-4.9/nfsd4-catch-some-false-session-retries.patch
new file mode 100644 (file)
index 0000000..100a822
--- /dev/null
@@ -0,0 +1,115 @@
+From 53da6a53e1d414e05759fa59b7032ee08f4e22d7 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfields@redhat.com>
+Date: Tue, 17 Oct 2017 20:38:49 -0400
+Subject: nfsd4: catch some false session retries
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+commit 53da6a53e1d414e05759fa59b7032ee08f4e22d7 upstream.
+
+The spec allows us to return NFS4ERR_SEQ_FALSE_RETRY if we notice that
+the client is making a call that matches a previous (slot, seqid) pair
+but that *isn't* actually a replay, because some detail of the call
+doesn't actually match the previous one.
+
+Catching every such case is difficult, but we may as well catch a few
+easy ones.  This also handles the case described in the previous patch,
+in a different way.
+
+The spec does however require us to catch the case where the difference
+is in the rpc credentials.  This prevents somebody from snooping another
+user's replies by fabricating retries.
+
+(But the practical value of the attack is limited by the fact that the
+replies with the most sensitive data are READ replies, which are not
+normally cached.)
+
+Tested-by: Olga Kornievskaia <aglo@umich.edu>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Cc: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ fs/nfsd/nfs4state.c |   37 ++++++++++++++++++++++++++++++++++++-
+ fs/nfsd/state.h     |    1 +
+ 2 files changed, 37 insertions(+), 1 deletion(-)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1472,8 +1472,10 @@ free_session_slots(struct nfsd4_session
+ {
+       int i;
+-      for (i = 0; i < ses->se_fchannel.maxreqs; i++)
++      for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
++              free_svc_cred(&ses->se_slots[i]->sl_cred);
+               kfree(ses->se_slots[i]);
++      }
+ }
+ /*
+@@ -2347,6 +2349,8 @@ nfsd4_store_cache_entry(struct nfsd4_com
+       slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
+       slot->sl_opcnt = resp->opcnt;
+       slot->sl_status = resp->cstate.status;
++      free_svc_cred(&slot->sl_cred);
++      copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
+       if (!nfsd4_cache_this(resp)) {
+               slot->sl_flags &= ~NFSD4_SLOT_CACHED;
+@@ -3049,6 +3053,34 @@ static bool nfsd4_request_too_big(struct
+       return xb->len > session->se_fchannel.maxreq_sz;
+ }
++static bool replay_matches_cache(struct svc_rqst *rqstp,
++               struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
++{
++      struct nfsd4_compoundargs *argp = rqstp->rq_argp;
++
++      if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
++          (bool)seq->cachethis)
++              return false;
++      /*
++       * If there's an error then the reply can have fewer ops than
++       * the call.  But if we cached a reply with *more* ops than the
++       * call you're sending us now, then this new call is clearly not
++       * really a replay of the old one:
++       */
++      if (slot->sl_opcnt < argp->opcnt)
++              return false;
++      /* This is the only check explicitly called by spec: */
++      if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
++              return false;
++      /*
++       * There may be more comparisons we could actually do, but the
++       * spec doesn't require us to catch every case where the calls
++       * don't match (that would require caching the call as well as
++       * the reply), so we don't bother.
++       */
++      return true;
++}
++
+ __be32
+ nfsd4_sequence(struct svc_rqst *rqstp,
+              struct nfsd4_compound_state *cstate,
+@@ -3108,6 +3140,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
+               status = nfserr_seq_misordered;
+               if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
+                       goto out_put_session;
++              status = nfserr_seq_false_retry;
++              if (!replay_matches_cache(rqstp, seq, slot))
++                      goto out_put_session;
+               cstate->slot = slot;
+               cstate->session = session;
+               cstate->clp = clp;
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -169,6 +169,7 @@ static inline struct nfs4_delegation *de
+ struct nfsd4_slot {
+       u32     sl_seqid;
+       __be32  sl_status;
++      struct svc_cred sl_cred;
+       u32     sl_datalen;
+       u16     sl_opcnt;
+ #define NFSD4_SLOT_INUSE      (1 << 0)
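For reference, the three tests added by replay_matches_cache() above boil down to the
standalone predicate below. The demo_* structures are simplified stand-ins (a plain
integer takes the place of struct svc_cred and same_creds()); they are not nfsd types.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the cached slot and the incoming call */
struct demo_slot { bool cachethis; int opcnt; int cred; };
struct demo_call { bool cachethis; int opcnt; int cred; };

/* same three tests as replay_matches_cache(), in the same order */
static bool demo_replay_matches_cache(const struct demo_call *call,
				      const struct demo_slot *slot)
{
	if (slot->cachethis != call->cachethis)
		return false;	/* cachethis flag differs from the cached call */
	if (slot->opcnt < call->opcnt)
		return false;	/* cached reply has fewer ops than the new call */
	if (slot->cred != call->cred)
		return false;	/* credentials differ: the one check the spec requires */
	return true;		/* treat the call as a genuine replay */
}

int main(void)
{
	struct demo_slot slot    = { .cachethis = true, .opcnt = 3, .cred = 1000 };
	struct demo_call genuine = { .cachethis = true, .opcnt = 3, .cred = 1000 };
	struct demo_call forged  = { .cachethis = true, .opcnt = 3, .cred = 1001 };

	printf("genuine retry matches: %d\n", demo_replay_matches_cache(&genuine, &slot));
	printf("forged retry matches:  %d\n", demo_replay_matches_cache(&forged, &slot));
	return 0;
}

When the predicate fails, nfsd4_sequence() returns nfserr_seq_false_retry instead of
replaying the cached reply, which is what defeats the fabricated-retry snooping
described in the commit message.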
diff --git a/queue-4.9/nfsd4-fix-cached-replies-to-solo-sequence-compounds.patch b/queue-4.9/nfsd4-fix-cached-replies-to-solo-sequence-compounds.patch
new file mode 100644 (file)
index 0000000..f75b164
--- /dev/null
@@ -0,0 +1,125 @@
+From 085def3ade52f2ffe3e31f42e98c27dcc222dd37 Mon Sep 17 00:00:00 2001
+From: "J. Bruce Fields" <bfields@redhat.com>
+Date: Wed, 18 Oct 2017 16:17:18 -0400
+Subject: nfsd4: fix cached replies to solo SEQUENCE compounds
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+commit 085def3ade52f2ffe3e31f42e98c27dcc222dd37 upstream.
+
+Currently our handling of 4.1+ requests without "cachethis" set is
+confusing and not quite correct.
+
+Suppose a client sends a compound consisting of only a single SEQUENCE
+op, and it matches the seqid in a session slot (so it's a retry), but
+the previous request with that seqid did not have "cachethis" set.
+
+The obvious thing to do might be to return NFS4ERR_RETRY_UNCACHED_REP,
+but the protocol only allows that to be returned on the op following the
+SEQUENCE, and there is no such op in this case.
+
+The protocol permits us to cache replies even if the client didn't ask
+us to.  And it's easy to do so in the case of solo SEQUENCE compounds.
+
+So, when we get a solo SEQUENCE, we can either return the previously
+cached reply or NFSERR_SEQ_FALSE_RETRY if we notice it differs in some
+way from the original call.
+
+Currently, we're returning a corrupt reply in the case a solo SEQUENCE
+matches a previous compound with more ops.  This actually matters
+because the Linux client recently started doing this as a way to recover
+from lost replies to idempotent operations in the case the process doing
+the original reply was killed: in that case it's difficult to keep the
+original arguments around to do a real retry, and the client no longer
+cares what the result is anyway, but it would like to make sure that the
+slot's sequence id has been incremented, and the solo SEQUENCE assures
+that: if the server never got the original reply, it will increment the
+sequence id.  If it did get the original reply, it won't increment, and
+nothing else that about the reply really matters much.  But we can at
+least attempt to return valid xdr!
+
+Tested-by: Olga Kornievskaia <aglo@umich.edu>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Cc: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfs4state.c |   20 +++++++++++++++-----
+ fs/nfsd/state.h     |    1 +
+ fs/nfsd/xdr4.h      |   13 +++++++++++--
+ 3 files changed, 27 insertions(+), 7 deletions(-)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2344,14 +2344,16 @@ nfsd4_store_cache_entry(struct nfsd4_com
+       dprintk("--> %s slot %p\n", __func__, slot);
++      slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
+       slot->sl_opcnt = resp->opcnt;
+       slot->sl_status = resp->cstate.status;
+-      slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
+-      if (nfsd4_not_cached(resp)) {
+-              slot->sl_datalen = 0;
++      if (!nfsd4_cache_this(resp)) {
++              slot->sl_flags &= ~NFSD4_SLOT_CACHED;
+               return;
+       }
++      slot->sl_flags |= NFSD4_SLOT_CACHED;
++
+       base = resp->cstate.data_offset;
+       slot->sl_datalen = buf->len - base;
+       if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
+@@ -2378,8 +2380,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_c
+       op = &args->ops[resp->opcnt - 1];
+       nfsd4_encode_operation(resp, op);
+-      /* Return nfserr_retry_uncached_rep in next operation. */
+-      if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
++      if (slot->sl_flags & NFSD4_SLOT_CACHED)
++              return op->status;
++      if (args->opcnt == 1) {
++              /*
++               * The original operation wasn't a solo sequence--we
++               * always cache those--so this retry must not match the
++               * original:
++               */
++              op->status = nfserr_seq_false_retry;
++      } else {
+               op = &args->ops[resp->opcnt++];
+               op->status = nfserr_retry_uncached_rep;
+               nfsd4_encode_operation(resp, op);
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -174,6 +174,7 @@ struct nfsd4_slot {
+ #define NFSD4_SLOT_INUSE      (1 << 0)
+ #define NFSD4_SLOT_CACHETHIS  (1 << 1)
+ #define NFSD4_SLOT_INITIALIZED        (1 << 2)
++#define NFSD4_SLOT_CACHED     (1 << 3)
+       u8      sl_flags;
+       char    sl_data[];
+ };
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -645,9 +645,18 @@ static inline bool nfsd4_is_solo_sequenc
+       return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
+ }
+-static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
++/*
++ * The session reply cache only needs to cache replies that the client
++ * actually asked us to.  But it's almost free for us to cache compounds
++ * consisting of only a SEQUENCE op, so we may as well cache those too.
++ * Also, the protocol doesn't give us a convenient response in the case
++ * of a replay of a solo SEQUENCE op that wasn't cached
++ * (RETRY_UNCACHED_REP can only be returned in the second op of a
++ * compound).
++ */
++static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp)
+ {
+-      return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
++      return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
+               || nfsd4_is_solo_sequence(resp);
+ }
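Taken together, the two nfsd4 patches change both when a reply is cached and what a
retry that matches the slot's seqid gets back. A compact sketch of that decision logic
follows (illustrative names only, not nfsd identifiers).

#include <stdbool.h>

enum demo_replay_result {
	DEMO_SEND_CACHED_REPLY,		/* slot was cached: replay the stored reply */
	DEMO_RETRY_UNCACHED_REP,	/* uncached, multi-op compound: the op after
					   SEQUENCE gets nfserr_retry_uncached_rep */
	DEMO_SEQ_FALSE_RETRY,		/* uncached, solo SEQUENCE: cannot match the
					   original call */
};

/* store time: cache when the client asked for it, or when the compound is a
 * solo SEQUENCE (nfsd4_cache_this() above) */
static bool demo_cache_this(bool client_asked_cachethis, bool solo_sequence)
{
	return client_asked_cachethis || solo_sequence;
}

/* replay time, for a call whose seqid matches the slot
 * (nfsd4_enc_sequence_replay() above) */
static enum demo_replay_result demo_replay(bool slot_was_cached, int call_opcnt)
{
	if (slot_was_cached)
		return DEMO_SEND_CACHED_REPLY;
	if (call_opcnt == 1)
		/* solo SEQUENCE compounds are now always cached, so an uncached
		 * slot means this retry does not match the original call */
		return DEMO_SEQ_FALSE_RETRY;
	return DEMO_RETRY_UNCACHED_REP;
}

The solo-SEQUENCE branch is the new behaviour: since solo SEQUENCE compounds are now
always cached, an uncached slot can only mean the retry differs from the original
call, so nfserr_seq_false_retry is returned rather than a corrupt reply built from
the old, larger compound.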
diff --git a/queue-4.9/series b/queue-4.9/series
index 2d617c3d2e19c3c096d23f1096d2d2e86c4702cb..d171d051a83cf24243c11d5f0c2dbe6b556e252b 100644 (file)
@@ -14,3 +14,6 @@ mac80211-ensure-that-mgmt-tx-skbs-have-tailroom-for-encryption.patch
 drm-modes-prevent-division-by-zero-htotal.patch
 drm-vmwgfx-fix-setting-of-dma-masks.patch
 drm-vmwgfx-return-error-code-from-vmw_execbuf_copy_fence_user.patch
+nfsd4-fix-cached-replies-to-solo-sequence-compounds.patch
+nfsd4-catch-some-false-session-retries.patch
+hid-debug-fix-the-ring-buffer-implementation.patch