From 05f4fb7d246276f9a15d24ca3a10319a30a4ac6a Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Mon, 10 Dec 2018 19:50:21 -0500
Subject: [PATCH] patches for 4.9

Signed-off-by: Sasha Levin
---
 queue-4.9/series                              |   4 +
 ...wn-correctly-sized-scsi-sense-buffer.patch |  87 ++++++++
 queue-4.9/swiotlb-clean-up-reporting.patch    |  91 ++++++++
 ...use-after-free-in-network-stack-call.patch | 195 ++++++++++++++++++
 ...-setup-guest_cid-inside-vhost_vsock_.patch |  81 ++++++++
 5 files changed, 458 insertions(+)
 create mode 100644 queue-4.9/sr-pass-down-correctly-sized-scsi-sense-buffer.patch
 create mode 100644 queue-4.9/swiotlb-clean-up-reporting.patch
 create mode 100644 queue-4.9/vhost-vsock-fix-use-after-free-in-network-stack-call.patch
 create mode 100644 queue-4.9/vsock-lookup-and-setup-guest_cid-inside-vhost_vsock_.patch

diff --git a/queue-4.9/series b/queue-4.9/series
index ca48580b7fd..a345c357ff4 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -32,3 +32,7 @@ virtio-s390-fix-race-in-ccw_io_helper.patch
 sunrpc-fix-leak-of-krb5p-encode-pages.patch
 dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
 xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
+sr-pass-down-correctly-sized-scsi-sense-buffer.patch
+swiotlb-clean-up-reporting.patch
+vsock-lookup-and-setup-guest_cid-inside-vhost_vsock_.patch
+vhost-vsock-fix-use-after-free-in-network-stack-call.patch
diff --git a/queue-4.9/sr-pass-down-correctly-sized-scsi-sense-buffer.patch b/queue-4.9/sr-pass-down-correctly-sized-scsi-sense-buffer.patch
new file mode 100644
index 00000000000..05f8515729e
--- /dev/null
+++ b/queue-4.9/sr-pass-down-correctly-sized-scsi-sense-buffer.patch
@@ -0,0 +1,87 @@
+From 61854e10b995837585cbdebd33e22e855f2424ca Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Mon, 10 Dec 2018 18:14:16 +0000
+Subject: sr: pass down correctly sized SCSI sense buffer
+
+commit f7068114d45ec55996b9040e98111afa56e010fe upstream.
+
+We're casting the CDROM layer request_sense to the SCSI sense
+buffer, but the former is 64 bytes and the latter is 96 bytes.
+As we generally allocate these on the stack, we end up blowing
+up the stack.
+
+Fix this by wrapping the scsi_execute() call with a properly
+sized sense buffer, and copying back the bits for the CDROM
+layer.
+
+Reported-by: Piotr Gabriel Kosinski
+Reported-by: Daniel Shapira
+Tested-by: Kees Cook
+Fixes: 82ed4db499b8 ("block: split scsi_request out of struct request")
+Signed-off-by: Jens Axboe
+[bwh: Despite what the "Fixes" field says, a buffer overrun was already
+ possible if the sense data was really > 64 bytes long.
+ Backported to 4.9:
+ - We always need to allocate a sense buffer in order to call
+   scsi_normalize_sense()
+ - Remove the existing conditional heap-allocation of the sense buffer]
+Signed-off-by: Ben Hutchings
+Signed-off-by: Sasha Levin
+---
+ drivers/scsi/sr_ioctl.c | 21 +++++++--------------
+ 1 file changed, 7 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
+index 03054c0e7689..3c3e8115f73d 100644
+--- a/drivers/scsi/sr_ioctl.c
++++ b/drivers/scsi/sr_ioctl.c
+@@ -187,30 +187,25 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
+ 	struct scsi_device *SDev;
+ 	struct scsi_sense_hdr sshdr;
+ 	int result, err = 0, retries = 0;
+-	struct request_sense *sense = cgc->sense;
++	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
+ 
+ 	SDev = cd->device;
+ 
+-	if (!sense) {
+-		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+-		if (!sense) {
+-			err = -ENOMEM;
+-			goto out;
+-		}
+-	}
+-
+ retry:
+ 	if (!scsi_block_when_processing_errors(SDev)) {
+ 		err = -ENODEV;
+ 		goto out;
+ 	}
+ 
+-	memset(sense, 0, sizeof(*sense));
++	memset(sense_buffer, 0, sizeof(sense_buffer));
+ 	result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
+-			      cgc->buffer, cgc->buflen, (char *)sense,
++			      cgc->buffer, cgc->buflen, sense_buffer,
+ 			      cgc->timeout, IOCTL_RETRIES, 0, NULL);
+ 
+-	scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
++	scsi_normalize_sense(sense_buffer, sizeof(sense_buffer), &sshdr);
++
++	if (cgc->sense)
++		memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
+ 
+ 	/* Minimal error checking. Ignore cases we know about, and report the rest. */
+ 	if (driver_byte(result) != 0) {
+@@ -261,8 +256,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
+ 
+ 	/* Wake up a process waiting for device */
+  out:
+-	if (!cgc->sense)
+-		kfree(sense);
+ 	cgc->stat = err;
+ 	return err;
+ }
+-- 
+2.19.1
+
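The pattern behind the sr fix, reduced to a standalone sketch (plain C, outside the kernel; every name below is illustrative rather than taken from the driver): let the lower layer write into a local buffer sized for the largest possible result, then copy back only the prefix that fits the caller's smaller structure. The overrun disappears because the 96-byte writer never sees the 64-byte struct.

#include <string.h>

#define MAX_SENSE    96	/* what the lower layer may write (SCSI side) */
#define CALLER_SENSE 64	/* what the caller's struct holds (CDROM side) */

struct request_sense { unsigned char data[CALLER_SENSE]; };

/* Stand-in for scsi_execute(): may fill up to 'len' bytes of sense data. */
static void execute_cmd(unsigned char *sense, size_t len)
{
	memset(sense, 0, len);
}

static void do_cmd(struct request_sense *caller_sense)
{
	unsigned char sense_buffer[MAX_SENSE];	/* full-size local buffer */

	execute_cmd(sense_buffer, sizeof(sense_buffer));

	/* Copy back only as much as the caller's struct can hold. */
	if (caller_sense)
		memcpy(caller_sense, sense_buffer, sizeof(*caller_sense));
}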
diff --git a/queue-4.9/swiotlb-clean-up-reporting.patch b/queue-4.9/swiotlb-clean-up-reporting.patch
new file mode 100644
index 00000000000..38eded1e40f
--- /dev/null
+++ b/queue-4.9/swiotlb-clean-up-reporting.patch
@@ -0,0 +1,91 @@
+From 280d76b28cfa582639958f226f7ff82777d54db2 Mon Sep 17 00:00:00 2001
+From: Kees Cook
+Date: Mon, 10 Dec 2018 18:14:06 +0000
+Subject: swiotlb: clean up reporting
+
+commit 7d63fb3af87aa67aa7d24466e792f9d7c57d8e79 upstream.
+
+This removes needless use of '%p', and refactors the printk calls to
+use pr_*() helpers instead.
+
+Signed-off-by: Kees Cook
+Reviewed-by: Konrad Rzeszutek Wilk
+Signed-off-by: Christoph Hellwig
+[bwh: Backported to 4.9:
+ - Adjust filename
+ - Remove "swiotlb: " prefix from an additional log message]
+Signed-off-by: Ben Hutchings
+Signed-off-by: Sasha Levin
+---
+ lib/swiotlb.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index b7812df04437..7ff9dc36c2f8 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -17,6 +17,8 @@
+  * 08/12/11 beckyb	Add highmem support
+  */
+ 
++#define pr_fmt(fmt) "software IO TLB: " fmt
++
+ #include <linux/cache.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/mm.h>
+@@ -147,20 +149,16 @@ static bool no_iotlb_memory;
+ void swiotlb_print_info(void)
+ {
+ 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+-	unsigned char *vstart, *vend;
+ 
+ 	if (no_iotlb_memory) {
+-		pr_warn("software IO TLB: No low mem\n");
++		pr_warn("No low mem\n");
+ 		return;
+ 	}
+ 
+-	vstart = phys_to_virt(io_tlb_start);
+-	vend = phys_to_virt(io_tlb_end);
+-
+-	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
++	pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
+ 	       (unsigned long long)io_tlb_start,
+ 	       (unsigned long long)io_tlb_end,
+-	       bytes >> 20, vstart, vend - 1);
++	       bytes >> 20);
+ }
+ 
+ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+@@ -234,7 +232,7 @@ swiotlb_init(int verbose)
+ 		if (io_tlb_start)
+ 			memblock_free_early(io_tlb_start,
+ 					    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+-		pr_warn("Cannot allocate SWIOTLB buffer");
++		pr_warn("Cannot allocate buffer");
+ 		no_iotlb_memory = true;
+ 	}
+ 
+@@ -276,8 +274,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
+ 		return -ENOMEM;
+ 	}
+ 	if (order != get_order(bytes)) {
+-		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+-		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
++		pr_warn("only able to allocate %ld MB\n",
++			(PAGE_SIZE << order) >> 20);
+ 		io_tlb_nslabs = SLABS_PER_PAGE << order;
+ 	}
+ 	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
+@@ -691,7 +689,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ 		return ret;
+ 
+ err_warn:
+-	pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
++	pr_warn("coherent allocation failed for device %s size=%zu\n",
+ 		dev_name(hwdev), size);
+ 	dump_stack();
+ 
+-- 
+2.19.1
+
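A note on why the swiotlb patch can drop the literal prefixes: printk.h expands pr_warn(fmt, ...) to printk(KERN_WARNING pr_fmt(fmt), ...), and pr_fmt() defaults to plain fmt, so a file can define its own pr_fmt() once and every pr_*() call in it picks up the prefix automatically. A minimal kernel-style sketch of the idiom (the function name is made up):

/* Must be defined before the printk machinery is pulled in. */
#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/kernel.h>

static void report_mapping(unsigned long mb)
{
	/* Emits "software IO TLB: mapped (16MB)" at KERN_INFO level. */
	pr_info("mapped (%luMB)\n", mb);
}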
diff --git a/queue-4.9/vhost-vsock-fix-use-after-free-in-network-stack-call.patch b/queue-4.9/vhost-vsock-fix-use-after-free-in-network-stack-call.patch
new file mode 100644
index 00000000000..c10f7d2e331
--- /dev/null
+++ b/queue-4.9/vhost-vsock-fix-use-after-free-in-network-stack-call.patch
@@ -0,0 +1,195 @@
+From dcea41d6aa0dc267dbb8b571b892e8c72d350bf7 Mon Sep 17 00:00:00 2001
+From: Stefan Hajnoczi
+Date: Mon, 5 Nov 2018 10:35:47 +0000
+Subject: vhost/vsock: fix use-after-free in network stack callers
+
+[ Upstream commit 834e772c8db0c6a275d75315d90aba4ebbb1e249 ]
+
+If the network stack calls .send_pkt()/.cancel_pkt() during .release(),
+a struct vhost_vsock use-after-free is possible. This occurs because
+.release() does not wait for other CPUs to stop using struct
+vhost_vsock.
+
+Switch to an RCU-enabled hashtable (indexed by guest CID) so that
+.release() can wait for other CPUs by calling synchronize_rcu(). This
+also eliminates vhost_vsock_lock acquisition in the data path so it
+could have a positive effect on performance.
+
+This is CVE-2018-14625 "kernel: use-after-free Read in
+vhost_transport_send_pkt".
+
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: syzbot+bd391451452fb0b93039@syzkaller.appspotmail.com
+Reported-by: syzbot+e3e074963495f92a89ed@syzkaller.appspotmail.com
+Reported-by: syzbot+d5a0a170c5069658b141@syzkaller.appspotmail.com
+Signed-off-by: Stefan Hajnoczi
+Signed-off-by: Michael S. Tsirkin
+Acked-by: Jason Wang
+Signed-off-by: Sasha Levin
+---
+ drivers/vhost/vsock.c | 57 +++++++++++++++++++++++++------------------
+ 1 file changed, 33 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 6bca57896915..f800f89068db 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -15,6 +15,7 @@
+ #include <net/sock.h>
+ #include <linux/virtio_vsock.h>
+ #include <linux/vhost.h>
++#include <linux/hashtable.h>
+ 
+ #include <net/af_vsock.h>
+ #include "vhost.h"
+@@ -27,14 +28,14 @@ enum {
+ 
+ /* Used to track all the vhost_vsock instances on the system. */
+ static DEFINE_SPINLOCK(vhost_vsock_lock);
+-static LIST_HEAD(vhost_vsock_list);
++static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+ 
+ struct vhost_vsock {
+ 	struct vhost_dev dev;
+ 	struct vhost_virtqueue vqs[2];
+ 
+-	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+-	struct list_head list;
++	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
++	struct hlist_node hash;
+ 
+ 	struct vhost_work send_pkt_work;
+ 	spinlock_t send_pkt_list_lock;
+@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
+ 	return VHOST_VSOCK_DEFAULT_HOST_CID;
+ }
+ 
+-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
++/* Callers that dereference the return value must hold vhost_vsock_lock or the
++ * RCU read lock.
++ */
++static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+ {
+ 	struct vhost_vsock *vsock;
+ 
+-	list_for_each_entry(vsock, &vhost_vsock_list, list) {
++	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ 		u32 other_cid = vsock->guest_cid;
+ 
+ 		/* Skip instances that have no CID yet */
+@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+ 	return NULL;
+ }
+ 
+-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+-{
+-	struct vhost_vsock *vsock;
+-
+-	spin_lock_bh(&vhost_vsock_lock);
+-	vsock = __vhost_vsock_get(guest_cid);
+-	spin_unlock_bh(&vhost_vsock_lock);
+-
+-	return vsock;
+-}
+-
+ static void
+ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			    struct vhost_virtqueue *vq)
+@@ -206,9 +199,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+ 	struct vhost_virtqueue *vq;
+ 	int len = pkt->len;
+ 
++	rcu_read_lock();
++
+ 	/* Find the vhost_vsock according to guest context id */
+ 	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ 	if (!vsock) {
++		rcu_read_unlock();
+ 		virtio_transport_free_pkt(pkt);
+ 		return -ENODEV;
+ 	}
+@@ -223,6 +219,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+ 	spin_unlock_bh(&vsock->send_pkt_list_lock);
+ 
+ 	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
++
++	rcu_read_unlock();
+ 	return len;
+ }
+ 
+@@ -232,12 +230,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ 	struct vhost_vsock *vsock;
+ 	struct virtio_vsock_pkt *pkt, *n;
+ 	int cnt = 0;
++	int ret = -ENODEV;
+ 	LIST_HEAD(freeme);
+ 
++	rcu_read_lock();
++
+ 	/* Find the vhost_vsock according to guest context id */
+ 	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ 	if (!vsock)
+-		return -ENODEV;
++		goto out;
+ 
+ 	spin_lock_bh(&vsock->send_pkt_list_lock);
+ 	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+@@ -263,7 +264,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
+ 			vhost_poll_queue(&tx_vq->poll);
+ 	}
+ 
+-	return 0;
++	ret = 0;
++out:
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ static struct virtio_vsock_pkt *
+@@ -529,10 +533,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
+ 	spin_lock_init(&vsock->send_pkt_list_lock);
+ 	INIT_LIST_HEAD(&vsock->send_pkt_list);
+ 	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+-
+-	spin_lock_bh(&vhost_vsock_lock);
+-	list_add_tail(&vsock->list, &vhost_vsock_list);
+-	spin_unlock_bh(&vhost_vsock_lock);
+ 	return 0;
+ 
+ out:
+@@ -573,9 +573,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
+ 	struct vhost_vsock *vsock = file->private_data;
+ 
+ 	spin_lock_bh(&vhost_vsock_lock);
+-	list_del(&vsock->list);
++	if (vsock->guest_cid)
++		hash_del_rcu(&vsock->hash);
+ 	spin_unlock_bh(&vhost_vsock_lock);
+ 
++	/* Wait for other CPUs to finish using vsock */
++	synchronize_rcu();
++
+ 	/* Iterating over all connections for all CIDs to find orphans is
+ 	 * inefficient.  Room for improvement here. */
+ 	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+@@ -616,12 +620,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+ 
+ 	/* Refuse if CID is already in use */
+ 	spin_lock_bh(&vhost_vsock_lock);
+-	other = __vhost_vsock_get(guest_cid);
++	other = vhost_vsock_get(guest_cid);
+ 	if (other && other != vsock) {
+ 		spin_unlock_bh(&vhost_vsock_lock);
+ 		return -EADDRINUSE;
+ 	}
++
++	if (vsock->guest_cid)
++		hash_del_rcu(&vsock->hash);
++
+ 	vsock->guest_cid = guest_cid;
++	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ 	spin_unlock_bh(&vhost_vsock_lock);
+ 
+ 	return 0;
+-- 
+2.19.1
+
diff --git a/queue-4.9/vsock-lookup-and-setup-guest_cid-inside-vhost_vsock_.patch b/queue-4.9/vsock-lookup-and-setup-guest_cid-inside-vhost_vsock_.patch
new file mode 100644
index 00000000000..d23307de233
--- /dev/null
+++ b/queue-4.9/vsock-lookup-and-setup-guest_cid-inside-vhost_vsock_.patch
@@ -0,0 +1,81 @@
+From 2fd5b1a40fe06bb91bfef62ba145bf00c9988ec2 Mon Sep 17 00:00:00 2001
+From: Gao feng
+Date: Wed, 14 Dec 2016 19:24:36 +0800
+Subject: vsock: lookup and setup guest_cid inside vhost_vsock_lock
+
+[ Upstream commit 6c083c2b8a0a110cad936bc0a2c089f0d8115175 ]
+
+Multiple vsocks may set up the same CID at the same time.
+
+Signed-off-by: Gao feng
+Signed-off-by: Michael S. Tsirkin
+Reviewed-by: Stefan Hajnoczi
+Signed-off-by: Sasha Levin
+---
+ drivers/vhost/vsock.c | 25 +++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
+index 0ec970ca64ce..6bca57896915 100644
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
+ 	return VHOST_VSOCK_DEFAULT_HOST_CID;
+ }
+ 
+-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
++static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+ {
+ 	struct vhost_vsock *vsock;
+ 
+-	spin_lock_bh(&vhost_vsock_lock);
+ 	list_for_each_entry(vsock, &vhost_vsock_list, list) {
+ 		u32 other_cid = vsock->guest_cid;
+ 
+@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+ 		continue;
+ 
+ 		if (other_cid == guest_cid) {
+-			spin_unlock_bh(&vhost_vsock_lock);
+ 			return vsock;
+ 		}
+ 	}
+-	spin_unlock_bh(&vhost_vsock_lock);
+ 
+ 	return NULL;
+ }
+ 
++static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
++{
++	struct vhost_vsock *vsock;
++
++	spin_lock_bh(&vhost_vsock_lock);
++	vsock = __vhost_vsock_get(guest_cid);
++	spin_unlock_bh(&vhost_vsock_lock);
++
++	return vsock;
++}
++
+ static void
+ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			    struct vhost_virtqueue *vq)
+@@ -607,11 +615,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
+ 		return -EINVAL;
+ 
+ 	/* Refuse if CID is already in use */
+-	other = vhost_vsock_get(guest_cid);
+-	if (other && other != vsock)
+-		return -EADDRINUSE;
+-
+ 	spin_lock_bh(&vhost_vsock_lock);
++	other = __vhost_vsock_get(guest_cid);
++	if (other && other != vsock) {
++		spin_unlock_bh(&vhost_vsock_lock);
++		return -EADDRINUSE;
++	}
+ 	vsock->guest_cid = guest_cid;
+ 	spin_unlock_bh(&vhost_vsock_lock);
+ 
+-- 
+2.19.1
+
-- 
2.47.3
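For readers new to the RCU pattern the vhost/vsock fix uses, here it is in miniature as a kernel-style sketch with illustrative names (obj, obj_lock, and obj_hash are not from the driver). Lookups run under the RCU read lock with no shared-lock traffic; teardown unlinks the object under the writers' spinlock and then blocks in synchronize_rcu(), which returns only once every RCU read-side critical section that might still see the object has finished, so the free cannot race with a reader:

#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(obj_lock);		/* serializes writers only */
static DEFINE_READ_MOSTLY_HASHTABLE(obj_hash, 8);

struct obj {
	struct hlist_node node;
	u32 cid;
};

/* Read side: call within rcu_read_lock(); the result may only be
 * dereferenced until the matching rcu_read_unlock(). */
static struct obj *obj_get(u32 cid)
{
	struct obj *o;

	hash_for_each_possible_rcu(obj_hash, o, node, cid)
		if (o->cid == cid)
			return o;
	return NULL;
}

/* Write side: publish under the lock. */
static void obj_insert(struct obj *o)
{
	spin_lock_bh(&obj_lock);
	hash_add_rcu(obj_hash, &o->node, o->cid);
	spin_unlock_bh(&obj_lock);
}

/* Teardown: unlink, then wait out all readers before freeing. */
static void obj_release(struct obj *o)
{
	spin_lock_bh(&obj_lock);
	hash_del_rcu(&o->node);
	spin_unlock_bh(&obj_lock);

	synchronize_rcu();	/* every obj_get() reader is now done */
	kfree(o);
}

DEFINE_READ_MOSTLY_HASHTABLE only differs from DEFINE_HASHTABLE in placing the table in the read-mostly data section; the locking behaviour is identical.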
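The older lookup/setup patch teaches the complementary lesson: a uniqueness check and the update it guards must share one critical section. Continuing the sketch above (same hypothetical names), the fixed shape looks like this; calling obj_get() before taking obj_lock would let two tasks both pass the check and then both claim the same CID:

#include <linux/errno.h>

/* Lookup and assignment under one lock, so the check cannot go stale
 * before the update lands. obj_get() is safe here without
 * rcu_read_lock() because holding obj_lock excludes all writers. */
static int obj_set_cid(struct obj *o, u32 cid)
{
	struct obj *other;

	spin_lock_bh(&obj_lock);
	other = obj_get(cid);
	if (other && other != o) {
		spin_unlock_bh(&obj_lock);
		return -EADDRINUSE;
	}
	o->cid = cid;
	spin_unlock_bh(&obj_lock);

	return 0;
}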