From: Greg Kroah-Hartman
Date: Fri, 2 Aug 2013 09:20:14 +0000 (+0800)
Subject: 3.0-stable patches
X-Git-Tag: v3.0.89~3
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=e721c6455ccdef00ea554f72dafc375195d9964c;p=thirdparty%2Fkernel%2Fstable-queue.git

3.0-stable patches

added patches:
	mm-memory-hotplug-fix-lowmem-count-overflow-when-offline-pages.patch
	virtio-race-1of2.diff
	virtio-race-2of2.diff
---

diff --git a/queue-3.0/mm-memory-hotplug-fix-lowmem-count-overflow-when-offline-pages.patch b/queue-3.0/mm-memory-hotplug-fix-lowmem-count-overflow-when-offline-pages.patch
new file mode 100644
index 00000000000..187f6f0746a
--- /dev/null
+++ b/queue-3.0/mm-memory-hotplug-fix-lowmem-count-overflow-when-offline-pages.patch
@@ -0,0 +1,81 @@
+From cea27eb2a202959783f81254c48c250ddd80e129 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li
+Date: Wed, 3 Jul 2013 15:02:40 -0700
+Subject: mm/memory-hotplug: fix lowmem count overflow when offline pages
+
+From: Wanpeng Li
+
+commit cea27eb2a202959783f81254c48c250ddd80e129 upstream.
+
+The logic for the memory-remove code fails to correctly account the
+Total High Memory when a memory block which contains High Memory is
+offlined as shown in the example below.  The following patch fixes it.
+
+Before logic memory remove:
+
+MemTotal:        7603740 kB
+MemFree:         6329612 kB
+Buffers:           94352 kB
+Cached:           872008 kB
+SwapCached:            0 kB
+Active:           626932 kB
+Inactive:         519216 kB
+Active(anon):     180776 kB
+Inactive(anon):   222944 kB
+Active(file):     446156 kB
+Inactive(file):   296272 kB
+Unevictable:           0 kB
+Mlocked:               0 kB
+HighTotal:       7294672 kB
+HighFree:        5704696 kB
+LowTotal:         309068 kB
+LowFree:          624916 kB
+
+After logic memory remove:
+
+MemTotal:        7079452 kB
+MemFree:         5805976 kB
+Buffers:           94372 kB
+Cached:           872000 kB
+SwapCached:            0 kB
+Active:           626936 kB
+Inactive:         519236 kB
+Active(anon):     180780 kB
+Inactive(anon):   222944 kB
+Active(file):     446156 kB
+Inactive(file):   296292 kB
+Unevictable:           0 kB
+Mlocked:               0 kB
+HighTotal:       7294672 kB
+HighFree:        5181024 kB
+LowTotal:     4294752076 kB
+LowFree:          624952 kB
+
+[mhocko@suse.cz: fix CONFIG_HIGHMEM=n build]
+Signed-off-by: Wanpeng Li
+Reviewed-by: Michal Hocko
+Cc: KAMEZAWA Hiroyuki
+Cc: David Rientjes
+Cc: [2.6.24+]
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Zhouping Liu
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/page_alloc.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5737,6 +5737,10 @@ __offline_isolated_pages(unsigned long s
+ 		zone->free_area[order].nr_free--;
+ 		__mod_zone_page_state(zone, NR_FREE_PAGES,
+ 				      - (1UL << order));
++#ifdef CONFIG_HIGHMEM
++		if (PageHighMem(page))
++			totalhigh_pages -= 1 << order;
++#endif
+ 		for (i = 0; i < (1 << order); i++)
+ 			SetPageReserved((page+i));
+ 		pfn += (1 << order);
diff --git a/queue-3.0/series b/queue-3.0/series
index 03313464a2b..0c3f3d5a567 100644
--- a/queue-3.0/series
+++ b/queue-3.0/series
@@ -18,3 +18,6 @@ drm-radeon-atom-initialize-more-atom-interpretor-elements-to-0.patch
 usb-serial-ftdi_sio-add-more-rt-systems-ftdi-devices.patch
 livelock-avoidance-in-sget.patch
 xen-evtchn-avoid-a-deadlock-when-unbinding-an-event-channel.patch
+virtio-race-1of2.diff
+virtio-race-2of2.diff
+mm-memory-hotplug-fix-lowmem-count-overflow-when-offline-pages.patch
diff --git a/queue-3.0/virtio-race-1of2.diff b/queue-3.0/virtio-race-1of2.diff
new file mode 100644
index 00000000000..61eae5f67d1
--- /dev/null
+++ b/queue-3.0/virtio-race-1of2.diff
@@ -0,0 +1,118 @@
+From: Michael S. Tsirkin
+Subject: virtio: support unlocked queue poll
+
+From: Michael S. Tsirkin
+
+commit cc229884d3f77ec3b1240e467e0236c3e0647c0c upstream.
+
+This adds a way to check ring empty state after enable_cb outside any
+locks. Will be used by virtio_net.
+
+Note: there's room for more optimization: caller is likely to have a
+memory barrier already, which means we might be able to get rid of a
+barrier here. Deferring this optimization until we do some
+benchmarking.
+
+Signed-off-by: Michael S. Tsirkin
+Signed-off-by: David S. Miller
+[wg: Backported to 3.2]
+Signed-off-by: Wolfram Gloger
+Signed-off-by: Greg Kroah-Hartman
+---
+---
+ drivers/virtio/virtio_ring.c |   59 +++++++++++++++++++++++++++++++++++++------
+ include/linux/virtio.h       |    4 ++
+ 2 files changed, 55 insertions(+), 8 deletions(-)
+
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -359,9 +359,22 @@ void virtqueue_disable_cb(struct virtque
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+
+-bool virtqueue_enable_cb(struct virtqueue *_vq)
++/**
++ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns current queue state
++ * in an opaque unsigned value. This value should be later tested by
++ * virtqueue_poll, to detect a possible race between the driver checking for
++ * more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
++	u16 last_used_idx;
+
+ 	START_USE(vq);
+
+@@ -371,15 +384,45 @@ bool virtqueue_enable_cb(struct virtqueu
+ 	 * either clear the flags bit or point the event index at the next
+ 	 * entry. Always do both to keep code simple. */
+ 	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+-	vring_used_event(&vq->vring) = vq->last_used_idx;
++	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
++	END_USE(vq);
++	return last_used_idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
++
++/**
++ * virtqueue_poll - query pending used buffers
++ * @vq: the struct virtqueue we're talking about.
++ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
++ *
++ * Returns "true" if there are pending used buffers in the queue.
++ *
++ * This does not need to be serialized.
++ */
++bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
++{
++	struct vring_virtqueue *vq = to_vvq(_vq);
++
+ 	virtio_mb();
+-	if (unlikely(more_used(vq))) {
+-		END_USE(vq);
+-		return false;
+-	}
++	return (u16)last_used_idx != vq->vring.used->idx;
++}
++EXPORT_SYMBOL_GPL(virtqueue_poll);
+
+-	END_USE(vq);
+-	return true;
++/**
++ * virtqueue_enable_cb - restart callbacks after disable_cb.
++ * @vq: the struct virtqueue we're talking about.
++ *
++ * This re-enables callbacks; it returns "false" if there are pending
++ * buffers in the queue, to detect a possible race between the driver
++ * checking for more work, and enabling callbacks.
++ *
++ * Caller must ensure we don't call this with other virtqueue
++ * operations at the same time (except where noted).
++ */
++bool virtqueue_enable_cb(struct virtqueue *_vq)
++{
++	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
++	return !virtqueue_poll(_vq, last_used_idx);
+ }
+ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -93,6 +93,10 @@ void virtqueue_disable_cb(struct virtque
+
+ bool virtqueue_enable_cb(struct virtqueue *vq);
+
++unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
++
++bool virtqueue_poll(struct virtqueue *vq, unsigned);
++
+ bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
+ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
diff --git a/queue-3.0/virtio-race-2of2.diff b/queue-3.0/virtio-race-2of2.diff
new file mode 100644
index 00000000000..623cb18eb60
--- /dev/null
+++ b/queue-3.0/virtio-race-2of2.diff
@@ -0,0 +1,60 @@
+From: Michael S. Tsirkin
+Subject: virtio_net: fix race in RX VQ processing
+
+From: Michael S. Tsirkin
+
+commit cbdadbbf0c790f79350a8f36029208944c5487d0 upstream
+
+virtio net called virtqueue_enable_cq on RX path after napi_complete, so
+with NAPI_STATE_SCHED clear - outside the implicit napi lock.
+This violates the requirement to synchronize virtqueue_enable_cq wrt
+virtqueue_add_buf. In particular, used event can move backwards,
+causing us to lose interrupts.
+In a debug build, this can trigger panic within START_USE.
+
+Jason Wang reports that he can trigger the races artificially,
+by adding udelay() in virtqueue_enable_cb() after virtio_mb().
+
+However, we must call napi_complete to clear NAPI_STATE_SCHED before
+polling the virtqueue for used buffers, otherwise napi_schedule_prep in
+a callback will fail, causing us to lose RX events.
+
+To fix, call virtqueue_enable_cb_prepare with NAPI_STATE_SCHED
+set (under napi lock), later call virtqueue_poll with
+NAPI_STATE_SCHED clear (outside the lock).
+
+Reported-by: Jason Wang
+Tested-by: Jason Wang
+Acked-by: Jason Wang
+Signed-off-by: Michael S. Tsirkin
+Signed-off-by: David S. Miller
+[wg: Backported to 3.2]
+Signed-off-by: Wolfram Gloger
+Signed-off-by: Greg Kroah-Hartman
+---
+---
+ drivers/net/virtio_net.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -480,7 +480,7 @@ static int virtnet_poll(struct napi_stru
+ {
+ 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
+ 	void *buf;
+-	unsigned int len, received = 0;
++	unsigned int r, len, received = 0;
+
+ again:
+ 	while (received < budget &&
+@@ -497,8 +497,9 @@ again:
+
+ 	/* Out of packets? */
+ 	if (received < budget) {
++		r = virtqueue_enable_cb_prepare(vi->rvq);
+ 		napi_complete(napi);
+-		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
++		if (unlikely(virtqueue_poll(vi->rvq, r)) &&
+ 		    napi_schedule_prep(napi)) {
+ 			virtqueue_disable_cb(vi->rvq);
+ 			__napi_schedule(napi);