]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.0-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Jun 2015 03:10:29 +0000 (12:10 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Jun 2015 03:10:29 +0000 (12:10 +0900)
added patches:
crypto-s390-ghash-fix-incorrect-ghash-icv-buffer-handling.patch
gpio-gpio-kempld-fix-get_direction-return-value.patch
mac80211-don-t-use-napi_gro_receive-outside-napi-context.patch
mac80211-move-wep-tailroom-size-check.patch
s390-mm-correct-return-value-of-pmd_pfn.patch
sched-always-use-blk_schedule_flush_plug-in-io_schedule_out.patch
sched-handle-priority-boosted-tasks-proper-in-setscheduler.patch

queue-4.0/crypto-s390-ghash-fix-incorrect-ghash-icv-buffer-handling.patch [new file with mode: 0644]
queue-4.0/gpio-gpio-kempld-fix-get_direction-return-value.patch [new file with mode: 0644]
queue-4.0/mac80211-don-t-use-napi_gro_receive-outside-napi-context.patch [new file with mode: 0644]
queue-4.0/mac80211-move-wep-tailroom-size-check.patch [new file with mode: 0644]
queue-4.0/s390-mm-correct-return-value-of-pmd_pfn.patch [new file with mode: 0644]
queue-4.0/sched-always-use-blk_schedule_flush_plug-in-io_schedule_out.patch [new file with mode: 0644]
queue-4.0/sched-handle-priority-boosted-tasks-proper-in-setscheduler.patch [new file with mode: 0644]
queue-4.0/series

diff --git a/queue-4.0/crypto-s390-ghash-fix-incorrect-ghash-icv-buffer-handling.patch b/queue-4.0/crypto-s390-ghash-fix-incorrect-ghash-icv-buffer-handling.patch
new file mode 100644 (file)
index 0000000..c2b9731
--- /dev/null
@@ -0,0 +1,125 @@
+From a1cae34e23b1293eccbcc8ee9b39298039c3952a Mon Sep 17 00:00:00 2001
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Date: Thu, 21 May 2015 10:01:11 +0200
+Subject: crypto: s390/ghash - Fix incorrect ghash icv buffer handling.
+
+From: Harald Freudenberger <freude@linux.vnet.ibm.com>
+
+commit a1cae34e23b1293eccbcc8ee9b39298039c3952a upstream.
+
+Multithreaded tests showed that the icv buffer in the current ghash
+implementation is not handled correctly. A move of this working ghash
+buffer value to the descriptor context fixed this. Code is tested and
+verified with a multithreaded application via af_alg interface.
+
+Signed-off-by: Harald Freudenberger <freude@linux.vnet.ibm.com>
+Signed-off-by: Gerald Schaefer <geraldsc@linux.vnet.ibm.com>
+Reported-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/crypto/ghash_s390.c |   25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/arch/s390/crypto/ghash_s390.c
++++ b/arch/s390/crypto/ghash_s390.c
+@@ -16,11 +16,12 @@
+ #define GHASH_DIGEST_SIZE     16
+ struct ghash_ctx {
+-      u8 icv[16];
+-      u8 key[16];
++      u8 key[GHASH_BLOCK_SIZE];
+ };
+ struct ghash_desc_ctx {
++      u8 icv[GHASH_BLOCK_SIZE];
++      u8 key[GHASH_BLOCK_SIZE];
+       u8 buffer[GHASH_BLOCK_SIZE];
+       u32 bytes;
+ };
+@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
+ static int ghash_init(struct shash_desc *desc)
+ {
+       struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++      struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+       memset(dctx, 0, sizeof(*dctx));
++      memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
+       return 0;
+ }
+@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_sh
+       }
+       memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
+-      memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
+       return 0;
+ }
+@@ -54,7 +56,6 @@ static int ghash_update(struct shash_des
+                        const u8 *src, unsigned int srclen)
+ {
+       struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-      struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+       unsigned int n;
+       u8 *buf = dctx->buffer;
+       int ret;
+@@ -70,7 +71,7 @@ static int ghash_update(struct shash_des
+               src += n;
+               if (!dctx->bytes) {
+-                      ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
++                      ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
+                                             GHASH_BLOCK_SIZE);
+                       if (ret != GHASH_BLOCK_SIZE)
+                               return -EIO;
+@@ -79,7 +80,7 @@ static int ghash_update(struct shash_des
+       n = srclen & ~(GHASH_BLOCK_SIZE - 1);
+       if (n) {
+-              ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
++              ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
+               if (ret != n)
+                       return -EIO;
+               src += n;
+@@ -94,7 +95,7 @@ static int ghash_update(struct shash_des
+       return 0;
+ }
+-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
++static int ghash_flush(struct ghash_desc_ctx *dctx)
+ {
+       u8 *buf = dctx->buffer;
+       int ret;
+@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx
+               memset(pos, 0, dctx->bytes);
+-              ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
++              ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
+               if (ret != GHASH_BLOCK_SIZE)
+                       return -EIO;
++
++              dctx->bytes = 0;
+       }
+-      dctx->bytes = 0;
+       return 0;
+ }
+ static int ghash_final(struct shash_desc *desc, u8 *dst)
+ {
+       struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+-      struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+       int ret;
+-      ret = ghash_flush(ctx, dctx);
++      ret = ghash_flush(dctx);
+       if (!ret)
+-              memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
++              memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
+       return ret;
+ }
diff --git a/queue-4.0/gpio-gpio-kempld-fix-get_direction-return-value.patch b/queue-4.0/gpio-gpio-kempld-fix-get_direction-return-value.patch
new file mode 100644 (file)
index 0000000..eb14fe3
--- /dev/null
@@ -0,0 +1,40 @@
+From f230e8ffc03f17bd9d6b90ea890b8252a8cc1821 Mon Sep 17 00:00:00 2001
+From: Michael Brunner <mibru@gmx.de>
+Date: Mon, 11 May 2015 12:46:49 +0200
+Subject: gpio: gpio-kempld: Fix get_direction return value
+
+From: Michael Brunner <mibru@gmx.de>
+
+commit f230e8ffc03f17bd9d6b90ea890b8252a8cc1821 upstream.
+
+This patch fixes an inverted return value of the gpio get_direction
+function.
+
+The wrong value causes the direction sysfs entry and GPIO debugfs file
+to indicate incorrect GPIO direction settings. In some cases it also
+prevents setting GPIO output values.
+
+The problem is also present in all other stable kernel versions since
+linux-3.12.
+
+Reported-by: Jochen Henneberg <jh@henneberg-systemdesign.com>
+Signed-off-by: Michael Brunner <michael.brunner@kontron.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-kempld.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpio-kempld.c
++++ b/drivers/gpio/gpio-kempld.c
+@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(str
+               = container_of(chip, struct kempld_gpio_data, chip);
+       struct kempld_device_data *pld = gpio->pld;
+-      return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
++      return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+ }
+ static int kempld_gpio_pincount(struct kempld_device_data *pld)
diff --git a/queue-4.0/mac80211-don-t-use-napi_gro_receive-outside-napi-context.patch b/queue-4.0/mac80211-don-t-use-napi_gro_receive-outside-napi-context.patch
new file mode 100644 (file)
index 0000000..44aaae8
--- /dev/null
@@ -0,0 +1,65 @@
+From 22d3a3c829fa9ecdb493d1f1f2838d543f8d86a3 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 19 May 2015 15:40:21 +0200
+Subject: mac80211: don't use napi_gro_receive() outside NAPI context
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 22d3a3c829fa9ecdb493d1f1f2838d543f8d86a3 upstream.
+
+No matter how the driver manages its NAPI context, there's no way
+sending frames to it from a timer can be correct, since it would
+corrupt the internal GRO lists.
+
+To avoid that, always use the non-NAPI path when releasing frames
+from the timer.
+
+Reported-by: Jean Trivelly <jean.trivelly@intel.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/ieee80211_i.h |    3 +++
+ net/mac80211/rx.c          |    5 +++--
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -204,6 +204,8 @@ enum ieee80211_packet_rx_flags {
+  * @IEEE80211_RX_CMNTR: received on cooked monitor already
+  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
+  *    to cfg80211_report_obss_beacon().
++ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
++ *    reorder buffer timeout timer, not the normal RX path
+  *
+  * These flags are used across handling multiple interfaces
+  * for a single frame.
+@@ -211,6 +213,7 @@ enum ieee80211_packet_rx_flags {
+ enum ieee80211_rx_flags {
+       IEEE80211_RX_CMNTR              = BIT(0),
+       IEEE80211_RX_BEACON_REPORTED    = BIT(1),
++      IEEE80211_RX_REORDER_TIMER      = BIT(2),
+ };
+ struct ieee80211_rx_data {
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2106,7 +2106,8 @@ ieee80211_deliver_skb(struct ieee80211_r
+               /* deliver to local stack */
+               skb->protocol = eth_type_trans(skb, dev);
+               memset(skb->cb, 0, sizeof(skb->cb));
+-              if (rx->local->napi)
++              if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
++                  rx->local->napi)
+                       napi_gro_receive(rx->local->napi, skb);
+               else
+                       netif_receive_skb(skb);
+@@ -3215,7 +3216,7 @@ void ieee80211_release_reorder_timeout(s
+               /* This is OK -- must be QoS data frame */
+               .security_idx = tid,
+               .seqno_idx = tid,
+-              .flags = 0,
++              .flags = IEEE80211_RX_REORDER_TIMER,
+       };
+       struct tid_ampdu_rx *tid_agg_rx;
diff --git a/queue-4.0/mac80211-move-wep-tailroom-size-check.patch b/queue-4.0/mac80211-move-wep-tailroom-size-check.patch
new file mode 100644 (file)
index 0000000..9a3bf37
--- /dev/null
@@ -0,0 +1,60 @@
+From 47b4e1fc4972cc43a19121bc2608a60aef3bf216 Mon Sep 17 00:00:00 2001
+From: Janusz Dziedzic <janusz.dziedzic@tieto.com>
+Date: Mon, 11 May 2015 11:31:15 +0200
+Subject: mac80211: move WEP tailroom size check
+
+From: Janusz Dziedzic <janusz.dziedzic@tieto.com>
+
+commit 47b4e1fc4972cc43a19121bc2608a60aef3bf216 upstream.
+
+Remove checking tailroom when adding IV as it uses only
+headroom, and move the check to the ICV generation that
+actually needs the tailroom.
+
+In other case I hit such warning and datapath don't work,
+when testing:
+- IBSS + WEP
+- ath9k with hw crypt enabled
+- IPv6 data (ping6)
+
+WARNING: CPU: 3 PID: 13301 at net/mac80211/wep.c:102 ieee80211_wep_add_iv+0x129/0x190 [mac80211]()
+[...]
+Call Trace:
+[<ffffffff817bf491>] dump_stack+0x45/0x57
+[<ffffffff8107746a>] warn_slowpath_common+0x8a/0xc0
+[<ffffffff8107755a>] warn_slowpath_null+0x1a/0x20
+[<ffffffffc09ae109>] ieee80211_wep_add_iv+0x129/0x190 [mac80211]
+[<ffffffffc09ae7ab>] ieee80211_crypto_wep_encrypt+0x6b/0xd0 [mac80211]
+[<ffffffffc09d3fb1>] invoke_tx_handlers+0xc51/0xf30 [mac80211]
+[...]
+
+Signed-off-by: Janusz Dziedzic <janusz.dziedzic@tieto.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/mac80211/wep.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/wep.c
++++ b/net/mac80211/wep.c
+@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct i
+       hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+-      if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
+-                  skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
++      if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+               return NULL;
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee802
+       size_t len;
+       u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
++      if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
++              return -1;
++
+       iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
+       if (!iv)
+               return -1;
diff --git a/queue-4.0/s390-mm-correct-return-value-of-pmd_pfn.patch b/queue-4.0/s390-mm-correct-return-value-of-pmd_pfn.patch
new file mode 100644 (file)
index 0000000..f742f34
--- /dev/null
@@ -0,0 +1,33 @@
+From 7cded342c09f633666e71ee1ce048f218a9c5836 Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 13 May 2015 14:33:22 +0200
+Subject: s390/mm: correct return value of pmd_pfn
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 7cded342c09f633666e71ee1ce048f218a9c5836 upstream.
+
+Git commit 152125b7a882df36a55a8eadbea6d0edf1461ee7
+"s390/mm: implement dirty bits for large segment table entries"
+broke the pmd_pfn function, it changed the return value from
+'unsigned long' to 'int'. This breaks all machine configurations
+with memory above the 8TB line.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/pgtable.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -600,7 +600,7 @@ static inline int pmd_large(pmd_t pmd)
+       return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
+ }
+-static inline int pmd_pfn(pmd_t pmd)
++static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+       unsigned long origin_mask;
diff --git a/queue-4.0/sched-always-use-blk_schedule_flush_plug-in-io_schedule_out.patch b/queue-4.0/sched-always-use-blk_schedule_flush_plug-in-io_schedule_out.patch
new file mode 100644 (file)
index 0000000..aa029fc
--- /dev/null
@@ -0,0 +1,108 @@
+From 10d784eae2b41e25d8fc6a88096cd27286093c84 Mon Sep 17 00:00:00 2001
+From: Shaohua Li <shli@fb.com>
+Date: Fri, 8 May 2015 10:51:29 -0700
+Subject: sched: always use blk_schedule_flush_plug in io_schedule_out
+
+From: Shaohua Li <shli@fb.com>
+
+commit 10d784eae2b41e25d8fc6a88096cd27286093c84 upstream.
+
+block plug callback could sleep, so we introduce a parameter
+'from_schedule' and corresponding drivers can use it to distinguish a
+schedule plug flush or a plug finish. Unfortunately io_schedule_out
+still uses blk_flush_plug(). This causes below output (Note, I added a
+might_sleep() in raid1_unplug to make it trigger faster, but the whole
+thing doesn't matter if I add might_sleep). In raid1/10, this can cause
+deadlock.
+
+This patch makes io_schedule_out always uses blk_schedule_flush_plug.
+This should only impact drivers (as far as I know, raid 1/10) which are
+sensitive to the 'from_schedule' parameter.
+
+[  370.817949] ------------[ cut here ]------------
+[  370.817960] WARNING: CPU: 7 PID: 145 at ../kernel/sched/core.c:7306 __might_sleep+0x7f/0x90()
+[  370.817969] do not call blocking ops when !TASK_RUNNING; state=2 set at [<ffffffff81092fcf>] prepare_to_wait+0x2f/0x90
+[  370.817971] Modules linked in: raid1
+[  370.817976] CPU: 7 PID: 145 Comm: kworker/u16:9 Tainted: G        W       4.0.0+ #361
+[  370.817977] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140709_153802- 04/01/2014
+[  370.817983] Workqueue: writeback bdi_writeback_workfn (flush-9:1)
+[  370.817985]  ffffffff81cd83be ffff8800ba8cb298 ffffffff819dd7af 0000000000000001
+[  370.817988]  ffff8800ba8cb2e8 ffff8800ba8cb2d8 ffffffff81051afc ffff8800ba8cb2c8
+[  370.817990]  ffffffffa00061a8 000000000000041e 0000000000000000 ffff8800ba8cba28
+[  370.817993] Call Trace:
+[  370.817999]  [<ffffffff819dd7af>] dump_stack+0x4f/0x7b
+[  370.818002]  [<ffffffff81051afc>] warn_slowpath_common+0x8c/0xd0
+[  370.818004]  [<ffffffff81051b86>] warn_slowpath_fmt+0x46/0x50
+[  370.818006]  [<ffffffff81092fcf>] ? prepare_to_wait+0x2f/0x90
+[  370.818008]  [<ffffffff81092fcf>] ? prepare_to_wait+0x2f/0x90
+[  370.818010]  [<ffffffff810776ef>] __might_sleep+0x7f/0x90
+[  370.818014]  [<ffffffffa0000c03>] raid1_unplug+0xd3/0x170 [raid1]
+[  370.818024]  [<ffffffff81421d9a>] blk_flush_plug_list+0x8a/0x1e0
+[  370.818028]  [<ffffffff819e3550>] ? bit_wait+0x50/0x50
+[  370.818031]  [<ffffffff819e21b0>] io_schedule_timeout+0x130/0x140
+[  370.818033]  [<ffffffff819e3586>] bit_wait_io+0x36/0x50
+[  370.818034]  [<ffffffff819e31b5>] __wait_on_bit+0x65/0x90
+[  370.818041]  [<ffffffff8125b67c>] ? ext4_read_block_bitmap_nowait+0xbc/0x630
+[  370.818043]  [<ffffffff819e3550>] ? bit_wait+0x50/0x50
+[  370.818045]  [<ffffffff819e3302>] out_of_line_wait_on_bit+0x72/0x80
+[  370.818047]  [<ffffffff810935e0>] ? autoremove_wake_function+0x40/0x40
+[  370.818050]  [<ffffffff811de744>] __wait_on_buffer+0x44/0x50
+[  370.818053]  [<ffffffff8125ae80>] ext4_wait_block_bitmap+0xe0/0xf0
+[  370.818058]  [<ffffffff812975d6>] ext4_mb_init_cache+0x206/0x790
+[  370.818062]  [<ffffffff8114bc6c>] ? lru_cache_add+0x1c/0x50
+[  370.818064]  [<ffffffff81297c7e>] ext4_mb_init_group+0x11e/0x200
+[  370.818066]  [<ffffffff81298231>] ext4_mb_load_buddy+0x341/0x360
+[  370.818068]  [<ffffffff8129a1a3>] ext4_mb_find_by_goal+0x93/0x2f0
+[  370.818070]  [<ffffffff81295b54>] ? ext4_mb_normalize_request+0x1e4/0x5b0
+[  370.818072]  [<ffffffff8129ab67>] ext4_mb_regular_allocator+0x67/0x460
+[  370.818074]  [<ffffffff81295b54>] ? ext4_mb_normalize_request+0x1e4/0x5b0
+[  370.818076]  [<ffffffff8129ca4b>] ext4_mb_new_blocks+0x4cb/0x620
+[  370.818079]  [<ffffffff81290956>] ext4_ext_map_blocks+0x4c6/0x14d0
+[  370.818081]  [<ffffffff812a4d4e>] ? ext4_es_lookup_extent+0x4e/0x290
+[  370.818085]  [<ffffffff8126399d>] ext4_map_blocks+0x14d/0x4f0
+[  370.818088]  [<ffffffff81266fbd>] ext4_writepages+0x76d/0xe50
+[  370.818094]  [<ffffffff81149691>] do_writepages+0x21/0x50
+[  370.818097]  [<ffffffff811d5c00>] __writeback_single_inode+0x60/0x490
+[  370.818099]  [<ffffffff811d630a>] writeback_sb_inodes+0x2da/0x590
+[  370.818103]  [<ffffffff811abf4b>] ? trylock_super+0x1b/0x50
+[  370.818105]  [<ffffffff811abf4b>] ? trylock_super+0x1b/0x50
+[  370.818107]  [<ffffffff811d665f>] __writeback_inodes_wb+0x9f/0xd0
+[  370.818109]  [<ffffffff811d69db>] wb_writeback+0x34b/0x3c0
+[  370.818111]  [<ffffffff811d70df>] bdi_writeback_workfn+0x23f/0x550
+[  370.818116]  [<ffffffff8106bbd8>] process_one_work+0x1c8/0x570
+[  370.818117]  [<ffffffff8106bb5b>] ? process_one_work+0x14b/0x570
+[  370.818119]  [<ffffffff8106c09b>] worker_thread+0x11b/0x470
+[  370.818121]  [<ffffffff8106bf80>] ? process_one_work+0x570/0x570
+[  370.818124]  [<ffffffff81071868>] kthread+0xf8/0x110
+[  370.818126]  [<ffffffff81071770>] ? kthread_create_on_node+0x210/0x210
+[  370.818129]  [<ffffffff819e9322>] ret_from_fork+0x42/0x70
+[  370.818131]  [<ffffffff81071770>] ? kthread_create_on_node+0x210/0x210
+[  370.818132] ---[ end trace 7b4deb71e68b6605 ]---
+
+V2: don't change ->in_iowait
+
+Cc: NeilBrown <neilb@suse.de>
+Signed-off-by: Shaohua Li <shli@fb.com>
+Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Cc: poma <pomidorabelisima@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4384,10 +4384,7 @@ long __sched io_schedule_timeout(long ti
+       long ret;
+       current->in_iowait = 1;
+-      if (old_iowait)
+-              blk_schedule_flush_plug(current);
+-      else
+-              blk_flush_plug(current);
++      blk_schedule_flush_plug(current);
+       delayacct_blkio_start();
+       rq = raw_rq();
diff --git a/queue-4.0/sched-handle-priority-boosted-tasks-proper-in-setscheduler.patch b/queue-4.0/sched-handle-priority-boosted-tasks-proper-in-setscheduler.patch
new file mode 100644 (file)
index 0000000..06f439b
--- /dev/null
@@ -0,0 +1,174 @@
+From 0782e63bc6fe7e2d3408d250df11d388b7799c6b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 5 May 2015 19:49:49 +0200
+Subject: sched: Handle priority boosted tasks proper in setscheduler()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 0782e63bc6fe7e2d3408d250df11d388b7799c6b upstream.
+
+Ronny reported that the following scenario is not handled correctly:
+
+       T1 (prio = 10)
+          lock(rtmutex);
+
+       T2 (prio = 20)
+          lock(rtmutex)
+             boost T1
+
+       T1 (prio = 20)
+          sys_set_scheduler(prio = 30)
+          T1 prio = 30
+          ....
+          sys_set_scheduler(prio = 10)
+          T1 prio = 30
+
+The last step is wrong as T1 should now be back at prio 20.
+
+Commit c365c292d059 ("sched: Consider pi boosting in setscheduler()")
+only handles the case where a boosted task tries to lower its
+priority.
+
+Fix it by taking the new effective priority into account for the
+decision whether a change of the priority is required.
+
+Reported-by: Ronny Meeus <ronny.meeus@gmail.com>
+Tested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
+Fixes: c365c292d059 ("sched: Consider pi boosting in setscheduler()")
+Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1505051806060.4225@nanos
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sched/rt.h |    7 ++++---
+ kernel/locking/rtmutex.c |   12 +++++++-----
+ kernel/sched/core.c      |   26 ++++++++++++++------------
+ 3 files changed, 25 insertions(+), 20 deletions(-)
+
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -18,7 +18,7 @@ static inline int rt_task(struct task_st
+ #ifdef CONFIG_RT_MUTEXES
+ extern int rt_mutex_getprio(struct task_struct *p);
+ extern void rt_mutex_setprio(struct task_struct *p, int prio);
+-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
++extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
+ extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+ static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struc
+       return p->normal_prio;
+ }
+-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
++static inline int rt_mutex_get_effective_prio(struct task_struct *task,
++                                            int newprio)
+ {
+-      return 0;
++      return newprio;
+ }
+ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_tas
+ }
+ /*
+- * Called by sched_setscheduler() to check whether the priority change
+- * is overruled by a possible priority boosting.
++ * Called by sched_setscheduler() to get the priority which will be
++ * effective after the change.
+  */
+-int rt_mutex_check_prio(struct task_struct *task, int newprio)
++int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
+ {
+       if (!task_has_pi_waiters(task))
+-              return 0;
++              return newprio;
+-      return task_top_pi_waiter(task)->task->prio <= newprio;
++      if (task_top_pi_waiter(task)->task->prio <= newprio)
++              return task_top_pi_waiter(task)->task->prio;
++      return newprio;
+ }
+ /*
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3295,15 +3295,18 @@ static void __setscheduler_params(struct
+ /* Actually do priority change: must hold pi & rq lock. */
+ static void __setscheduler(struct rq *rq, struct task_struct *p,
+-                         const struct sched_attr *attr)
++                         const struct sched_attr *attr, bool keep_boost)
+ {
+       __setscheduler_params(p, attr);
+       /*
+-       * If we get here, there was no pi waiters boosting the
+-       * task. It is safe to use the normal prio.
++       * Keep a potential priority boosting if called from
++       * sched_setscheduler().
+        */
+-      p->prio = normal_prio(p);
++      if (keep_boost)
++              p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
++      else
++              p->prio = normal_prio(p);
+       if (dl_prio(p->prio))
+               p->sched_class = &dl_sched_class;
+@@ -3403,7 +3406,7 @@ static int __sched_setscheduler(struct t
+       int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
+                     MAX_RT_PRIO - 1 - attr->sched_priority;
+       int retval, oldprio, oldpolicy = -1, queued, running;
+-      int policy = attr->sched_policy;
++      int new_effective_prio, policy = attr->sched_policy;
+       unsigned long flags;
+       const struct sched_class *prev_class;
+       struct rq *rq;
+@@ -3585,15 +3588,14 @@ change:
+       oldprio = p->prio;
+       /*
+-       * Special case for priority boosted tasks.
+-       *
+-       * If the new priority is lower or equal (user space view)
+-       * than the current (boosted) priority, we just store the new
++       * Take priority boosted tasks into account. If the new
++       * effective priority is unchanged, we just store the new
+        * normal parameters and do not touch the scheduler class and
+        * the runqueue. This will be done when the task deboost
+        * itself.
+        */
+-      if (rt_mutex_check_prio(p, newprio)) {
++      new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
++      if (new_effective_prio == oldprio) {
+               __setscheduler_params(p, attr);
+               task_rq_unlock(rq, p, &flags);
+               return 0;
+@@ -3607,7 +3609,7 @@ change:
+               put_prev_task(rq, p);
+       prev_class = p->sched_class;
+-      __setscheduler(rq, p, attr);
++      __setscheduler(rq, p, attr, true);
+       if (running)
+               p->sched_class->set_curr_task(rq);
+@@ -7357,7 +7359,7 @@ static void normalize_task(struct rq *rq
+       queued = task_on_rq_queued(p);
+       if (queued)
+               dequeue_task(rq, p, 0);
+-      __setscheduler(rq, p, &attr);
++      __setscheduler(rq, p, &attr, false);
+       if (queued) {
+               enqueue_task(rq, p, 0);
+               resched_curr(rq);
index e5e740109a73397fc77ecaf4ed086d0c9ff44100..794cbe4fb19e48d41279f61f2112c77eb2d2aac3 100644 (file)
@@ -93,3 +93,10 @@ libata-blacklist-queued-trim-on-all-samsung-800-series.patch
 arm64-bpf-fix-signedness-bug-in-loading-64-bit-immediate.patch
 rt2x00-add-new-rt2800usb-device-dwa-130.patch
 arm-8325-1-exynos-move-resume-code-to-.text-section.patch
+gpio-gpio-kempld-fix-get_direction-return-value.patch
+crypto-s390-ghash-fix-incorrect-ghash-icv-buffer-handling.patch
+mac80211-move-wep-tailroom-size-check.patch
+mac80211-don-t-use-napi_gro_receive-outside-napi-context.patch
+s390-mm-correct-return-value-of-pmd_pfn.patch
+sched-handle-priority-boosted-tasks-proper-in-setscheduler.patch
+sched-always-use-blk_schedule_flush_plug-in-io_schedule_out.patch