--- /dev/null
+From 28420dad233520811c0e0860e7fb4975ed863fc4 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Mon, 3 Jun 2013 14:40:22 +0200
+Subject: fuse: fix readdirplus Oops in fuse_dentry_revalidate
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 28420dad233520811c0e0860e7fb4975ed863fc4 upstream.
+
+Fix bug introduced by commit 4582a4ab2a "FUSE: Adapt readdirplus to application
+usage patterns".
+
+We need to check for a positive dentry; negative dentries are not added by
+readdirplus. Secondly we need to advise the use of readdirplus on the *parent*,
+otherwise the whole thing is useless. Thirdly all this is only relevant if
+"readdirplus_auto" mode is selected by the filesystem.
+
+We advise the use of readdirplus only if the dentry was still valid. If we had
+to redo the lookup then there was no use in doing the -plus version.
+
+Reported-by: Bernd Schubert <bernd.schubert@itwm.fraunhofer.de>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+CC: Feng Shuo <steve.shuo.feng@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/fuse/dir.c | 12 +++++++++---
+ fs/fuse/inode.c | 7 ++++---
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -180,6 +180,8 @@ u64 fuse_get_attr_version(struct fuse_co
+ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ {
+ struct inode *inode;
++ struct dentry *parent;
++ struct fuse_conn *fc;
+
+ inode = ACCESS_ONCE(entry->d_inode);
+ if (inode && is_bad_inode(inode))
+@@ -187,10 +189,8 @@ static int fuse_dentry_revalidate(struct
+ else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+ int err;
+ struct fuse_entry_out outarg;
+- struct fuse_conn *fc;
+ struct fuse_req *req;
+ struct fuse_forget_link *forget;
+- struct dentry *parent;
+ u64 attr_version;
+
+ /* For negative dentries, always do a fresh lookup */
+@@ -241,8 +241,14 @@ static int fuse_dentry_revalidate(struct
+ entry_attr_timeout(&outarg),
+ attr_version);
+ fuse_change_entry_timeout(entry, &outarg);
++ } else if (inode) {
++ fc = get_fuse_conn(inode);
++ if (fc->readdirplus_auto) {
++ parent = dget_parent(entry);
++ fuse_advise_use_readdirplus(parent->d_inode);
++ dput(parent);
++ }
+ }
+- fuse_advise_use_readdirplus(inode);
+ return 1;
+ }
+
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -864,10 +864,11 @@ static void process_init_reply(struct fu
+ fc->dont_mask = 1;
+ if (arg->flags & FUSE_AUTO_INVAL_DATA)
+ fc->auto_inval_data = 1;
+- if (arg->flags & FUSE_DO_READDIRPLUS)
++ if (arg->flags & FUSE_DO_READDIRPLUS) {
+ fc->do_readdirplus = 1;
+- if (arg->flags & FUSE_READDIRPLUS_AUTO)
+- fc->readdirplus_auto = 1;
++ if (arg->flags & FUSE_READDIRPLUS_AUTO)
++ fc->readdirplus_auto = 1;
++ }
+ } else {
+ ra_pages = fc->max_read / PAGE_CACHE_SIZE;
+ fc->no_lock = 1;
--- /dev/null
+From johannes@sipsolutions.net Wed Jun 5 13:25:39 2013
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Thu, 23 May 2013 22:24:31 +0200
+Subject: iwlwifi: mvm: remove P2P_DEVICE support
+To: linux-wireless@vger.kernel.org
+Cc: stable@vger.kernel.org, Jouni Malinen <j@w1.fi>, Arend van Spriel <arend@broadcom.com>, Johannes Berg <johannes.berg@intel.com>
+Message-ID: <1369340671-14560-1-git-send-email-johannes@sipsolutions.net>
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+Unfortunately, advertising P2P_DEVICE support was a little
+premature, a number of issues came up in testing and have
+been fixed for 3.10. Rather than try to backport all the
+different fixes, disable P2P_DEVICE support in the drivers
+using it. For iwlmvm that implies disabling P2P completely
+as it can't support P2P operation w/o P2P Device.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/iwlwifi/mvm/mac80211.c | 14 +-------------
+ 1 file changed, 1 insertion(+), 13 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -82,15 +82,6 @@ static const struct ieee80211_iface_limi
+ .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+- {
+- .max = 1,
+- .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+- BIT(NL80211_IFTYPE_P2P_GO),
+- },
+- {
+- .max = 1,
+- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+- },
+ };
+
+ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
+@@ -136,10 +127,7 @@ int iwl_mvm_mac_setup_register(struct iw
+ hw->chanctx_data_size = sizeof(struct iwl_mvm_phy_ctxt);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+- BIT(NL80211_IFTYPE_P2P_CLIENT) |
+- BIT(NL80211_IFTYPE_AP) |
+- BIT(NL80211_IFTYPE_P2P_GO) |
+- BIT(NL80211_IFTYPE_P2P_DEVICE);
++ BIT(NL80211_IFTYPE_AP);
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
--- /dev/null
+From 803d19d57a042e86e9e9b685bbc3f4a0a751040f Mon Sep 17 00:00:00 2001
+From: Timo Teräs <timo.teras@iki.fi>
+Date: Fri, 17 May 2013 00:48:39 -0700
+Subject: leds: leds-gpio: reserve gpio before using it
+
+From: Timo Teräs <timo.teras@iki.fi>
+
+commit 803d19d57a042e86e9e9b685bbc3f4a0a751040f upstream.
+
+This reverts commit a99d76f (leds: leds-gpio: use gpio_request_one)
+and commit 2d7c22f (leds: leds-gpio: set devm_gpio_request_one()
+flags param correctly) which was a fix of the first one.
+
+The conversion to devm_gpio_request in commit e3b1d44c (leds:
+leds-gpio: use devm_gpio_request_one) is not reverted.
+
+The problem is that gpio_cansleep() and gpio_get_value_cansleep()
+calls can crash if the gpio is not first reserved. Incidentally this
+same bug existed earlier and was fixed similarly in commit d95cbe61
+(leds: Fix potential leds-gpio oops). But the OOPS is real. It happens
+when GPIOs are provided by module which is not yet loaded.
+
+So this fixes the following BUG during my ALIX boot (3.9.2-vanilla):
+
+BUG: unable to handle kernel NULL pointer dereference at 0000004c
+IP: [<c11287d6>] __gpio_cansleep+0xe/0x1a
+*pde = 00000000
+Oops: 0000 [#1] SMP
+Modules linked in: leds_gpio(+) via_rhine mii cs5535_mfd mfd_core
+geode_rng rng_core geode_aes isofs nls_utf8 nls_cp437 vfat fat
+ata_generic pata_amd pata_cs5536 pata_acpi libata ehci_pci ehci_hcd
+ohci_hcd usb_storage usbcore usb_common sd_mod scsi_mod squashfs loop
+Pid: 881, comm: modprobe Not tainted 3.9.2 #1-Alpine
+EIP: 0060:[<c11287d6>] EFLAGS: 00010282 CPU: 0
+EIP is at __gpio_cansleep+0xe/0x1a
+EAX: 00000000 EBX: cf364018 ECX: c132b8b9 EDX: 00000000
+ESI: c13993a4 EDI: c1399370 EBP: cded9dbc ESP: cded9dbc
+ DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
+CR0: 8005003b CR2: 0000004c CR3: 0f0c4000 CR4: 00000090
+DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
+DR6: ffff0ff0 DR7: 00000400
+Process modprobe (pid: 881, ti=cded8000 task=cf094aa0 task.ti=cded8000)
+Stack:
+ cded9de0 d09471cb 00000000 c1399260 cf364014 00000000 c1399260 c1399254
+ d0949014 cded9df4 c118cd59 c1399260 d0949014 d0949014 cded9e08 c118ba47
+ c1399260 d0949014 c1399294 cded9e1c c118bb75 cded9e24 d0949014 00000000
+Call Trace:
+ [<d09471cb>] gpio_led_probe+0xba/0x203 [leds_gpio]
+ [<c118cd59>] platform_drv_probe+0x26/0x48
+ [<c118ba47>] driver_probe_device+0x75/0x15c
+ [<c118bb75>] __driver_attach+0x47/0x63
+ [<c118a727>] bus_for_each_dev+0x3c/0x66
+ [<c118b6f9>] driver_attach+0x14/0x16
+ [<c118bb2e>] ? driver_probe_device+0x15c/0x15c
+ [<c118b3d5>] bus_add_driver+0xbd/0x1bc
+ [<d08b4000>] ? 0xd08b3fff
+ [<d08b4000>] ? 0xd08b3fff
+ [<c118bffc>] driver_register+0x74/0xec
+ [<d08b4000>] ? 0xd08b3fff
+ [<c118c8e8>] platform_driver_register+0x38/0x3a
+ [<d08b400d>] gpio_led_driver_init+0xd/0x1000 [leds_gpio]
+ [<c100116c>] do_one_initcall+0x6b/0x10f
+ [<d08b4000>] ? 0xd08b3fff
+ [<c105e918>] load_module+0x1631/0x1907
+ [<c10975d6>] ? insert_vmalloc_vmlist+0x14/0x43
+ [<c1098d5b>] ? __vmalloc_node_range+0x13e/0x15f
+ [<c105ec50>] sys_init_module+0x62/0x77
+ [<c1257888>] syscall_call+0x7/0xb
+EIP: [<c11287d6>] __gpio_cansleep+0xe/0x1a SS:ESP 0068:cded9dbc
+CR2: 000000000000004c
+ ---[ end trace 5308fb20d2514822 ]---
+
+Signed-off-by: Timo Teräs <timo.teras@iki.fi>
+Cc: Sachin Kamat <sachin.kamat@linaro.org>
+Cc: Raphael Assenat <raph@8d.com>
+Cc: Trent Piepho <tpiepho@freescale.com>
+Cc: Javier Martinez Canillas <javier.martinez@collabora.co.uk>
+Cc: Arnaud Patard <arnaud.patard@rtp-net.org>
+Cc: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+Acked-by: Jingoo Han <jg1.han@samsung.com>
+Signed-off-by: Bryan Wu <cooloney@gmail.com>
+Signed-off-by: Jonghwan Choi <jhbird.choi@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/leds/leds-gpio.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/leds/leds-gpio.c
++++ b/drivers/leds/leds-gpio.c
+@@ -107,6 +107,10 @@ static int create_gpio_led(const struct
+ return 0;
+ }
+
++ ret = devm_gpio_request(parent, template->gpio, template->name);
++ if (ret < 0)
++ return ret;
++
+ led_dat->cdev.name = template->name;
+ led_dat->cdev.default_trigger = template->default_trigger;
+ led_dat->gpio = template->gpio;
+@@ -126,10 +130,7 @@ static int create_gpio_led(const struct
+ if (!template->retain_state_suspended)
+ led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+
+- ret = devm_gpio_request_one(parent, template->gpio,
+- (led_dat->active_low ^ state) ?
+- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+- template->name);
++ ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
+ if (ret < 0)
+ return ret;
+
--- /dev/null
+From johannes@sipsolutions.net Wed Jun 5 13:26:01 2013
+From: Johannes Berg <johannes@sipsolutions.net>
+Date: Thu, 23 May 2013 22:24:11 +0200
+Subject: mac80211_hwsim: remove P2P_DEVICE support
+To: linux-wireless@vger.kernel.org
+Cc: stable@vger.kernel.org, Jouni Malinen <j@w1.fi>, Arend van Spriel <arend@broadcom.com>, Johannes Berg <johannes.berg@intel.com>
+Message-ID: <1369340651-14475-1-git-send-email-johannes@sipsolutions.net>
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+Unfortunately, advertising P2P_DEVICE support was a little
+premature, a number of issues came up in testing and have
+been fixed for 3.10. Rather than try to backport all the
+different fixes, disable P2P_DEVICE support in the drivers
+using it.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mac80211_hwsim.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2118,7 +2118,6 @@ static const struct ieee80211_iface_limi
+ #endif
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+ };
+
+ static struct ieee80211_iface_combination hwsim_if_comb = {
+@@ -2230,8 +2229,7 @@ static int __init init_mac80211_hwsim(vo
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+- BIT(NL80211_IFTYPE_MESH_POINT) |
+- BIT(NL80211_IFTYPE_P2P_DEVICE);
++ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->flags = IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_SIGNAL_DBM |
net-can-kvaser_usb-fix-reception-on-usbcan-pro-and-usbcan-r-type-hardware.patch
ib-iser-return-error-to-upper-layers-on-eagain-registration-failures.patch
asoc-davinci-fix-sample-rotation.patch
+fuse-fix-readdirplus-oops-in-fuse_dentry_revalidate.patch
+target-re-instate-sess_wait_list-for-target_wait_for_sess_cmds.patch
+target-file-fix-off-by-one-read_capacity-bug-for-s_isblk-export.patch
+leds-leds-gpio-reserve-gpio-before-using-it.patch
+xen-netback-coalesce-slots-in-tx-path-and-fix-regressions.patch
+xen-netback-don-t-disconnect-frontend-when-seeing-oversize-packet.patch
+xen-netback-remove-redundent-parameter-in-netbk_count_requests.patch
+xen-netback-avoid-allocating-variable-size-array-on-stack.patch
+xen-netfront-reduce-gso_max_size-to-account-for-max-tcp-header.patch
+iwlwifi-mvm-remove-p2p_device-support.patch
+mac80211_hwsim-remove-p2p_device-support.patch
--- /dev/null
+From 21363ca873334391992f2f424856aa864345bb61 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Wed, 29 May 2013 21:35:23 -0700
+Subject: target/file: Fix off-by-one READ_CAPACITY bug for !S_ISBLK export
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 21363ca873334391992f2f424856aa864345bb61 upstream.
+
+This patch fixes a bug where FILEIO was incorrectly reporting the number
+of logical blocks (+ 1) when using non struct block_device export mode.
+
+It changes fd_get_blocks() to follow all other backend ->get_blocks() cases,
+and reduces the calculated dev_size by one dev->dev_attrib.block_size
+number of bytes, and also fixes initial fd_block_size assignment at
+fd_configure_device() time introduced in commit 0fd97ccf4.
+
+Reported-by: Wenchao Xia <xiawenc@linux.vnet.ibm.com>
+Reported-by: Badari Pulavarty <pbadari@us.ibm.com>
+Tested-by: Badari Pulavarty <pbadari@us.ibm.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Lingzhu Xiang <lxiang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_file.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -150,6 +150,7 @@ static int fd_configure_device(struct se
+ if (S_ISBLK(inode->i_mode)) {
+ unsigned long long dev_size;
+
++ fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+ /*
+ * Determine the number of bytes from i_size_read() minus
+ * one (1) logical sector from underlying struct block_device
+@@ -168,11 +169,11 @@ static int fd_configure_device(struct se
+ " block_device\n");
+ goto fail;
+ }
+- }
+
+- fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
++ fd_dev->fd_block_size = FD_BLOCKSIZE;
++ }
+
+- dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
++ dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+ dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
+
+@@ -583,11 +584,12 @@ static sector_t fd_get_blocks(struct se_
+ * to handle underlying block_device resize operations.
+ */
+ if (S_ISBLK(i->i_mode))
+- dev_size = (i_size_read(i) - fd_dev->fd_block_size);
++ dev_size = i_size_read(i);
+ else
+ dev_size = fd_dev->fd_dev_size;
+
+- return div_u64(dev_size, dev->dev_attrib.block_size);
++ return div_u64(dev_size - dev->dev_attrib.block_size,
++ dev->dev_attrib.block_size);
+ }
+
+ static struct sbc_ops fd_sbc_ops = {
--- /dev/null
+From 9b31a328e344e62e7cc98ae574edcb7b674719bb Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Wed, 15 May 2013 00:52:44 -0700
+Subject: target: Re-instate sess_wait_list for target_wait_for_sess_cmds
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 9b31a328e344e62e7cc98ae574edcb7b674719bb upstream.
+
+Switch back to pre commit 1c7b13fe652 list splicing logic for active I/O
+shutdown with tcm_qla2xxx + ib_srpt fabrics.
+
+The original commit was done under the incorrect assumption that it's safe to
+walk se_sess->sess_cmd_list unprotected in target_wait_for_sess_cmds() after
+sess->sess_tearing_down = 1 has been set by target_sess_cmd_list_set_waiting()
+during session shutdown.
+
+So instead of adding sess->sess_cmd_lock protection around sess->sess_cmd_list
+during target_wait_for_sess_cmds(), switch back to sess->sess_wait_list to
+allow wait_for_completion() + TFO->release_cmd() to occur without having to
+walk ->sess_cmd_list after the list_splice.
+
+Also add a check to exit if target_sess_cmd_list_set_waiting() has already
+been called, and add a WARN_ON to check for any fabric bug where new se_cmds
+are added to sess->sess_cmd_list after sess->sess_tearing_down = 1 has already
+been set.
+
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Cc: Joern Engel <joern@logfs.org>
+Cc: Roland Dreier <roland@kernel.org>
+Signed-off-by: Lingzhu Xiang <lxiang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/target/target_core_transport.c | 18 ++++++++++++++----
+ include/target/target_core_base.h | 1 +
+ 2 files changed, 15 insertions(+), 4 deletions(-)
+
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -222,6 +222,7 @@ struct se_session *transport_init_sessio
+ INIT_LIST_HEAD(&se_sess->sess_list);
+ INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ INIT_LIST_HEAD(&se_sess->sess_cmd_list);
++ INIT_LIST_HEAD(&se_sess->sess_wait_list);
+ spin_lock_init(&se_sess->sess_cmd_lock);
+ kref_init(&se_sess->sess_kref);
+
+@@ -2252,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(st
+ unsigned long flags;
+
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+-
+- WARN_ON(se_sess->sess_tearing_down);
++ if (se_sess->sess_tearing_down) {
++ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++ return;
++ }
+ se_sess->sess_tearing_down = 1;
++ list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+
+- list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
++ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+ se_cmd->cmd_wait_set = 1;
+
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+@@ -2273,9 +2277,10 @@ void target_wait_for_sess_cmds(
+ {
+ struct se_cmd *se_cmd, *tmp_cmd;
+ bool rc = false;
++ unsigned long flags;
+
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+- &se_sess->sess_cmd_list, se_cmd_list) {
++ &se_sess->sess_wait_list, se_cmd_list) {
+ list_del(&se_cmd->se_cmd_list);
+
+ pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+@@ -2303,6 +2308,11 @@ void target_wait_for_sess_cmds(
+
+ se_cmd->se_tfo->release_cmd(se_cmd);
+ }
++
++ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
++ WARN_ON(!list_empty(&se_sess->sess_cmd_list));
++ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++
+ }
+ EXPORT_SYMBOL(target_wait_for_sess_cmds);
+
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -544,6 +544,7 @@ struct se_session {
+ struct list_head sess_list;
+ struct list_head sess_acl_list;
+ struct list_head sess_cmd_list;
++ struct list_head sess_wait_list;
+ spinlock_t sess_cmd_lock;
+ struct kref sess_kref;
+ };
--- /dev/null
+From 59ccb4ebbc35e36a3c143f2d1355deb75c2e628f Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Thu, 2 May 2013 00:43:58 +0000
+Subject: xen-netback: avoid allocating variable size array on stack
+
+From: Wei Liu <wei.liu2@citrix.com>
+
+commit 59ccb4ebbc35e36a3c143f2d1355deb75c2e628f upstream.
+
+Tune xen_netbk_count_requests to not touch working array beyond limit, so that
+we can make working array size constant.
+
+Suggested-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netback/netback.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -934,11 +934,14 @@ static int netbk_count_requests(struct x
+ RING_IDX cons = vif->tx.req_cons;
+ int slots = 0;
+ int drop_err = 0;
++ int more_data;
+
+ if (!(first->flags & XEN_NETTXF_more_data))
+ return 0;
+
+ do {
++ struct xen_netif_tx_request dropped_tx = { 0 };
++
+ if (slots >= work_to_do) {
+ netdev_err(vif->dev,
+ "Asked for %d slots but exceeds this limit\n",
+@@ -972,6 +975,9 @@ static int netbk_count_requests(struct x
+ drop_err = -E2BIG;
+ }
+
++ if (drop_err)
++ txp = &dropped_tx;
++
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ sizeof(*txp));
+
+@@ -1001,7 +1007,13 @@ static int netbk_count_requests(struct x
+ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+- } while ((txp++)->flags & XEN_NETTXF_more_data);
++
++ more_data = txp->flags & XEN_NETTXF_more_data;
++
++ if (!drop_err)
++ txp++;
++
++ } while (more_data);
+
+ if (drop_err) {
+ netbk_tx_err(vif, first, cons + slots);
+@@ -1413,7 +1425,7 @@ static unsigned xen_netbk_tx_build_gops(
+ !list_empty(&netbk->net_schedule_list)) {
+ struct xenvif *vif;
+ struct xen_netif_tx_request txreq;
+- struct xen_netif_tx_request txfrags[max_skb_slots];
++ struct xen_netif_tx_request txfrags[XEN_NETIF_NR_SLOTS_MIN];
+ struct page *page;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ u16 pending_idx;
--- /dev/null
+From 2810e5b9a7731ca5fce22bfbe12c96e16ac44b6f Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Mon, 22 Apr 2013 02:20:42 +0000
+Subject: xen-netback: coalesce slots in TX path and fix regressions
+
+From: Wei Liu <wei.liu2@citrix.com>
+
+commit 2810e5b9a7731ca5fce22bfbe12c96e16ac44b6f upstream.
+
+This patch tries to coalesce tx requests when constructing grant copy
+structures. It enables netback to deal with situation when frontend's
+MAX_SKB_FRAGS is larger than backend's MAX_SKB_FRAGS.
+
+With the help of coalescing, this patch tries to address two regressions and
+avoid reopening the security hole in XSA-39.
+
+Regression 1. The reduction of the number of supported ring entries (slots)
+per packet (from 18 to 17). This regression has been around for some time but
+remains unnoticed until XSA-39 security fix. This is fixed by coalescing
+slots.
+
+Regression 2. The XSA-39 security fix turning "too many frags" errors from
+just dropping the packet to a fatal error and disabling the VIF. This is fixed
+by coalescing slots (handling 18 slots when backend's MAX_SKB_FRAGS is 17)
+which rules out false positive (using 18 slots is legit) and dropping packets
+using 19 to `max_skb_slots` slots.
+
+To avoid reopening security hole in XSA-39, frontend sending packet using more
+than max_skb_slots is considered malicious.
+
+The behavior of netback for packet is thus:
+
+ 1-18 slots: valid
+ 19-max_skb_slots slots: drop and respond with an error
+ max_skb_slots+ slots: fatal error
+
+max_skb_slots is configurable by admin, default value is 20.
+
+Also change variable name from "frags" to "slots" in netbk_count_requests.
+
+Please note that RX path still has dependency on MAX_SKB_FRAGS. This will be
+fixed with separate patch.
+
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/net/xen-netback/netback.c | 271 +++++++++++++++++++++++++++++++-------
+ include/xen/interface/io/netif.h | 18 ++
+ 2 files changed, 240 insertions(+), 49 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -47,11 +47,25 @@
+ #include <asm/xen/hypercall.h>
+ #include <asm/xen/page.h>
+
++/*
++ * This is the maximum slots a skb can have. If a guest sends a skb
++ * which exceeds this limit it is considered malicious.
++ */
++#define MAX_SKB_SLOTS_DEFAULT 20
++static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
++module_param(max_skb_slots, uint, 0444);
++
++typedef unsigned int pending_ring_idx_t;
++#define INVALID_PENDING_RING_IDX (~0U)
++
+ struct pending_tx_info {
+- struct xen_netif_tx_request req;
++ struct xen_netif_tx_request req; /* coalesced tx request */
+ struct xenvif *vif;
++ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
++ * if it is head of one or more tx
++ * reqs
++ */
+ };
+-typedef unsigned int pending_ring_idx_t;
+
+ struct netbk_rx_meta {
+ int id;
+@@ -102,7 +116,11 @@ struct xen_netbk {
+ atomic_t netfront_count;
+
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+- struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
++ /* Coalescing tx requests before copying makes number of grant
++ * copy ops greater or equal to number of slots required. In
++ * worst case a tx request consumes 2 gnttab_copy.
++ */
++ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
+
+ u16 pending_ring[MAX_PENDING_REQS];
+
+@@ -118,6 +136,16 @@ struct xen_netbk {
+ static struct xen_netbk *xen_netbk;
+ static int xen_netbk_group_nr;
+
++/*
++ * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
++ * one or more merged tx requests, otherwise it is the continuation of
++ * previous tx request.
++ */
++static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
++{
++ return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
++}
++
+ void xen_netbk_add_xenvif(struct xenvif *vif)
+ {
+ int i;
+@@ -250,6 +278,7 @@ static int max_required_rx_slots(struct
+ {
+ int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+
++ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+ if (vif->can_sg || vif->gso || vif->gso_prefix)
+ max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
+
+@@ -657,6 +686,7 @@ static void xen_netbk_rx_action(struct x
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
++ /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
+ if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
+ break;
+ }
+@@ -898,47 +928,78 @@ static void netbk_fatal_tx_err(struct xe
+
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
++ RING_IDX first_idx,
+ struct xen_netif_tx_request *txp,
+ int work_to_do)
+ {
+ RING_IDX cons = vif->tx.req_cons;
+- int frags = 0;
++ int slots = 0;
++ int drop_err = 0;
+
+ if (!(first->flags & XEN_NETTXF_more_data))
+ return 0;
+
+ do {
+- if (frags >= work_to_do) {
+- netdev_err(vif->dev, "Need more frags\n");
++ if (slots >= work_to_do) {
++ netdev_err(vif->dev,
++ "Asked for %d slots but exceeds this limit\n",
++ work_to_do);
+ netbk_fatal_tx_err(vif);
+ return -ENODATA;
+ }
+
+- if (unlikely(frags >= MAX_SKB_FRAGS)) {
+- netdev_err(vif->dev, "Too many frags\n");
++ /* This guest is really using too many slots and
++ * considered malicious.
++ */
++ if (unlikely(slots >= max_skb_slots)) {
++ netdev_err(vif->dev,
++ "Malicious frontend using %d slots, threshold %u\n",
++ slots, max_skb_slots);
+ netbk_fatal_tx_err(vif);
+ return -E2BIG;
+ }
+
+- memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
++ /* Xen network protocol had implicit dependency on
++ * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
++ * historical MAX_SKB_FRAGS value 18 to honor the same
++ * behavior as before. Any packet using more than 18
++ * slots but less than max_skb_slots slots is dropped
++ */
++ if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
++ if (net_ratelimit())
++ netdev_dbg(vif->dev,
++ "Too many slots (%d) exceeding limit (%d), dropping packet\n",
++ slots, XEN_NETIF_NR_SLOTS_MIN);
++ drop_err = -E2BIG;
++ }
++
++ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ sizeof(*txp));
+ if (txp->size > first->size) {
+- netdev_err(vif->dev, "Frag is bigger than frame.\n");
++ netdev_err(vif->dev,
++ "Invalid tx request, slot size %u > remaining size %u\n",
++ txp->size, first->size);
+ netbk_fatal_tx_err(vif);
+ return -EIO;
+ }
+
+ first->size -= txp->size;
+- frags++;
++ slots++;
+
+ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
+- netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
++ netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
+ txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
+ return -EINVAL;
+ }
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+- return frags;
++
++ if (drop_err) {
++ netbk_tx_err(vif, first, first_idx + slots);
++ return drop_err;
++ }
++
++ return slots;
+ }
+
+ static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
+@@ -962,48 +1023,114 @@ static struct gnttab_copy *xen_netbk_get
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ skb_frag_t *frags = shinfo->frags;
+ u16 pending_idx = *((u16 *)skb->data);
+- int i, start;
++ u16 head_idx = 0;
++ int slot, start;
++ struct page *page;
++ pending_ring_idx_t index, start_idx = 0;
++ uint16_t dst_offset;
++ unsigned int nr_slots;
++ struct pending_tx_info *first = NULL;
++
++ /* At this point shinfo->nr_frags is in fact the number of
++ * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
++ */
++ nr_slots = shinfo->nr_frags;
+
+ /* Skip first skb fragment if it is on same page as header fragment. */
+ start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+
+- for (i = start; i < shinfo->nr_frags; i++, txp++) {
+- struct page *page;
+- pending_ring_idx_t index;
++ /* Coalesce tx requests, at this point the packet passed in
++ * should be <= 64K. Any packets larger than 64K have been
++ * handled in netbk_count_requests().
++ */
++ for (shinfo->nr_frags = slot = start; slot < nr_slots;
++ shinfo->nr_frags++) {
+ struct pending_tx_info *pending_tx_info =
+ netbk->pending_tx_info;
+
+- index = pending_index(netbk->pending_cons++);
+- pending_idx = netbk->pending_ring[index];
+- page = xen_netbk_alloc_page(netbk, pending_idx);
++ page = alloc_page(GFP_KERNEL|__GFP_COLD);
+ if (!page)
+ goto err;
+
+- gop->source.u.ref = txp->gref;
+- gop->source.domid = vif->domid;
+- gop->source.offset = txp->offset;
++ dst_offset = 0;
++ first = NULL;
++ while (dst_offset < PAGE_SIZE && slot < nr_slots) {
++ gop->flags = GNTCOPY_source_gref;
++
++ gop->source.u.ref = txp->gref;
++ gop->source.domid = vif->domid;
++ gop->source.offset = txp->offset;
++
++ gop->dest.domid = DOMID_SELF;
++
++ gop->dest.offset = dst_offset;
++ gop->dest.u.gmfn = virt_to_mfn(page_address(page));
++
++ if (dst_offset + txp->size > PAGE_SIZE) {
++ /* This page can only merge a portion
++ * of tx request. Do not increment any
++ * pointer / counter here. The txp
++ * will be dealt with in future
++ * rounds, eventually hitting the
++ * `else` branch.
++ */
++ gop->len = PAGE_SIZE - dst_offset;
++ txp->offset += gop->len;
++ txp->size -= gop->len;
++ dst_offset += gop->len; /* quit loop */
++ } else {
++ /* This tx request can be merged in the page */
++ gop->len = txp->size;
++ dst_offset += gop->len;
++
++ index = pending_index(netbk->pending_cons++);
++
++ pending_idx = netbk->pending_ring[index];
++
++ memcpy(&pending_tx_info[pending_idx].req, txp,
++ sizeof(*txp));
++ xenvif_get(vif);
++
++ pending_tx_info[pending_idx].vif = vif;
++
++ /* Poison these fields, corresponding
++ * fields for head tx req will be set
++ * to correct values after the loop.
++ */
++ netbk->mmap_pages[pending_idx] = (void *)(~0UL);
++ pending_tx_info[pending_idx].head =
++ INVALID_PENDING_RING_IDX;
++
++ if (!first) {
++ first = &pending_tx_info[pending_idx];
++ start_idx = index;
++ head_idx = pending_idx;
++ }
+
+- gop->dest.u.gmfn = virt_to_mfn(page_address(page));
+- gop->dest.domid = DOMID_SELF;
+- gop->dest.offset = txp->offset;
+-
+- gop->len = txp->size;
+- gop->flags = GNTCOPY_source_gref;
++ txp++;
++ slot++;
++ }
+
+- gop++;
++ gop++;
++ }
+
+- memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
+- xenvif_get(vif);
+- pending_tx_info[pending_idx].vif = vif;
+- frag_set_pending_idx(&frags[i], pending_idx);
++ first->req.offset = 0;
++ first->req.size = dst_offset;
++ first->head = start_idx;
++ set_page_ext(page, netbk, head_idx);
++ netbk->mmap_pages[head_idx] = page;
++ frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+ }
+
++ BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
++
+ return gop;
+ err:
+ /* Unwind, freeing all pages and sending error responses. */
+- while (i-- > start) {
+- xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+- XEN_NETIF_RSP_ERROR);
++ while (shinfo->nr_frags-- > start) {
++ xen_netbk_idx_release(netbk,
++ frag_get_pending_idx(&frags[shinfo->nr_frags]),
++ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+@@ -1019,8 +1146,10 @@ static int xen_netbk_tx_check_gop(struct
+ struct gnttab_copy *gop = *gopp;
+ u16 pending_idx = *((u16 *)skb->data);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ struct pending_tx_info *tx_info;
+ int nr_frags = shinfo->nr_frags;
+ int i, err, start;
++ u16 peek; /* peek into next tx request */
+
+ /* Check status of header. */
+ err = gop->status;
+@@ -1032,11 +1161,20 @@ static int xen_netbk_tx_check_gop(struct
+
+ for (i = start; i < nr_frags; i++) {
+ int j, newerr;
++ pending_ring_idx_t head;
+
+ pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
++ tx_info = &netbk->pending_tx_info[pending_idx];
++ head = tx_info->head;
+
+ /* Check error status: if okay then remember grant handle. */
+- newerr = (++gop)->status;
++ do {
++ newerr = (++gop)->status;
++ if (newerr)
++ break;
++ peek = netbk->pending_ring[pending_index(++head)];
++ } while (!pending_tx_is_head(netbk, peek));
++
+ if (likely(!newerr)) {
+ /* Had a previous error? Invalidate this fragment. */
+ if (unlikely(err))
+@@ -1261,11 +1399,12 @@ static unsigned xen_netbk_tx_build_gops(
+ struct sk_buff *skb;
+ int ret;
+
+- while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
++ < MAX_PENDING_REQS) &&
+ !list_empty(&netbk->net_schedule_list)) {
+ struct xenvif *vif;
+ struct xen_netif_tx_request txreq;
+- struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
++ struct xen_netif_tx_request txfrags[max_skb_slots];
+ struct page *page;
+ struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ u16 pending_idx;
+@@ -1326,7 +1465,8 @@ static unsigned xen_netbk_tx_build_gops(
+ continue;
+ }
+
+- ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
++ ret = netbk_count_requests(vif, &txreq, idx,
++ txfrags, work_to_do);
+ if (unlikely(ret < 0))
+ continue;
+
+@@ -1353,7 +1493,7 @@ static unsigned xen_netbk_tx_build_gops(
+ pending_idx = netbk->pending_ring[index];
+
+ data_len = (txreq.size > PKT_PROT_LEN &&
+- ret < MAX_SKB_FRAGS) ?
++ ret < XEN_NETIF_NR_SLOTS_MIN) ?
+ PKT_PROT_LEN : txreq.size;
+
+ skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
+@@ -1403,6 +1543,7 @@ static unsigned xen_netbk_tx_build_gops(
+ memcpy(&netbk->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ netbk->pending_tx_info[pending_idx].vif = vif;
++ netbk->pending_tx_info[pending_idx].head = index;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_put(skb, data_len);
+@@ -1530,7 +1671,10 @@ static void xen_netbk_idx_release(struct
+ {
+ struct xenvif *vif;
+ struct pending_tx_info *pending_tx_info;
+- pending_ring_idx_t index;
++ pending_ring_idx_t head;
++ u16 peek; /* peek into next tx request */
++
++ BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+
+ /* Already complete? */
+ if (netbk->mmap_pages[pending_idx] == NULL)
+@@ -1539,19 +1683,40 @@ static void xen_netbk_idx_release(struct
+ pending_tx_info = &netbk->pending_tx_info[pending_idx];
+
+ vif = pending_tx_info->vif;
++ head = pending_tx_info->head;
+
+- make_tx_response(vif, &pending_tx_info->req, status);
++ BUG_ON(!pending_tx_is_head(netbk, head));
++ BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+
+- index = pending_index(netbk->pending_prod++);
+- netbk->pending_ring[index] = pending_idx;
++ do {
++ pending_ring_idx_t index;
++ pending_ring_idx_t idx = pending_index(head);
++ u16 info_idx = netbk->pending_ring[idx];
+
+- xenvif_put(vif);
++ pending_tx_info = &netbk->pending_tx_info[info_idx];
++ make_tx_response(vif, &pending_tx_info->req, status);
++
++ /* Setting any number other than
++ * INVALID_PENDING_RING_IDX indicates this slot is
++ * starting a new packet / ending a previous packet.
++ */
++ pending_tx_info->head = 0;
++
++ index = pending_index(netbk->pending_prod++);
++ netbk->pending_ring[index] = netbk->pending_ring[info_idx];
++
++ xenvif_put(vif);
++
++ peek = netbk->pending_ring[pending_index(++head)];
++
++ } while (!pending_tx_is_head(netbk, peek));
+
+ netbk->mmap_pages[pending_idx]->mapping = 0;
+ put_page(netbk->mmap_pages[pending_idx]);
+ netbk->mmap_pages[pending_idx] = NULL;
+ }
+
++
+ static void make_tx_response(struct xenvif *vif,
+ struct xen_netif_tx_request *txp,
+ s8 st)
+@@ -1604,8 +1769,9 @@ static inline int rx_work_todo(struct xe
+ static inline int tx_work_todo(struct xen_netbk *netbk)
+ {
+
+- if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
+- !list_empty(&netbk->net_schedule_list))
++ if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
++ < MAX_PENDING_REQS) &&
++ !list_empty(&netbk->net_schedule_list))
+ return 1;
+
+ return 0;
+@@ -1688,6 +1854,13 @@ static int __init netback_init(void)
+ if (!xen_domain())
+ return -ENODEV;
+
++ if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
++ printk(KERN_INFO
++ "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
++ max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
++ max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
++ }
++
+ xen_netbk_group_nr = num_online_cpus();
+ xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
+ if (!xen_netbk)
+--- a/include/xen/interface/io/netif.h
++++ b/include/xen/interface/io/netif.h
+@@ -13,6 +13,24 @@
+ #include <xen/interface/grant_table.h>
+
+ /*
++ * Older implementation of Xen network frontend / backend has an
++ * implicit dependency on the MAX_SKB_FRAGS as the maximum number of
++ * ring slots a skb can use. Netfront / netback may not work as
++ * expected when frontend and backend have different MAX_SKB_FRAGS.
++ *
++ * A better approach is to add mechanism for netfront / netback to
++ * negotiate this value. However we cannot fix all possible
++ * frontends, so we need to define a value which states the minimum
++ * slots backend must support.
++ *
++ * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS
++ * (18), which is proved to work with most frontends. Any new backend
++ * which doesn't negotiate with frontend should expect frontend to
++ * send a valid packet using slots up to this value.
++ */
++#define XEN_NETIF_NR_SLOTS_MIN 18
++
++/*
+ * Notifications after enqueuing any type of message should be conditional on
+ * the appropriate req_event or rsp_event field in the shared ring.
+ * If the client sends notification for rx requests then it should specify
--- /dev/null
+From 03393fd5cc2b6cdeec32b704ecba64dbb0feae3c Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Mon, 22 Apr 2013 02:20:43 +0000
+Subject: xen-netback: don't disconnect frontend when seeing oversize packet
+
+From: Wei Liu <wei.liu2@citrix.com>
+
+commit 03393fd5cc2b6cdeec32b704ecba64dbb0feae3c upstream.
+
+Some frontend drivers are sending packets > 64 KiB in length. This length
+overflows the length field in the first slot making the following slots have
+an invalid length.
+
+Turn this error back into a non-fatal error by dropping the packet. To avoid
+having the following slots having fatal errors, consume all slots in the
+packet.
+
+This does not reopen the security hole in XSA-39 as, if the packet has an
+invalid number of slots, it will still hit the fatal error case.
+
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netback/netback.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -975,12 +975,22 @@ static int netbk_count_requests(struct x
+
+ memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
+ sizeof(*txp));
+- if (txp->size > first->size) {
+- netdev_err(vif->dev,
+- "Invalid tx request, slot size %u > remaining size %u\n",
+- txp->size, first->size);
+- netbk_fatal_tx_err(vif);
+- return -EIO;
++
++ /* If the guest submitted a frame >= 64 KiB then
++ * first->size overflowed and following slots will
++ * appear to be larger than the frame.
++ *
++ * This cannot be fatal error as there are buggy
++ * frontends that do this.
++ *
++ * Consume all slots and drop the packet.
++ */
++ if (!drop_err && txp->size > first->size) {
++ if (net_ratelimit())
++ netdev_dbg(vif->dev,
++ "Invalid tx request, slot size %u > remaining size %u\n",
++ txp->size, first->size);
++ drop_err = -EIO;
+ }
+
+ first->size -= txp->size;
--- /dev/null
+From ac69c26e7accb04ae2cb9ab0872068983a42b3c8 Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Thu, 2 May 2013 00:43:57 +0000
+Subject: xen-netback: remove redundant parameter in netbk_count_requests
+
+From: Wei Liu <wei.liu2@citrix.com>
+
+commit ac69c26e7accb04ae2cb9ab0872068983a42b3c8 upstream.
+
+Tracking down from the caller, first_idx is always equal to vif->tx.req_cons.
+Remove it to avoid confusion.
+
+Suggested-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netback/netback.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -928,7 +928,6 @@ static void netbk_fatal_tx_err(struct xe
+
+ static int netbk_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+- RING_IDX first_idx,
+ struct xen_netif_tx_request *txp,
+ int work_to_do)
+ {
+@@ -1005,7 +1004,7 @@ static int netbk_count_requests(struct x
+ } while ((txp++)->flags & XEN_NETTXF_more_data);
+
+ if (drop_err) {
+- netbk_tx_err(vif, first, first_idx + slots);
++ netbk_tx_err(vif, first, cons + slots);
+ return drop_err;
+ }
+
+@@ -1475,8 +1474,7 @@ static unsigned xen_netbk_tx_build_gops(
+ continue;
+ }
+
+- ret = netbk_count_requests(vif, &txreq, idx,
+- txfrags, work_to_do);
++ ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+ if (unlikely(ret < 0))
+ continue;
+
--- /dev/null
+From 9ecd1a75d977e2e8c48139c7d3efed183f898d94 Mon Sep 17 00:00:00 2001
+From: Wei Liu <wei.liu2@citrix.com>
+Date: Mon, 22 Apr 2013 02:20:41 +0000
+Subject: xen-netfront: reduce gso_max_size to account for max TCP header
+
+From: Wei Liu <wei.liu2@citrix.com>
+
+commit 9ecd1a75d977e2e8c48139c7d3efed183f898d94 upstream.
+
+The maximum packet including header that can be handled by netfront / netback
+wire format is 65535. Reduce gso_max_size accordingly.
+
+Drop skb and print warning when skb->len > 65535. This can 1) save the effort
+to send malformed packet to netback, 2) help spotting misconfiguration of
+netfront in the future.
+
+Signed-off-by: Wei Liu <wei.liu2@citrix.com>
+Acked-by: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/xen-netfront.c | 17 +++++++++++++++--
+ include/xen/interface/io/netif.h | 1 +
+ 2 files changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -36,7 +36,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_ether.h>
+-#include <linux/tcp.h>
++#include <net/tcp.h>
+ #include <linux/udp.h>
+ #include <linux/moduleparam.h>
+ #include <linux/mm.h>
+@@ -548,6 +548,16 @@ static int xennet_start_xmit(struct sk_b
+ unsigned int len = skb_headlen(skb);
+ unsigned long flags;
+
++ /* If skb->len is too big for wire format, drop skb and alert
++ * user about misconfiguration.
++ */
++ if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
++ net_alert_ratelimited(
++ "xennet: skb->len = %u, too big for wire format\n",
++ skb->len);
++ goto drop;
++ }
++
+ slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+ xennet_count_skb_frag_slots(skb);
+ if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+@@ -1064,7 +1074,8 @@ err:
+
+ static int xennet_change_mtu(struct net_device *dev, int mtu)
+ {
+- int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++ int max = xennet_can_sg(dev) ?
++ XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
+
+ if (mtu > max)
+ return -EINVAL;
+@@ -1368,6 +1379,8 @@ static struct net_device *xennet_create_
+ SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
+ SET_NETDEV_DEV(netdev, &dev->dev);
+
++ netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
++
+ np->netdev = netdev;
+
+ netif_carrier_off(netdev);
+--- a/include/xen/interface/io/netif.h
++++ b/include/xen/interface/io/netif.h
+@@ -65,6 +65,7 @@
+ #define _XEN_NETTXF_extra_info (3)
+ #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
+
++#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
+ struct xen_netif_tx_request {
+ grant_ref_t gref; /* Reference to buffer page */
+ uint16_t offset; /* Offset within buffer page */