--- /dev/null
+From 69713c42f7cc8b763375c9ce359e7596b9c5592c Mon Sep 17 00:00:00 2001
+From: Gao feng <gaofeng@cn.fujitsu.com>
+Date: Thu, 4 Oct 2012 20:15:49 +0000
+Subject: infiniband: pass rdma_cm module to netlink_dump_start
+
+
+From: Gao feng <gaofeng@cn.fujitsu.com>
+
+[ Upstream commit 809d5fc9bf6589276a12bd4fd611e4c7ff9940c3 ]
+
+Set netlink_dump_control.module to the rdma_cm module so that
+netlink_dump_start() can take a reference on it and avoid a panic if the
+module is removed while a dump is in progress.
+
+Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com>
+Cc: Roland Dreier <roland@kernel.org>
+Cc: Sean Hefty <sean.hefty@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/cma.c | 3 ++-
+ drivers/infiniband/core/netlink.c | 1 +
+ include/rdma/rdma_netlink.h | 1 +
+ 3 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3451,7 +3451,8 @@ out:
+ }
+
+ static const struct ibnl_client_cbs cma_cb_table[] = {
+- [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
++ [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats,
++ .module = THIS_MODULE },
+ };
+
+ static int __init cma_init(void)
+--- a/drivers/infiniband/core/netlink.c
++++ b/drivers/infiniband/core/netlink.c
+@@ -151,6 +151,7 @@ static int ibnl_rcv_msg(struct sk_buff *
+ {
+ struct netlink_dump_control c = {
+ .dump = client->cb_table[op].dump,
++ .module = client->cb_table[op].module,
+ };
+ return netlink_dump_start(nls, skb, nlh, &c);
+ }
+--- a/include/rdma/rdma_netlink.h
++++ b/include/rdma/rdma_netlink.h
+@@ -39,6 +39,7 @@ struct rdma_cm_id_stats {
+
+ struct ibnl_client_cbs {
+ int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
++ struct module *module;
+ };
+
+ int ibnl_init(void);
--- /dev/null
+From 1b3fc36d9a868fcbc4efa3bab56bfa45dbd3e543 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 16 Oct 2012 07:37:27 +0000
+Subject: ipv6: addrconf: fix /proc/net/if_inet6
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 9f0d3c2781baa1102108e16efbe640dd74564a7c ]
+
+Commit 1d5783030a1 (ipv6/addrconf: speedup /proc/net/if_inet6 filling)
+added bugs hiding some devices from if_inet6 and breaking applications.
+
+"ip -6 addr" could still display all IPv6 addresses, while "ifconfig -a"
+couldn't.
+
+One way to reproduce the bug is to run, in a shell:
+
+unshare -n /bin/bash
+ifconfig lo up
+
+In the original net namespace, the lo device then disappeared from if_inet6.
+
+Reported-by: Jan Hinnerk Stosch <janhinnerk.stosch@gmail.com>
+Tested-by: Jan Hinnerk Stosch <janhinnerk.stosch@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Mihai Maruseac <mihai.maruseac@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3095,14 +3095,15 @@ static struct inet6_ifaddr *if6_get_firs
+ struct hlist_node *n;
+ hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
+ addr_lst) {
++ if (!net_eq(dev_net(ifa->idev->dev), net))
++ continue;
+ /* sync with offset */
+ if (p < state->offset) {
+ p++;
+ continue;
+ }
+ state->offset++;
+- if (net_eq(dev_net(ifa->idev->dev), net))
+- return ifa;
++ return ifa;
+ }
+
+ /* prepare for next bucket */
+@@ -3120,18 +3121,20 @@ static struct inet6_ifaddr *if6_get_next
+ struct hlist_node *n = &ifa->addr_lst;
+
+ hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst) {
++ if (!net_eq(dev_net(ifa->idev->dev), net))
++ continue;
+ state->offset++;
+- if (net_eq(dev_net(ifa->idev->dev), net))
+- return ifa;
++ return ifa;
+ }
+
+ while (++state->bucket < IN6_ADDR_HSIZE) {
+ state->offset = 0;
+ hlist_for_each_entry_rcu_bh(ifa, n,
+ &inet6_addr_lst[state->bucket], addr_lst) {
++ if (!net_eq(dev_net(ifa->idev->dev), net))
++ continue;
+ state->offset++;
+- if (net_eq(dev_net(ifa->idev->dev), net))
+- return ifa;
++ return ifa;
+ }
+ }
+
--- /dev/null
+From a595c1ce4c9d572cf53513570b9f1a263d7867f2 Mon Sep 17 00:00:00 2001
+From: Devin Heitmueller <dheitmueller@kernellabs.com>
+Date: Mon, 6 Aug 2012 22:47:03 -0300
+Subject: media: au0828: fix case where STREAMOFF being called on stopped stream causes BUG()
+
+From: Devin Heitmueller <dheitmueller@kernellabs.com>
+
+commit a595c1ce4c9d572cf53513570b9f1a263d7867f2 upstream.
+
+We weren't checking whether the resource was in use before calling
+res_free(), so applications that called STREAMOFF on a v4l2 device which
+wasn't already streaming (e.g. MythTV) would cause a BUG() to be hit.
+
+Reported-by: Larry Finger <larry.finger@lwfinger.net>
+Reported-by: Jay Harbeston <jharbestonus@gmail.com>
+Signed-off-by: Devin Heitmueller <dheitmueller@kernellabs.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+
+---
+ drivers/media/video/au0828/au0828-video.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/media/video/au0828/au0828-video.c
++++ b/drivers/media/video/au0828/au0828-video.c
+@@ -1692,14 +1692,18 @@ static int vidioc_streamoff(struct file
+ (AUVI_INPUT(i).audio_setup)(dev, 0);
+ }
+
+- videobuf_streamoff(&fh->vb_vidq);
+- res_free(fh, AU0828_RESOURCE_VIDEO);
++ if (res_check(fh, AU0828_RESOURCE_VIDEO)) {
++ videobuf_streamoff(&fh->vb_vidq);
++ res_free(fh, AU0828_RESOURCE_VIDEO);
++ }
+ } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+ dev->vbi_timeout_running = 0;
+ del_timer_sync(&dev->vbi_timeout);
+
+- videobuf_streamoff(&fh->vb_vbiq);
+- res_free(fh, AU0828_RESOURCE_VBI);
++ if (res_check(fh, AU0828_RESOURCE_VBI)) {
++ videobuf_streamoff(&fh->vb_vbiq);
++ res_free(fh, AU0828_RESOURCE_VBI);
++ }
+ }
+
+ return 0;
--- /dev/null
+From 444e0764972d3e8f1f949ed98e9b21d60f07f72e Mon Sep 17 00:00:00 2001
+From: "ramesh.nagappa@gmail.com" <ramesh.nagappa@gmail.com>
+Date: Fri, 5 Oct 2012 19:10:15 +0000
+Subject: net: Fix skb_under_panic oops in neigh_resolve_output
+
+
+From: "ramesh.nagappa@gmail.com" <ramesh.nagappa@gmail.com>
+
+[ Upstream commit e1f165032c8bade3a6bdf546f8faf61fda4dd01c ]
+
+The retry loops in neigh_resolve_output() and neigh_connected_output()
+call dev_hard_header() without resetting the skb to the network_header.
+This causes the retry to fail with skb_under_panic. The fix is to
+reset the network_header within the retry loop.
+
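+Condensed, the fixed retry loop in both functions becomes the following
+(the closing read_seqretry() line is the existing loop condition; full
+context is in the hunks below):
+
+	do {
+		/* reset skb->data to the network header before every attempt;
+		 * without this, a retry pushes the link-layer header a second
+		 * time, runs below skb->head and trips skb_under_panic() */
+		__skb_pull(skb, skb_network_offset(skb));
+		seq = read_seqbegin(&neigh->ha_lock);
+		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+				      neigh->ha, NULL, skb->len);
+	} while (read_seqretry(&neigh->ha_lock, seq));
+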
+Signed-off-by: Ramesh Nagappa <ramesh.nagappa@ericsson.com>
+Reviewed-by: Shawn Lu <shawn.lu@ericsson.com>
+Reviewed-by: Robert Coulson <robert.coulson@ericsson.com>
+Reviewed-by: Billie Alsup <billie.alsup@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/neighbour.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1285,8 +1285,6 @@ int neigh_resolve_output(struct neighbou
+ if (!dst)
+ goto discard;
+
+- __skb_pull(skb, skb_network_offset(skb));
+-
+ if (!neigh_event_send(neigh, skb)) {
+ int err;
+ struct net_device *dev = neigh->dev;
+@@ -1296,6 +1294,7 @@ int neigh_resolve_output(struct neighbou
+ neigh_hh_init(neigh, dst);
+
+ do {
++ __skb_pull(skb, skb_network_offset(skb));
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+@@ -1326,9 +1325,8 @@ int neigh_connected_output(struct neighb
+ unsigned int seq;
+ int err;
+
+- __skb_pull(skb, skb_network_offset(skb));
+-
+ do {
++ __skb_pull(skb, skb_network_offset(skb));
+ seq = read_seqbegin(&neigh->ha_lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
--- /dev/null
+From 9cafef746eead4c1e9b0f4b58ae0c6b0c7f73ed4 Mon Sep 17 00:00:00 2001
+From: Gao feng <gaofeng@cn.fujitsu.com>
+Date: Thu, 4 Oct 2012 20:15:48 +0000
+Subject: netlink: add reference of module in netlink_dump_start
+
+
+From: Gao feng <gaofeng@cn.fujitsu.com>
+
+[ Upstream commit 6dc878a8ca39e93f70c42f3dd7260bde10c1e0f1 ]
+
+I get a panic when I use ss -a and rmmod inet_diag at the
+same time.
+
+It's because netlink_dump uses inet_diag_dump, which belongs to the
+inet_diag module.
+
+Searching the code shows that many modules have the same problem. We
+need to take a reference on the module that cb->dump belongs to.
+
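+With this in place, a dump provider hands its module to netlink via
+netlink_dump_control; a sketch (the callback and socket names here are
+illustrative, not part of this patch):
+
+	struct netlink_dump_control c = {
+		.dump	= my_dump,	/* dump callback owned by this module */
+		.module	= THIS_MODULE,	/* held from dump start to completion */
+	};
+
+	return netlink_dump_start(sk, skb, nlh, &c);
+
+Callers that leave .module unset get THIS_MODULE filled in by the new
+inline netlink_dump_start() wrapper, so existing users need no change;
+only code that starts dumps on behalf of another module must set it
+explicitly.
+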
+Thanks for all the help from Stephen, Jan, Eric, Steffen and Pablo.
+
+Changes from v3:
+change netlink_dump_start to inline, a suggestion from Pablo and
+Eric.
+
+Changes from v2:
+delete netlink_dump_done, and call module_put in netlink_dump
+and netlink_sock_destruct.
+
+Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netlink.h | 21 +++++++++++++++++----
+ net/netlink/af_netlink.c | 29 +++++++++++++++++++++--------
+ 2 files changed, 38 insertions(+), 12 deletions(-)
+
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -153,6 +153,7 @@ struct nlattr {
+
+ #include <linux/capability.h>
+ #include <linux/skbuff.h>
++#include <linux/export.h>
+
+ struct net;
+
+@@ -226,6 +227,8 @@ struct netlink_callback {
+ struct netlink_callback *cb);
+ int (*done)(struct netlink_callback *cb);
+ void *data;
++ /* the module that dump function belong to */
++ struct module *module;
+ u16 family;
+ u16 min_dump_alloc;
+ unsigned int prev_seq, seq;
+@@ -251,14 +254,24 @@ __nlmsg_put(struct sk_buff *skb, u32 pid
+
+ struct netlink_dump_control {
+ int (*dump)(struct sk_buff *skb, struct netlink_callback *);
+- int (*done)(struct netlink_callback*);
++ int (*done)(struct netlink_callback *);
+ void *data;
++ struct module *module;
+ u16 min_dump_alloc;
+ };
+
+-extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+- const struct nlmsghdr *nlh,
+- struct netlink_dump_control *control);
++extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++ const struct nlmsghdr *nlh,
++ struct netlink_dump_control *control);
++static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++ const struct nlmsghdr *nlh,
++ struct netlink_dump_control *control)
++{
++ if (!control->module)
++ control->module = THIS_MODULE;
++
++ return __netlink_dump_start(ssk, skb, nlh, control);
++}
+
+
+ #define NL_NONROOT_RECV 0x1
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -156,6 +156,8 @@ static void netlink_sock_destruct(struct
+ if (nlk->cb) {
+ if (nlk->cb->done)
+ nlk->cb->done(nlk->cb);
++
++ module_put(nlk->cb->module);
+ netlink_destroy_callback(nlk->cb);
+ }
+
+@@ -1728,6 +1730,7 @@ static int netlink_dump(struct sock *sk)
+ nlk->cb = NULL;
+ mutex_unlock(nlk->cb_mutex);
+
++ module_put(cb->module);
+ netlink_destroy_callback(cb);
+ return 0;
+
+@@ -1737,9 +1740,9 @@ errout_skb:
+ return err;
+ }
+
+-int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+- const struct nlmsghdr *nlh,
+- struct netlink_dump_control *control)
++int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
++ const struct nlmsghdr *nlh,
++ struct netlink_dump_control *control)
+ {
+ struct netlink_callback *cb;
+ struct sock *sk;
+@@ -1754,6 +1757,7 @@ int netlink_dump_start(struct sock *ssk,
+ cb->done = control->done;
+ cb->nlh = nlh;
+ cb->data = control->data;
++ cb->module = control->module;
+ cb->min_dump_alloc = control->min_dump_alloc;
+ atomic_inc(&skb->users);
+ cb->skb = skb;
+@@ -1764,19 +1768,28 @@ int netlink_dump_start(struct sock *ssk,
+ return -ECONNREFUSED;
+ }
+ nlk = nlk_sk(sk);
+- /* A dump is in progress... */
++
+ mutex_lock(nlk->cb_mutex);
++ /* A dump is in progress... */
+ if (nlk->cb) {
+ mutex_unlock(nlk->cb_mutex);
+ netlink_destroy_callback(cb);
+- sock_put(sk);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto out;
+ }
++ /* add reference of module which cb->dump belongs to */
++ if (!try_module_get(cb->module)) {
++ mutex_unlock(nlk->cb_mutex);
++ netlink_destroy_callback(cb);
++ ret = -EPROTONOSUPPORT;
++ goto out;
++ }
++
+ nlk->cb = cb;
+ mutex_unlock(nlk->cb_mutex);
+
+ ret = netlink_dump(sk);
+-
++out:
+ sock_put(sk);
+
+ if (ret)
+@@ -1787,7 +1800,7 @@ int netlink_dump_start(struct sock *ssk,
+ */
+ return -EINTR;
+ }
+-EXPORT_SYMBOL(netlink_dump_start);
++EXPORT_SYMBOL(__netlink_dump_start);
+
+ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
+ {
--- /dev/null
+From a809a74b63bdcc342bb568f2f531be4bdd8a4c81 Mon Sep 17 00:00:00 2001
+From: "jeff.liu" <jeff.liu@oracle.com>
+Date: Mon, 8 Oct 2012 18:57:27 +0000
+Subject: RDS: fix rds-ping spinlock recursion
+
+
+From: "jeff.liu" <jeff.liu@oracle.com>
+
+[ Upstream commit 5175a5e76bbdf20a614fb47ce7a38f0f39e70226 ]
+
+This is the revised patch for fixing the rds-ping spinlock recursion
+according to Venkat's suggestions.
+
+The RDS ping/pong over TCP feature has been broken for years (2.6.39 to
+3.6.0) since we have to set TCP cork and call kernel_sendmsg() between
+ping/pong, which both need to lock "struct sock *sk". However, this
+lock has already been held before the rds_tcp_data_ready() callback is
+triggered. As a result, we always face spinlock recursion, which
+results in a system panic.
+
+Given that RDS ping is only used to test the connectivity and not for
+serious performance measurements, we can queue the pong transmit to
+rds_wq as a delayed response.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+CC: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
+CC: David S. Miller <davem@davemloft.net>
+CC: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Jie Liu <jeff.liu@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/send.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1122,7 +1122,7 @@ rds_send_pong(struct rds_connection *con
+ rds_stats_inc(s_send_pong);
+
+ if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+- rds_send_xmit(conn);
++ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+ rds_message_put(rm);
+ return 0;
pinctrl-tegra-set-low-power-mode-bank-width-to-2.patch
iommu-tegra-smmu-fix-deadly-typo.patch
amd64_edac-__amd64_set_scrub_rate-avoid-overindexing-scrubrates.patch
+usb-dwc3-gadget-fix-endpoint-always-busy-bug.patch
+media-au0828-fix-case-where-streamoff-being-called-on-stopped-stream-causes-bug.patch
+netlink-add-reference-of-module-in-netlink_dump_start.patch
+infiniband-pass-rdma_cm-module-to-netlink_dump_start.patch
+net-fix-skb_under_panic-oops-in-neigh_resolve_output.patch
+skge-add-dma-mask-quirk-for-marvell-88e8001-on-asus-p5nsli-motherboard.patch
+vlan-don-t-deliver-frames-for-unknown-vlans-to-protocols.patch
+rds-fix-rds-ping-spinlock-recursion.patch
+tcp-resets-are-misrouted.patch
+ipv6-addrconf-fix-proc-net-if_inet6.patch
+sparc64-fix-ptrace-interaction-with-force_successful_syscall_return.patch
+sparc64-like-x86-we-should-check-current-mm-during-perf-backtrace-generation.patch
+sparc64-fix-bit-twiddling-in-sparc_pmu_enable_event.patch
+sparc64-do-not-clobber-personality-flags-in-sys_sparc64_personality.patch
+sparc64-be-less-verbose-during-vmemmap-population.patch
--- /dev/null
+From 033336e3627c20f2e8e66cbb5b0a2581fbb702b0 Mon Sep 17 00:00:00 2001
+From: Graham Gower <graham.gower@gmail.com>
+Date: Mon, 8 Oct 2012 08:34:50 +0000
+Subject: skge: Add DMA mask quirk for Marvell 88E8001 on ASUS P5NSLI motherboard
+
+
+From: Graham Gower <graham.gower@gmail.com>
+
+[ Upstream commit a2af139ff1cd85df586690ff626619ab1ee88b0a ]
+
+Marvell 88E8001 on an ASUS P5NSLI motherboard is unable to send/receive
+packets on a system with >4GB RAM unless a 32-bit DMA mask is used.
+
+This issue has been around for years and a fix was sent 3.5 years ago, but
+there was some debate as to whether it should instead be fixed as a PCI quirk.
+http://www.spinics.net/lists/netdev/msg88670.html
+
+However, 18 months later a similar workaround was introduced for another
+chipset exhibiting the same problem.
+http://www.spinics.net/lists/netdev/msg142287.html
+
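+As a sketch of the pattern such a DMI quirk table supports (this is not a
+quote of skge_probe(); the local variable names are illustrative):
+
+	bool only_32bit_dma = dmi_check_system(skge_32bit_dma_boards);
+
+	if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+		using_dac = 1;	/* 64-bit DMA is safe on this board */
+	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+		using_dac = 0;	/* quirked boards fall back to 32-bit DMA */
+	else
+		return -EIO;	/* no usable DMA configuration */
+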
+Signed-off-by: Graham Gower <graham.gower@gmail.com>
+Signed-off-by: Jan Ceuleers <jan.ceuleers@computer.org>
+Acked-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/skge.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -4153,6 +4153,13 @@ static struct dmi_system_id skge_32bit_d
+ DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+ },
+ },
++ {
++ .ident = "ASUS P5NSLI",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
++ },
++ },
+ {}
+ };
+
--- /dev/null
+From 00950467b5e4b390ede1b8f796fab33d31064258 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 15 Aug 2012 00:37:29 -0700
+Subject: sparc64: Be less verbose during vmemmap population.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 2856cc2e4d0852c3ddaae9dcb19cb9396512eb08 ]
+
+On a 2-node machine with 256GB of ram we get 512 lines of
+console output, which is just too much.
+
+This mimics Yinghai Lu's x86 commit c2b91e2eec9678dbda274e906cc32ea8f711da3b
+(x86_64/mm: check and print vmemmap allocation continuous), except that
+we aren't ever going to get contiguous block pointers in between calls,
+so just print when the virtual address or node changes.
+
+This decreases the output by roughly a factor of 16.
+
+Also demote this to KERN_DEBUG.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/init_64.c | 28 +++++++++++++++++++++++-----
+ 1 file changed, 23 insertions(+), 5 deletions(-)
+
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2099,6 +2099,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ unsigned long vmemmap_table[VMEMMAP_SIZE];
+
++static long __meminitdata addr_start, addr_end;
++static int __meminitdata node_start;
++
+ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+ {
+ unsigned long vstart = (unsigned long) start;
+@@ -2129,15 +2132,30 @@ int __meminit vmemmap_populate(struct pa
+
+ *vmem_pp = pte_base | __pa(block);
+
+- printk(KERN_INFO "[%p-%p] page_structs=%lu "
+- "node=%d entry=%lu/%lu\n", start, block, nr,
+- node,
+- addr >> VMEMMAP_CHUNK_SHIFT,
+- VMEMMAP_SIZE);
++ /* check to see if we have contiguous blocks */
++ if (addr_end != addr || node_start != node) {
++ if (addr_start)
++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
++ addr_start, addr_end-1, node_start);
++ addr_start = addr;
++ node_start = node;
++ }
++ addr_end = addr + VMEMMAP_CHUNK;
+ }
+ }
+ return 0;
+ }
++
++void __meminit vmemmap_populate_print_last(void)
++{
++ if (addr_start) {
++ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
++ addr_start, addr_end-1, node_start);
++ addr_start = 0;
++ addr_end = 0;
++ node_start = 0;
++ }
++}
+ #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+ static void prot_init_common(unsigned long page_none,
--- /dev/null
+From 5b5ad809cd2d593cc34260a5ff064ff7e3a525c1 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Wed, 1 Aug 2012 21:10:51 +0200
+Subject: sparc64: do not clobber personality flags in sys_sparc64_personality()
+
+
+From: Jiri Kosina <jkosina@suse.cz>
+
+[ Upstream commit a27032eee8cb6e16516f13c8a9752e9d5d4cc430 ]
+
+There are multiple errors in how sys_sparc64_personality() handles
+personality flags stored in top three bytes.
+
+- directly comparing current->personality against PER_LINUX32 doesn't work
+  when any of the personality flags stored in the top three bytes are used.
+- directly and forcefully setting personality to PER_LINUX32 or PER_LINUX
+  discards any flags stored in the top three bytes.
+
+Fix the first one by using the personality() macro to compare only the
+PER_MASK bits.
+Fix the second one by setting only the bits that should be set, instead of
+overwriting the whole value.
+
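+For illustration (constants from <linux/personality.h>): a 32-bit task that
+also carries a flag in the upper bytes, e.g.
+
+	current->personality == (PER_LINUX32 | ADDR_NO_RANDOMIZE)
+
+fails the old "current->personality == PER_LINUX32" test, while
+personality(current->personality) masks with PER_MASK and still yields
+PER_LINUX32. Likewise "personality |= PER_LINUX32" keeps ADDR_NO_RANDOMIZE,
+where the old "personality = PER_LINUX32" silently dropped it.
+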
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/sys_sparc_64.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, uns
+ {
+ int ret;
+
+- if (current->personality == PER_LINUX32 &&
+- personality == PER_LINUX)
+- personality = PER_LINUX32;
++ if (personality(current->personality) == PER_LINUX32 &&
++ personality(personality) == PER_LINUX)
++ personality |= PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret &= ~PER_LINUX32;
+
+ return ret;
+ }
--- /dev/null
+From 60f463df0d39ddb5d875ed6052d458e4224055f2 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 16 Oct 2012 13:05:25 -0700
+Subject: sparc64: Fix bit twiddling in sparc_pmu_enable_event().
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit e793d8c6740f8fe704fa216e95685f4d92c4c4b9 ]
+
+There was a serious disconnect in the logic happening in
+sparc_pmu_disable_event() vs. sparc_pmu_enable_event().
+
+Event disable is implemented by programming a NOP event into the PCR.
+
+However, event enable was not reversing this operation. Instead, it
+was setting the User/Priv/Hypervisor trace enable bits.
+
+That's not sparc_pmu_enable_event()'s job; that's what
+sparc_pmu_enable() and sparc_pmu_disable() do.
+
+The intent of sparc_pmu_enable_event() is clear, since it first clears
+out the event type encoding field. So fix this by OR'ing in the event
+encoding rather than the trace enable bits.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/perf_event.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -557,11 +557,13 @@ static u64 nop_for_index(int idx)
+
+ static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
+ {
+- u64 val, mask = mask_for_index(idx);
++ u64 enc, val, mask = mask_for_index(idx);
++
++ enc = perf_event_get_enc(cpuc->events[idx]);
+
+ val = cpuc->pcr;
+ val &= ~mask;
+- val |= hwc->config;
++ val |= event_encoding(enc, idx);
+ cpuc->pcr = val;
+
+ pcr_ops->write(cpuc->pcr);
--- /dev/null
+From 54e7541c2eea7beecca0a7743406f8da1d041550 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 10 Oct 2012 17:25:00 -0700
+Subject: sparc64: fix ptrace interaction with force_successful_syscall_return()
+
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+[ Upstream commit 55c2770e413e96871147b9406a9c41fe9bc5209c ]
+
+we want syscall_trace_leave() called on exit from any syscall;
+skipping its call in case we'd done force_successful_syscall_return()
+is broken...
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/syscalls.S | 32 ++++++++++++++------------------
+ 1 file changed, 14 insertions(+), 18 deletions(-)
+
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -212,24 +212,20 @@ linux_sparc_syscall:
+ 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ ret_sys_call:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+- /* Check if force_successful_syscall_return()
+- * was invoked.
+- */
+- ldub [%g6 + TI_SYS_NOERROR], %l2
+- brnz,a,pn %l2, 80f
+- stb %g0, [%g6 + TI_SYS_NOERROR]
+-
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+-80:
++ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
++
++2:
++ stb %g0, [%g6 + TI_SYS_NOERROR]
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
++3:
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ add %l1, 0x4, %l2 ! npc = npc+4
+@@ -238,20 +234,20 @@ ret_sys_call:
+ stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+ 1:
++ /* Check if force_successful_syscall_return()
++ * was invoked.
++ */
++ ldub [%g6 + TI_SYS_NOERROR], %l2
++ brnz,pn %l2, 2b
++ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
+ sub %g0, %o0, %o0
+- or %g3, %g2, %g3
+ stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+- stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+- bne,pn %icc, linux_syscall_trace2
+- add %l1, 0x4, %l2 ! npc = npc+4
+- stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
++ ba,pt %xcc, 3b
++ or %g3, %g2, %g3
+
+- b,pt %xcc, rtrap
+- stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+ linux_syscall_trace2:
+ call syscall_trace_leave
+ add %sp, PTREGS_OFF, %o0
--- /dev/null
+From 15de396492e25989381e814ea61f699a8cce12db Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Sun, 14 Oct 2012 17:59:40 -0700
+Subject: sparc64: Like x86 we should check current->mm during perf backtrace generation.
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 08280e6c4c2e8049ac61d9e8e3536ec1df629c0d ]
+
+If the MM is not active, only report the top-level PC. Do not try to
+access the address space.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/perf_event.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -1428,8 +1428,6 @@ static void perf_callchain_user_64(struc
+ {
+ unsigned long ufp;
+
+- perf_callchain_store(entry, regs->tpc);
+-
+ ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+ do {
+ struct sparc_stackf *usf, sf;
+@@ -1450,8 +1448,6 @@ static void perf_callchain_user_32(struc
+ {
+ unsigned long ufp;
+
+- perf_callchain_store(entry, regs->tpc);
+-
+ ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
+ do {
+ struct sparc_stackf32 *usf, sf;
+@@ -1470,6 +1466,11 @@ static void perf_callchain_user_32(struc
+ void
+ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ {
++ perf_callchain_store(entry, regs->tpc);
++
++ if (!current->mm)
++ return;
++
+ flushw_user();
+ if (test_thread_flag(TIF_32BIT))
+ perf_callchain_user_32(entry, regs);
--- /dev/null
+From a0331a06cc80b12ab98a4e66165622bb5ab537b0 Mon Sep 17 00:00:00 2001
+From: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Date: Fri, 12 Oct 2012 04:34:17 +0000
+Subject: tcp: resets are misrouted
+
+
+From: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+
+[ Upstream commit 4c67525849e0b7f4bd4fab2487ec9e43ea52ef29 ]
+
+After commit e2446eaa ("tcp_v4_send_reset: binding oif to iif in no
+sock case"), tcp resets are always lost when routing is asymmetric.
+Yes, backing out that patch will result in misrouting of resets for
+dead connections which used interface binding when they were alive, but
+we actually cannot do anything here. What's dead is dead, and correctly
+handling normal unbound connections is obviously the priority.
+
+Comment to comment:
+> This has few benefits:
+> 1. tcp_v6_send_reset already did that.
+
+It was done to route resets for IPv6 link local addresses. It was a
+mistake to do so for global addresses. The patch fixes this as well.
+
+Actually, the problem appears to be even more serious than guaranteed
+loss of resets. As reported by Sergey Soloviev <sol@eqv.ru>, those
+misrouted resets create a lot of ARP traffic and a huge number of
+unresolved ARP entries, bringing NAT firewalls that use asymmetric
+routing to their knees.
+
+Signed-off-by: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_ipv4.c | 7 ++++---
+ net/ipv6/tcp_ipv6.c | 3 ++-
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -678,10 +678,11 @@ static void tcp_v4_send_reset(struct soc
+ arg.csumoffset = offsetof(struct tcphdr, check) / 2;
+ arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
+ /* When socket is gone, all binding information is lost.
+- * routing might fail in this case. using iif for oif to
+- * make sure we can deliver it
++ * routing might fail in this case. No choice here, if we choose to force
++ * input interface, we will misroute in case of asymmetric route.
+ */
+- arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
++ if (sk)
++ arg.bound_dev_if = sk->sk_bound_dev_if;
+
+ net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -896,7 +896,8 @@ static void tcp_v6_send_response(struct
+ __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
+
+ fl6.flowi6_proto = IPPROTO_TCP;
+- fl6.flowi6_oif = inet6_iif(skb);
++ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
++ fl6.flowi6_oif = inet6_iif(skb);
+ fl6.fl6_dport = t1->dest;
+ fl6.fl6_sport = t1->source;
+ security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
--- /dev/null
+From 041d81f493d90c940ec41f0ec98bc7c4f2fba431 Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Thu, 4 Oct 2012 11:58:00 +0300
+Subject: usb: dwc3: gadget: fix 'endpoint always busy' bug
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit 041d81f493d90c940ec41f0ec98bc7c4f2fba431 upstream.
+
+If a USB transfer has already been started, meaning
+we have already issued the StartTransfer command to
+that particular endpoint, the DWC3_EP_BUSY flag has
+also already been set.
+
+When we try to cancel a transfer which is already
+in the controller's cache, we will not receive an
+XferComplete event, so we must clear DWC3_EP_BUSY
+in order to allow subsequent requests to be started
+properly.
+
+The best place to clear that flag is right after issuing
+DWC3_DEPCMD_ENDTRANSFER.
+
+Reported-by: Moiz Sonasath <m-sonasath@ti.com>
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/usb/dwc3/gadget.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1777,6 +1777,7 @@ static void dwc3_stop_active_transfer(st
+ ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+ WARN_ON_ONCE(ret);
+ dep->res_trans_idx = 0;
++ dep->flags &= ~DWC3_EP_BUSY;
+ }
+ }
+
--- /dev/null
+From 03022fa480f1fa6871c389ae5e238596ae4cb46c Mon Sep 17 00:00:00 2001
+From: Florian Zumbiehl <florz@florz.de>
+Date: Sun, 7 Oct 2012 15:51:58 +0000
+Subject: vlan: don't deliver frames for unknown vlans to protocols
+
+
+From: Florian Zumbiehl <florz@florz.de>
+
+[ Upstream commit 48cc32d38a52d0b68f91a171a8d00531edc6a46e ]
+
+Commit 6a32e4f9dd9219261f8856f817e6655114cfec2f made the vlan code skip
+marking vlan-tagged frames for vlans that are not locally configured as
+PACKET_OTHERHOST if there was an rx_handler, as the rx_handler could cause
+the frame to be received on a different (virtual) vlan-capable interface
+where that vlan might be configured.
+
+As rx_handlers do not necessarily return RX_HANDLER_ANOTHER, this could cause
+frames for unknown vlans to be delivered to the protocol stack as if they had
+been received untagged.
+
+For example, if an ipv6 router advertisement tagged for a vlan that is not
+locally configured is received on an interface with macvlan interfaces attached,
+macvlan's rx_handler returns RX_HANDLER_PASS after delivering the frame to the
+macvlan interfaces, which caused it to be passed to the protocol stack, leading
+to ipv6 addresses for the announced prefix being configured even though those
+are completely unusable on the underlying interface.
+
+The fix moves marking as PACKET_OTHERHOST after the rx_handler so the
+rx_handler, if there is one, sees the frame unchanged, but afterwards,
+before the frame is delivered to the protocol stack, it gets marked whether
+there is an rx_handler or not.
+
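+Condensed, the receive path after this change looks as follows (see the
+net/core/dev.c hunks below for the exact context):
+
+	if (vlan_tx_tag_present(skb)) {
+		if (vlan_do_receive(&skb))	/* local vlan device found */
+			goto another_round;
+		else if (unlikely(!skb))
+			goto out;
+	}
+
+	rx_handler = rcu_dereference(skb->dev->rx_handler);
+	/* rx_handler (if any) runs here and still sees the frame unchanged */
+
+	if (vlan_tx_nonzero_tag_present(skb))
+		skb->pkt_type = PACKET_OTHERHOST;	/* unknown vlan, not for us */
+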
+Signed-off-by: Florian Zumbiehl <florz@florz.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/if_vlan.h | 8 ++++----
+ net/8021q/vlan_core.c | 10 ++--------
+ net/core/dev.c | 7 +++++--
+ 3 files changed, 11 insertions(+), 14 deletions(-)
+
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -82,6 +82,8 @@ static inline int is_vlan_dev(struct net
+ }
+
+ #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
++#define vlan_tx_nonzero_tag_present(__skb) \
++ (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
+ #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+
+ #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+@@ -91,7 +93,7 @@ extern struct net_device *__vlan_find_de
+ extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+
+-extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
++extern bool vlan_do_receive(struct sk_buff **skb);
+ extern struct sk_buff *vlan_untag(struct sk_buff *skb);
+
+ extern int vlan_vid_add(struct net_device *dev, unsigned short vid);
+@@ -120,10 +122,8 @@ static inline u16 vlan_dev_vlan_id(const
+ return 0;
+ }
+
+-static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
++static inline bool vlan_do_receive(struct sk_buff **skb)
+ {
+- if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
+- (*skb)->pkt_type = PACKET_OTHERHOST;
+ return false;
+ }
+
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -5,7 +5,7 @@
+ #include <linux/export.h>
+ #include "vlan.h"
+
+-bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
++bool vlan_do_receive(struct sk_buff **skbp)
+ {
+ struct sk_buff *skb = *skbp;
+ u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+@@ -13,14 +13,8 @@ bool vlan_do_receive(struct sk_buff **sk
+ struct vlan_pcpu_stats *rx_stats;
+
+ vlan_dev = vlan_find_dev(skb->dev, vlan_id);
+- if (!vlan_dev) {
+- /* Only the last call to vlan_do_receive() should change
+- * pkt_type to PACKET_OTHERHOST
+- */
+- if (vlan_id && last_handler)
+- skb->pkt_type = PACKET_OTHERHOST;
++ if (!vlan_dev)
+ return false;
+- }
+
+ skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3209,18 +3209,18 @@ another_round:
+ ncls:
+ #endif
+
+- rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (vlan_tx_tag_present(skb)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+- if (vlan_do_receive(&skb, !rx_handler))
++ if (vlan_do_receive(&skb))
+ goto another_round;
+ else if (unlikely(!skb))
+ goto out;
+ }
+
++ rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (rx_handler) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+@@ -3240,6 +3240,9 @@ ncls:
+ }
+ }
+
++ if (vlan_tx_nonzero_tag_present(skb))
++ skb->pkt_type = PACKET_OTHERHOST;
++
+ /* deliver only exact match when indicated */
+ null_or_dev = deliver_exact ? skb->dev : NULL;
+