--- /dev/null
+From 86f40622af7329375e38f282f6c0aab95f3e5f72 Mon Sep 17 00:00:00 2001
+From: Jianguo Wu <wujianguo@huawei.com>
+Date: Thu, 24 Apr 2014 03:45:56 +0100
+Subject: ARM: 8037/1: mm: support big-endian page tables
+
+From: Jianguo Wu <wujianguo@huawei.com>
+
+commit 86f40622af7329375e38f282f6c0aab95f3e5f72 upstream.
+
+When LPAE and big-endian are enabled on a HiSilicon board, and memory is
+specified as mem=384M mem=512M@7680M, we get a bad page state:
+
+Freeing unused kernel memory: 180K (c0466000 - c0493000)
+BUG: Bad page state in process init pfn:fa442
+page:c7749840 count:0 mapcount:-1 mapping: (null) index:0x0
+page flags: 0x40000400(reserved)
+Modules linked in:
+CPU: 0 PID: 1 Comm: init Not tainted 3.10.27+ #66
+[<c000f5f0>] (unwind_backtrace+0x0/0x11c) from [<c000cbc4>] (show_stack+0x10/0x14)
+[<c000cbc4>] (show_stack+0x10/0x14) from [<c009e448>] (bad_page+0xd4/0x104)
+[<c009e448>] (bad_page+0xd4/0x104) from [<c009e520>] (free_pages_prepare+0xa8/0x14c)
+[<c009e520>] (free_pages_prepare+0xa8/0x14c) from [<c009f8ec>] (free_hot_cold_page+0x18/0xf0)
+[<c009f8ec>] (free_hot_cold_page+0x18/0xf0) from [<c00b5444>] (handle_pte_fault+0xcf4/0xdc8)
+[<c00b5444>] (handle_pte_fault+0xcf4/0xdc8) from [<c00b6458>] (handle_mm_fault+0xf4/0x120)
+[<c00b6458>] (handle_mm_fault+0xf4/0x120) from [<c0013754>] (do_page_fault+0xfc/0x354)
+[<c0013754>] (do_page_fault+0xfc/0x354) from [<c0008400>] (do_DataAbort+0x2c/0x90)
+[<c0008400>] (do_DataAbort+0x2c/0x90) from [<c0008fb4>] (__dabt_usr+0x34/0x40)
+
+The bad pfn:fa442 is not in system memory (mem=384M mem=512M@7680M). After
+debugging, I found that in the page fault handler we read back a wrong pfn
+from the pte just after setting it, as follows:
+do_anonymous_page()
+{
+ ...
+ set_pte_at(mm, address, page_table, entry);
+
+ //debug code
+ pfn = pte_pfn(entry);
+ pr_info("pfn:0x%lx, pte:0x%llxn", pfn, pte_val(entry));
+
+ //read out the pte just set
+ new_pte = pte_offset_map(pmd, address);
+ new_pfn = pte_pfn(*new_pte);
+ pr_info("new pfn:0x%lx, new pte:0x%llxn", pfn, pte_val(entry));
+ ...
+}
+
+pfn: 0x1fa4f5, pte:0xc00001fa4f575f
+new_pfn:0xfa4f5, new_pte:0xc00000fa4f5f5f //new pfn/pte is wrong.
+
+The bug happens in cpu_v7_set_pte_ext(ptep, pte):
+An LPAE PTE is a 64-bit quantity, passed to cpu_v7_set_pte_ext in the r2 and r3 registers.
+On an LE kernel, r2 contains the LSB of the PTE, and r3 the MSB.
+On a BE kernel, the assignment is reversed.
+
+Unfortunately, the current code always assumes the LE case,
+leading to corruption of the PTE when clearing/setting bits.
+
+This patch fixes this issue much like it has been done already in the
+cpu_v7_switch_mm case.
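+
+For illustration only (not part of the patch, and userspace rather than
+kernel code), a small sketch of how the two 32-bit halves of a 64-bit PTE
+value trade places between little- and big-endian, so that a bit operation
+written for the LE register layout corrupts the pfn on BE:
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define L_PTE_VALID (1ULL << 0)         /* bit 0, for illustration */
+
+int main(void)
+{
+        /* The pte value from the debug output above. */
+        union { uint64_t pte; uint32_t half[2]; } v = {
+                .pte = 0xc00001fa4f575fULL,
+        };
+
+        /* On an LE host half[0] is the low word (where L_PTE_VALID lives);
+         * on a BE host half[0] is the high word, so the same "clear the
+         * bit in r2" operation lands in the pfn bits instead. */
+        printf("half[0]=0x%08x half[1]=0x%08x\n",
+               (unsigned)v.half[0], (unsigned)v.half[1]);
+        v.half[0] &= ~(uint32_t)L_PTE_VALID;
+        printf("after clearing bit 0 of half[0]: 0x%016llx\n",
+               (unsigned long long)v.pte);
+        return 0;
+}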
+
+Signed-off-by: Jianguo Wu <wujianguo@huawei.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/proc-v7-3level.S | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/mm/proc-v7-3level.S
++++ b/arch/arm/mm/proc-v7-3level.S
+@@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm)
+ mov pc, lr
+ ENDPROC(cpu_v7_switch_mm)
+
++#ifdef __ARMEB__
++#define rl r3
++#define rh r2
++#else
++#define rl r2
++#define rh r3
++#endif
++
+ /*
+ * cpu_v7_set_pte_ext(ptep, pte)
+ *
+@@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm)
+ */
+ ENTRY(cpu_v7_set_pte_ext)
+ #ifdef CONFIG_MMU
+- tst r2, #L_PTE_VALID
++ tst rl, #L_PTE_VALID
+ beq 1f
+- tst r3, #1 << (57 - 32) @ L_PTE_NONE
+- bicne r2, #L_PTE_VALID
++ tst rh, #1 << (57 - 32) @ L_PTE_NONE
++ bicne rl, #L_PTE_VALID
+ bne 1f
+- tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
+- orreq r2, #L_PTE_RDONLY
++ tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
++ orreq rl, #L_PTE_RDONLY
+ 1: strd r2, r3, [r0]
+ ALT_SMP(W(nop))
+ ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
--- /dev/null
+From 3683f44c42e991d313dc301504ee0fca1aeb8580 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Sat, 3 May 2014 11:03:28 +0100
+Subject: ARM: stacktrace: avoid listing stacktrace functions in stacktrace
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 3683f44c42e991d313dc301504ee0fca1aeb8580 upstream.
+
+While debugging the FEC ethernet driver using stacktrace, it was noticed
+that the stacktraces always begin as follows:
+
+ [<c00117b4>] save_stack_trace_tsk+0x0/0x98
+ [<c0011870>] save_stack_trace+0x24/0x28
+ ...
+
+This is because the stack trace code includes the stack frames for itself.
+This is incorrect behaviour, and also leads to "skip" doing the wrong
+thing (which is the number of stack frames to avoid recording).
+
+Perversely, it does the right thing when passed a non-current thread. Fix
+this by ensuring that we have a known constant number of frames above the
+main stack trace function, and always skip these.
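+
+For illustration only, a userspace analog of the same idea using glibc's
+backtrace() API (not kernel code): keep the capture helper out of line so it
+occupies a known, constant number of frames that can always be skipped.
+
+#include <execinfo.h>
+
+static __attribute__((noinline)) void capture(void)
+{
+        void *frames[16];
+        int n = backtrace(frames, 16);
+
+        /* Drop frame 0 (capture itself).  The kernel change below skips two
+         * frames: __save_stack_trace() and its caller. */
+        backtrace_symbols_fd(frames + 1, n - 1, 2 /* stderr */);
+}
+
+int main(void)
+{
+        capture();
+        return 0;
+}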
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/stacktrace.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -83,13 +83,16 @@ static int save_trace(struct stackframe
+ return trace->nr_entries >= trace->max_entries;
+ }
+
+-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++/* This must be noinline to so that our skip calculation works correctly */
++static noinline void __save_stack_trace(struct task_struct *tsk,
++ struct stack_trace *trace, unsigned int nosched)
+ {
+ struct stack_trace_data data;
+ struct stackframe frame;
+
+ data.trace = trace;
+ data.skip = trace->skip;
++ data.no_sched_functions = nosched;
+
+ if (tsk != current) {
+ #ifdef CONFIG_SMP
+@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_st
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ return;
+ #else
+- data.no_sched_functions = 1;
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ frame.lr = 0; /* recovered from the stack */
+@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_st
+ } else {
+ register unsigned long current_sp asm ("sp");
+
+- data.no_sched_functions = 0;
++ /* We don't want this function nor the caller */
++ data.skip += 2;
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+- frame.pc = (unsigned long)save_stack_trace_tsk;
++ frame.pc = (unsigned long)__save_stack_trace;
+ }
+
+ walk_stackframe(&frame, save_trace, &data);
+@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_st
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+ }
+
++void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
++{
++ __save_stack_trace(tsk, trace, 1);
++}
++
+ void save_stack_trace(struct stack_trace *trace)
+ {
+- save_stack_trace_tsk(current, trace);
++ __save_stack_trace(current, trace, 0);
+ }
+ EXPORT_SYMBOL_GPL(save_stack_trace);
+ #endif
--- /dev/null
+From 62bbd5b35994eaf30519f126765d7f6af9cd3526 Mon Sep 17 00:00:00 2001
+From: Jukka Rissanen <jukka.rissanen@linux.intel.com>
+Date: Tue, 27 May 2014 11:33:22 +0300
+Subject: Bluetooth: 6LoWPAN: Fix MAC address universal/local bit handling
+
+From: Jukka Rissanen <jukka.rissanen@linux.intel.com>
+
+commit 62bbd5b35994eaf30519f126765d7f6af9cd3526 upstream.
+
+The universal/local bit handling was incorrectly done in the code.
+
+So when setting the EUI address from the BD address we do this:
+- If the BD address type is PUBLIC, then we clear the universal bit
+  in the EUI address. If the address type is RANDOM, then the universal
+  bit is set (BT 6lowpan draft chapter 3.2.2)
+- After this we invert the universal/local bit according to RFC 2464
+
+When figuring out the BD address we do the reverse:
+- Take the EUI address from the stateless IPv6 address, invert the
+  universal/local bit according to RFC 2464
+- If the universal bit is 1 in this modified EUI address, then the address
+  type is set to RANDOM, otherwise it is PUBLIC
+
+Note that 6lowpan_iphc.[ch] does the final toggling of U/L bit
+before sending or receiving the network packet.
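+
+For illustration only, a small userspace sketch of the bit rules above. The
+ADDR_* constants are stand-ins for the kernel's ADDR_LE_DEV_* values, and
+only the EUI-64 byte that carries the U/L bit is shown:
+
+#include <stdio.h>
+#include <stdint.h>
+
+enum { ADDR_PUBLIC, ADDR_RANDOM };
+
+/* BD address -> EUI-64: clear the U/L bit for a PUBLIC address, set it for
+ * RANDOM; 6lowpan_iphc toggles the bit again per RFC 2464 on the wire. */
+static uint8_t eui0_from_bdaddr(uint8_t msb, int addr_type)
+{
+        if (addr_type == ADDR_PUBLIC)
+                msb &= ~0x02;
+        else
+                msb |= 0x02;
+        return msb;
+}
+
+/* IPv6 interface identifier -> BD address type: undo the RFC 2464 toggle,
+ * then a set U/L bit means RANDOM and a clear one means PUBLIC. */
+static int addr_type_from_eui0(uint8_t eui0)
+{
+        eui0 ^= 0x02;
+        return (eui0 & 0x02) ? ADDR_RANDOM : ADDR_PUBLIC;
+}
+
+int main(void)
+{
+        uint8_t eui0 = eui0_from_bdaddr(0xaa, ADDR_RANDOM);
+        uint8_t wire = eui0 ^ 0x02;     /* RFC 2464 toggle done by iphc */
+
+        printf("%s\n", addr_type_from_eui0(wire) == ADDR_RANDOM ?
+               "RANDOM" : "PUBLIC");
+        return 0;
+}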
+
+Signed-off-by: Jukka Rissanen <jukka.rissanen@linux.intel.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/6lowpan.c | 65 +++++++++++++++++++++++++-----------------------
+ 1 file changed, 35 insertions(+), 30 deletions(-)
+
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *
+ return 0;
+ }
+
+-static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
+- bdaddr_t *addr, u8 *addr_type)
++static u8 get_addr_type_from_eui64(u8 byte)
+ {
+- u8 *eui64;
++ /* Is universal(0) or local(1) bit, */
++ if (byte & 0x02)
++ return ADDR_LE_DEV_RANDOM;
+
+- eui64 = ip6_daddr->s6_addr + 8;
++ return ADDR_LE_DEV_PUBLIC;
++}
++
++static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
++{
++ u8 *eui64 = ip6_daddr->s6_addr + 8;
+
+ addr->b[0] = eui64[7];
+ addr->b[1] = eui64[6];
+@@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_a
+ addr->b[3] = eui64[2];
+ addr->b[4] = eui64[1];
+ addr->b[5] = eui64[0];
++}
++
++static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
++ bdaddr_t *addr, u8 *addr_type)
++{
++ copy_to_bdaddr(ip6_daddr, addr);
+
+- addr->b[5] ^= 2;
++ /* We need to toggle the U/L bit that we got from IPv6 address
++ * so that we get the proper address and type of the BD address.
++ */
++ addr->b[5] ^= 0x02;
+
+- /* Set universal/local bit to 0 */
+- if (addr->b[5] & 1) {
+- addr->b[5] &= ~1;
+- *addr_type = ADDR_LE_DEV_PUBLIC;
+- } else {
+- *addr_type = ADDR_LE_DEV_RANDOM;
+- }
++ *addr_type = get_addr_type_from_eui64(addr->b[5]);
+ }
+
+ static int header_create(struct sk_buff *skb, struct net_device *netdev,
+@@ -473,9 +482,11 @@ static int header_create(struct sk_buff
+ /* Get destination BT device from skb.
+ * If there is no such peer then discard the packet.
+ */
+- get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
++ convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
+
+- BT_DBG("dest addr %pMR type %d", &addr, addr_type);
++ BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
++ addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
++ &hdr->daddr);
+
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_ba(dev, &addr, addr_type);
+@@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buf
+ } else {
+ unsigned long flags;
+
+- get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
++ convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
+ eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
+ dev = lowpan_dev(netdev);
+
+@@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buf
+ peer = peer_lookup_ba(dev, &addr, addr_type);
+ read_unlock_irqrestore(&devices_lock, flags);
+
+- BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
+- &addr, &lowpan_cb(skb)->addr, peer);
++ BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
++ netdev->name, &addr,
++ addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
++ &lowpan_cb(skb)->addr, peer);
+
+ if (peer && peer->conn)
+ err = send_pkt(peer->conn, netdev->dev_addr,
+@@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr,
+ eui[6] = addr[1];
+ eui[7] = addr[0];
+
+- eui[0] ^= 2;
+-
+- /* Universal/local bit set, RFC 4291 */
++ /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
+ if (addr_type == ADDR_LE_DEV_PUBLIC)
+- eui[0] |= 1;
++ eui[0] &= ~0x02;
+ else
+- eui[0] &= ~1;
++ eui[0] |= 0x02;
++
++ BT_DBG("type %d addr %*phC", addr_type, 8, eui);
+ }
+
+ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
+@@ -634,7 +647,6 @@ static void set_dev_addr(struct net_devi
+ {
+ netdev->addr_assign_type = NET_ADDR_PERM;
+ set_addr(netdev->dev_addr, addr->b, addr_type);
+- netdev->dev_addr[0] ^= 2;
+ }
+
+ static void ifup(struct net_device *netdev)
+@@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_co
+
+ memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
+ EUI64_ADDR_LEN);
+- peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local)
+- * is done according RFC2464
+- */
+-
+- raw_dump_inline(__func__, "peer IPv6 address",
+- (unsigned char *)&peer->peer_addr, 16);
+- raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
+
+ write_lock_irqsave(&devices_lock, flags);
+ INIT_LIST_HEAD(&peer->list);
--- /dev/null
+From 8a96f3cd22878fc0bb564a8478a6e17c0b8dca73 Mon Sep 17 00:00:00 2001
+From: Jukka Taimisto <jtt@codenomicon.com>
+Date: Thu, 22 May 2014 10:02:39 +0000
+Subject: Bluetooth: Fix L2CAP deadlock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jukka Taimisto <jtt@codenomicon.com>
+
+commit 8a96f3cd22878fc0bb564a8478a6e17c0b8dca73 upstream.
+
+-[0x01 Introduction
+
+We have found a programming error causing a deadlock in the Bluetooth
+subsystem of the Linux kernel. The problem is caused by a missing
+release_sock() call when L2CAP connection creation fails due to a full
+accept queue.
+
+The issue can be reproduced with 3.15-rc5 kernel and is also present in
+earlier kernels.
+
+-[0x02 Details
+
+The problem occurs when multiple L2CAP connections are created to a PSM that
+contains a listening socket (like SDP) and are left pending, for example, in
+configuration (the underlying ACL link is not disconnected between
+connections).
+
+When an L2CAP connection request is received and a listening socket is found,
+the l2cap_sock_new_connection_cb() function (net/bluetooth/l2cap_sock.c) is
+called. This function locks the 'parent' socket and then checks whether the
+accept queue is full.
+
+1178 lock_sock(parent);
+1179
+1180 /* Check for backlog size */
+1181 if (sk_acceptq_is_full(parent)) {
+1182 BT_DBG("backlog full %d", parent->sk_ack_backlog);
+1183 return NULL;
+1184 }
+
+In case the accept queue is full, NULL is returned, but the 'parent' socket
+is not released. Thus when the next L2CAP connection request is received, the
+code blocks on lock_sock() since the parent is still locked.
+
+Also note that for connections already established and waiting for
+configuration to complete, a timeout will occur and l2cap_chan_timeout()
+(net/bluetooth/l2cap_core.c) will be called. All threads calling this
+function will also be blocked waiting for the channel mutex, since the thread
+which is waiting on lock_sock() already holds the channel mutex.
+
+We were able to reproduce this by continuously sending L2CAP connection
+requests followed by disconnection requests containing an invalid CID. This
+left the created connections pending configuration.
+
+After the deadlock occurs it is impossible to kill bluetoothd, btmon will not
+get any more data, etc., requiring a reboot to recover.
+
+-[0x03 Fix
+
+Releasing the 'parent' socket when l2cap_sock_new_connection_cb() returns NULL
+seems to fix the issue.
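+
+For illustration only, a userspace analog of this bug class, with a pthread
+mutex standing in for the socket lock (names are made up): every early
+return out of the callback has to be paired with an unlock, which is what
+the hunk below adds.
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* Stand-in for l2cap_sock_new_connection_cb(). */
+static void *new_connection(int backlog_full)
+{
+        pthread_mutex_lock(&parent_lock);       /* lock_sock(parent) */
+
+        if (backlog_full) {
+                /* The missing release_sock() the patch adds. */
+                pthread_mutex_unlock(&parent_lock);
+                return NULL;
+        }
+
+        pthread_mutex_unlock(&parent_lock);
+        return (void *)1;
+}
+
+int main(void)
+{
+        new_connection(1);      /* first request hits a full backlog */
+        new_connection(0);      /* blocks forever without the unlock above */
+        puts("no deadlock");
+        return 0;
+}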
+
+Signed-off-by: Jukka Taimisto <jtt@codenomicon.com>
+Reported-by: Tommi Mäkilä <tmakila@codenomicon.com>
+Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/bluetooth/l2cap_sock.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1181,13 +1181,16 @@ static struct l2cap_chan *l2cap_sock_new
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
++ release_sock(parent);
+ return NULL;
+ }
+
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+ GFP_ATOMIC);
+- if (!sk)
++ if (!sk) {
++ release_sock(parent);
+ return NULL;
++ }
+
+ bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
--- /dev/null
+From da64c27d3c93ee9f89956b9de86c4127eb244494 Mon Sep 17 00:00:00 2001
+From: Felipe Balbi <balbi@ti.com>
+Date: Wed, 23 Apr 2014 09:58:26 -0500
+Subject: bluetooth: hci_ldisc: fix deadlock condition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Felipe Balbi <balbi@ti.com>
+
+commit da64c27d3c93ee9f89956b9de86c4127eb244494 upstream.
+
+LDISCs shouldn't call tty->ops->write() from within
+->write_wakeup().
+
+->write_wakeup() is called with the port lock taken and
+IRQs disabled; tty->ops->write() will try to acquire
+the same port lock and we will deadlock.
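+
+For illustration only, a userspace sketch of the deferral pattern used below,
+with a pthread mutex standing in for the port lock and a plain thread
+standing in for the workqueue item (names are made up):
+
+#include <pthread.h>
+#include <stdio.h>
+
+static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* Stand-in for tty->ops->write(): it takes the port lock itself. */
+static void port_write(void)
+{
+        pthread_mutex_lock(&port_lock);
+        puts("wrote");
+        pthread_mutex_unlock(&port_lock);
+}
+
+static void *write_work(void *arg)
+{
+        port_write();           /* safe: the port lock is not held here */
+        return NULL;
+}
+
+/* Stand-in for ->write_wakeup(): runs with the port lock held, so it must
+ * not call port_write() directly; it only kicks another context. */
+static void write_wakeup(pthread_t *worker)
+{
+        pthread_create(worker, NULL, write_work, NULL);
+}
+
+int main(void)
+{
+        pthread_t worker;
+
+        pthread_mutex_lock(&port_lock);
+        write_wakeup(&worker);  /* calling port_write() here would deadlock */
+        pthread_mutex_unlock(&port_lock);
+
+        pthread_join(worker, NULL);
+        return 0;
+}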
+
+Acked-by: Marcel Holtmann <marcel@holtmann.org>
+Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
+Reported-by: Huang Shijie <b32955@freescale.com>
+Signed-off-by: Felipe Balbi <balbi@ti.com>
+Tested-by: Andreas Bießmann <andreas@biessmann.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/bluetooth/hci_ldisc.c | 24 +++++++++++++++++++-----
+ drivers/bluetooth/hci_uart.h | 1 +
+ 2 files changed, 20 insertions(+), 5 deletions(-)
+
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_d
+
+ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ {
+- struct tty_struct *tty = hu->tty;
+- struct hci_dev *hdev = hu->hdev;
+- struct sk_buff *skb;
+-
+ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+ return 0;
+@@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *
+
+ BT_DBG("");
+
++ schedule_work(&hu->write_work);
++
++ return 0;
++}
++
++static void hci_uart_write_work(struct work_struct *work)
++{
++ struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
++ struct tty_struct *tty = hu->tty;
++ struct hci_dev *hdev = hu->hdev;
++ struct sk_buff *skb;
++
++ /* REVISIT: should we cope with bad skbs or ->write() returning
++ * and error value ?
++ */
++
+ restart:
+ clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+
+@@ -153,7 +165,6 @@ restart:
+ goto restart;
+
+ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+- return 0;
+ }
+
+ static void hci_uart_init_work(struct work_struct *work)
+@@ -281,6 +292,7 @@ static int hci_uart_tty_open(struct tty_
+ tty->receive_room = 65536;
+
+ INIT_WORK(&hu->init_ready, hci_uart_init_work);
++ INIT_WORK(&hu->write_work, hci_uart_write_work);
+
+ spin_lock_init(&hu->rx_lock);
+
+@@ -318,6 +330,8 @@ static void hci_uart_tty_close(struct tt
+ if (hdev)
+ hci_uart_close(hdev);
+
++ cancel_work_sync(&hu->write_work);
++
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (hdev) {
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -68,6 +68,7 @@ struct hci_uart {
+ unsigned long hdev_flags;
+
+ struct work_struct init_ready;
++ struct work_struct write_work;
+
+ struct hci_uart_proto *proto;
+ void *priv;
--- /dev/null
+From 5d60122b7e30f275593df93b39a76d3c2663cfc2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali.rohar@gmail.com>
+Date: Tue, 22 Apr 2014 12:02:39 -0300
+Subject: media: radio-bcm2048: fix wrong overflow check
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Pali=20Roh=C3=A1r?= <pali.rohar@gmail.com>
+
+commit 5d60122b7e30f275593df93b39a76d3c2663cfc2 upstream.
+
+This patch fixes an off-by-one check in bcm2048_set_region().
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Pali Rohár <pali.rohar@gmail.com>
+Signed-off-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/media/bcm2048/radio-bcm2048.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
++++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
+@@ -737,7 +737,7 @@ static int bcm2048_set_region(struct bcm
+ int err;
+ u32 new_frequency = 0;
+
+- if (region > ARRAY_SIZE(region_configs))
++ if (region >= ARRAY_SIZE(region_configs))
+ return -EINVAL;
+
+ mutex_lock(&bdev->mutex);
--- /dev/null
+From 17e7f1b515803e1a79b246688aacbddd2e34165d Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hans.verkuil@cisco.com>
+Date: Thu, 17 Apr 2014 07:24:31 -0300
+Subject: media: saa7134: fix regression with tvtime
+
+From: Hans Verkuil <hans.verkuil@cisco.com>
+
+commit 17e7f1b515803e1a79b246688aacbddd2e34165d upstream.
+
+This solves this bug:
+
+https://bugzilla.kernel.org/show_bug.cgi?id=73361
+
+The problem is that when you quit tvtime it calls STREAMOFF, but then it queues a
+bunch of buffers for no good reason before closing the file descriptor.
+
+In the past, closing the fd would free the vb queue, since that was part of the file
+handle struct. Since that was moved to the global struct, that no longer happens.
+
+This wouldn't be a problem, but the extra QBUF calls that tvtime does meant that
+the buffer list in videobuf (q->stream) contained buffers, so REQBUFS would fail
+with -EBUSY.
+
+The solution is to init the list head explicitly when releasing the file
+descriptor and to not free the video resource when calling streamoff.
+
+The real fix will hopefully go into kernel 3.16 when the vb2 conversion is
+merged. Basically the saa7134 driver with the old videobuf is so full of holes it
+ain't funny anymore, so consider this a band-aid for kernels 3.14 and 15.
+
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/pci/saa7134/saa7134-video.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -1243,6 +1243,7 @@ static int video_release(struct file *fi
+ videobuf_streamoff(&dev->cap);
+ res_free(dev, fh, RESOURCE_VIDEO);
+ videobuf_mmap_free(&dev->cap);
++ INIT_LIST_HEAD(&dev->cap.stream);
+ }
+ if (dev->cap.read_buf) {
+ buffer_release(&dev->cap, dev->cap.read_buf);
+@@ -1254,6 +1255,7 @@ static int video_release(struct file *fi
+ videobuf_stop(&dev->vbi);
+ res_free(dev, fh, RESOURCE_VBI);
+ videobuf_mmap_free(&dev->vbi);
++ INIT_LIST_HEAD(&dev->vbi.stream);
+ }
+
+ /* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/
+@@ -1987,17 +1989,12 @@ int saa7134_streamoff(struct file *file,
+ enum v4l2_buf_type type)
+ {
+ struct saa7134_dev *dev = video_drvdata(file);
+- int err;
+ int res = saa7134_resource(file);
+
+ if (res != RESOURCE_EMPRESS)
+ pm_qos_remove_request(&dev->qos_request);
+
+- err = videobuf_streamoff(saa7134_queue(file));
+- if (err < 0)
+- return err;
+- res_free(dev, priv, res);
+- return 0;
++ return videobuf_streamoff(saa7134_queue(file));
+ }
+ EXPORT_SYMBOL_GPL(saa7134_streamoff);
+
--- /dev/null
+From 3b35fc81e7ec552147a4fd843d0da0bbbe4ef253 Mon Sep 17 00:00:00 2001
+From: Olivier Langlois <olivier@trillion01.com>
+Date: Fri, 28 Mar 2014 02:42:38 -0300
+Subject: media: uvcvideo: Fix clock param realtime setting
+
+From: Olivier Langlois <olivier@trillion01.com>
+
+commit 3b35fc81e7ec552147a4fd843d0da0bbbe4ef253 upstream.
+
+Timestamps in v4l2 buffers returned to userspace are updated in
+uvc_video_clock_update(), which uses timestamps fetched by
+uvc_video_clock_decode(), which in turn unconditionally calls ktime_get_ts().
+
+Hence setting the module clock param to realtime has no effect before
+this patch.
+
+This has been tested with ffmpeg:
+
+ffmpeg -y -f v4l2 -input_format yuyv422 -video_size 640x480 -framerate 30 -i /dev/video0 \
+ -f alsa -acodec pcm_s16le -ar 16000 -ac 1 -i default \
+ -c:v libx264 -preset ultrafast \
+ -c:a libfdk_aac \
+ out.mkv
+
+and inspecting the v4l2 input starting timestamp.
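+
+For illustration only, a userspace analog of the uvc_video_get_ts() helper
+introduced below (clock_param_realtime is a stand-in for the driver's
+'clock' module parameter): pick the clock once, based on the parameter,
+instead of hard-coding the monotonic clock at the capture site.
+
+#include <stdio.h>
+#include <time.h>
+
+static int clock_param_realtime;
+
+static void get_ts(struct timespec *ts)
+{
+        clock_gettime(clock_param_realtime ? CLOCK_REALTIME : CLOCK_MONOTONIC,
+                      ts);
+}
+
+int main(void)
+{
+        struct timespec ts;
+
+        clock_param_realtime = 1;
+        get_ts(&ts);
+        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
+        return 0;
+}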
+
+Signed-off-by: Olivier Langlois <olivier@trillion01.com>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Mauro Carvalho Chehab <m.chehab@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/usb/uvc/uvc_video.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_s
+ * Clocks and timestamps
+ */
+
++static inline void uvc_video_get_ts(struct timespec *ts)
++{
++ if (uvc_clock_param == CLOCK_MONOTONIC)
++ ktime_get_ts(ts);
++ else
++ ktime_get_real_ts(ts);
++}
++
+ static void
+ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
+ const __u8 *data, int len)
+@@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_stream
+ stream->clock.last_sof = dev_sof;
+
+ host_sof = usb_get_current_frame_number(stream->dev->udev);
+- ktime_get_ts(&ts);
++ uvc_video_get_ts(&ts);
+
+ /* The UVC specification allows device implementations that can't obtain
+ * the USB frame number to keep their own frame counters as long as they
+@@ -1011,10 +1019,7 @@ static int uvc_video_decode_start(struct
+ return -ENODATA;
+ }
+
+- if (uvc_clock_param == CLOCK_MONOTONIC)
+- ktime_get_ts(&ts);
+- else
+- ktime_get_real_ts(&ts);
++ uvc_video_get_ts(&ts);
+
+ buf->buf.v4l2_buf.sequence = stream->sequence;
+ buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
--- /dev/null
+From 086abb58590a4df73e8a6ed71fd418826937cd46 Mon Sep 17 00:00:00 2001
+From: Chander Kashyap <chander.kashyap@linaro.org>
+Date: Fri, 16 May 2014 16:21:17 +0530
+Subject: PM / OPP: fix incorrect OPP count handling in of_init_opp_table
+
+From: Chander Kashyap <chander.kashyap@linaro.org>
+
+commit 086abb58590a4df73e8a6ed71fd418826937cd46 upstream.
+
+In the of_init_opp_table() function, if a failure to add an OPP is
+detected, the count of OPPs yet to be added is not updated.
+Fix this by decrementing this count on failure as well.
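+
+For illustration only, a userspace analog of the loop in question (names are
+stand-ins, not the kernel's): with the old 'continue', a failing add skipped
+the "nr -= 2" and the loop never made progress.
+
+#include <stdio.h>
+
+/* Stand-in for dev_pm_opp_add(): pretend one entry is rejected. */
+static int add_opp(int freq)
+{
+        return freq == 300 ? -1 : 0;
+}
+
+int main(void)
+{
+        int vals[] = { 100, 900, 300, 950, 500, 1000 }; /* freq/volt pairs */
+        int nr = 6, i = 0;
+
+        while (nr) {
+                int freq = vals[i++];
+                int volt = vals[i++];
+
+                if (add_opp(freq))
+                        fprintf(stderr, "failed to add OPP %d (%d)\n",
+                                freq, volt);
+                /* Decrement on failure as well, so the loop terminates. */
+                nr -= 2;
+        }
+        return 0;
+}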
+
+Signed-off-by: Chander Kashyap <k.chander@samsung.com>
+Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Acked-by: Nishanth Menon <nm@ti.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/power/opp.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/base/power/opp.c
++++ b/drivers/base/power/opp.c
+@@ -735,11 +735,9 @@ int of_init_opp_table(struct device *dev
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+- if (dev_pm_opp_add(dev, freq, volt)) {
++ if (dev_pm_opp_add(dev, freq, volt))
+ dev_warn(dev, "%s: Failed to add OPP %ld\n",
+ __func__, freq);
+- continue;
+- }
+ nr -= 2;
+ }
+
--- /dev/null
+From 82084984383babe728e6e3c9a8e5c46278091315 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 5 Jun 2014 11:16:12 +0200
+Subject: rtmutex: Detect changes in the pi lock chain
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 82084984383babe728e6e3c9a8e5c46278091315 upstream.
+
+When we walk the lock chain, we drop all locks after each step. So the
+lock chain can change under us before we reacquire the locks. That's
+harmless in principle as we just follow the wrong lock path. But it
+can lead to a false positive in the deadlock detection logic:
+
+T0 holds L0
+T0 blocks on L1 held by T1
+T1 blocks on L2 held by T2
+T2 blocks on L3 held by T3
+T3 blocks on L4 held by T4
+
+Now we walk the chain
+
+lock T1 -> lock L2 -> adjust L2 -> unlock T1 ->
+ lock T2 -> adjust T2 -> drop locks
+
+T2 times out and blocks on L0
+
+Now we continue:
+
+lock T2 -> lock L0 -> deadlock detected, but it's not a deadlock at all.
+
+Brad tried to work around that in the deadlock detection logic itself,
+but the more I looked at it the less I liked it, because it's crystal
+ball magic after the fact.
+
+We can actually detect a chain change very simply:
+
+lock T1 -> lock L2 -> adjust L2 -> unlock T1 -> lock T2 -> adjust T2 ->
+
+ next_lock = T2->pi_blocked_on->lock;
+
+drop locks
+
+T2 times out and blocks on L0
+
+Now we continue:
+
+lock T2 ->
+
+ if (next_lock != T2->pi_blocked_on->lock)
+ return;
+
+So if we detect that T2 is now blocked on a different lock we stop the
+chain walk. That's also correct in the following scenario:
+
+lock T1 -> lock L2 -> adjust L2 -> unlock T1 -> lock T2 -> adjust T2 ->
+
+ next_lock = T2->pi_blocked_on->lock;
+
+drop locks
+
+T3 times out and drops L3
+T2 acquires L3 and blocks on L4 now
+
+Now we continue:
+
+lock T2 ->
+
+ if (next_lock != T2->pi_blocked_on->lock)
+ return;
+
+We don't have to follow up the chain at that point, because T2
+propagated our priority up to T4 already.
+
+[ Folded a cleanup patch from peterz ]
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reported-by: Brad Mouring <bmouring@ni.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20140605152801.930031935@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/rtmutex.c | 95 +++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 71 insertions(+), 24 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -248,27 +248,36 @@ static void rt_mutex_adjust_prio(struct
+ */
+ int max_lock_depth = 1024;
+
++static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
++{
++ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
++}
++
+ /*
+ * Adjust the priority chain. Also used for deadlock detection.
+ * Decreases task's usage by one - may thus free the task.
+ *
+- * @task: the task owning the mutex (owner) for which a chain walk is probably
+- * needed
++ * @task: the task owning the mutex (owner) for which a chain walk is
++ * probably needed
+ * @deadlock_detect: do we have to carry out deadlock detection?
+- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+- * things for a task that has just got its priority adjusted, and
+- * is waiting on a mutex)
++ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
++ * things for a task that has just got its priority adjusted, and
++ * is waiting on a mutex)
++ * @next_lock: the mutex on which the owner of @orig_lock was blocked before
++ * we dropped its pi_lock. Is never dereferenced, only used for
++ * comparison to detect lock chain changes.
+ * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
+- * its priority to the mutex owner (can be NULL in the case
+- * depicted above or if the top waiter is gone away and we are
+- * actually deboosting the owner)
+- * @top_task: the current top waiter
++ * its priority to the mutex owner (can be NULL in the case
++ * depicted above or if the top waiter is gone away and we are
++ * actually deboosting the owner)
++ * @top_task: the current top waiter
+ *
+ * Returns 0 or -EDEADLK.
+ */
+ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ int deadlock_detect,
+ struct rt_mutex *orig_lock,
++ struct rt_mutex *next_lock,
+ struct rt_mutex_waiter *orig_waiter,
+ struct task_struct *top_task)
+ {
+@@ -327,6 +336,18 @@ static int rt_mutex_adjust_prio_chain(st
+ goto out_unlock_pi;
+
+ /*
++ * We dropped all locks after taking a refcount on @task, so
++ * the task might have moved on in the lock chain or even left
++ * the chain completely and blocks now on an unrelated lock or
++ * on @orig_lock.
++ *
++ * We stored the lock on which @task was blocked in @next_lock,
++ * so we can detect the chain change.
++ */
++ if (next_lock != waiter->lock)
++ goto out_unlock_pi;
++
++ /*
+ * Drop out, when the task has no waiters. Note,
+ * top_waiter can be NULL, when we are in the deboosting
+ * mode!
+@@ -410,11 +431,26 @@ static int rt_mutex_adjust_prio_chain(st
+ __rt_mutex_adjust_prio(task);
+ }
+
++ /*
++ * Check whether the task which owns the current lock is pi
++ * blocked itself. If yes we store a pointer to the lock for
++ * the lock chain change detection above. After we dropped
++ * task->pi_lock next_lock cannot be dereferenced anymore.
++ */
++ next_lock = task_blocked_on_lock(task);
++
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ raw_spin_unlock(&lock->wait_lock);
+
++ /*
++ * We reached the end of the lock chain. Stop right here. No
++ * point to go back just to figure that out.
++ */
++ if (!next_lock)
++ goto out_put_task;
++
+ if (!detect_deadlock && waiter != top_waiter)
+ goto out_put_task;
+
+@@ -524,8 +560,9 @@ static int task_blocks_on_rt_mutex(struc
+ {
+ struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex_waiter *top_waiter = waiter;
+- unsigned long flags;
++ struct rt_mutex *next_lock;
+ int chain_walk = 0, res;
++ unsigned long flags;
+
+ /*
+ * Early deadlock detection. We really don't want the task to
+@@ -557,20 +594,28 @@ static int task_blocks_on_rt_mutex(struc
+ if (!owner)
+ return 0;
+
++ raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ if (waiter == rt_mutex_top_waiter(lock)) {
+- raw_spin_lock_irqsave(&owner->pi_lock, flags);
+ rt_mutex_dequeue_pi(owner, top_waiter);
+ rt_mutex_enqueue_pi(owner, waiter);
+
+ __rt_mutex_adjust_prio(owner);
+ if (owner->pi_blocked_on)
+ chain_walk = 1;
+- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+- }
+- else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
++ } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+ chain_walk = 1;
++ }
+
+- if (!chain_walk)
++ /* Store the lock on which owner is blocked or NULL */
++ next_lock = task_blocked_on_lock(owner);
++
++ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
++ /*
++ * Even if full deadlock detection is on, if the owner is not
++ * blocked itself, we can avoid finding this out in the chain
++ * walk.
++ */
++ if (!chain_walk || !next_lock)
+ return 0;
+
+ /*
+@@ -582,8 +627,8 @@ static int task_blocks_on_rt_mutex(struc
+
+ raw_spin_unlock(&lock->wait_lock);
+
+- res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+- task);
++ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
++ next_lock, waiter, task);
+
+ raw_spin_lock(&lock->wait_lock);
+
+@@ -632,8 +677,8 @@ static void remove_waiter(struct rt_mute
+ {
+ int first = (waiter == rt_mutex_top_waiter(lock));
+ struct task_struct *owner = rt_mutex_owner(lock);
++ struct rt_mutex *next_lock = NULL;
+ unsigned long flags;
+- int chain_walk = 0;
+
+ raw_spin_lock_irqsave(¤t->pi_lock, flags);
+ rt_mutex_dequeue(lock, waiter);
+@@ -657,13 +702,13 @@ static void remove_waiter(struct rt_mute
+ }
+ __rt_mutex_adjust_prio(owner);
+
+- if (owner->pi_blocked_on)
+- chain_walk = 1;
++ /* Store the lock on which owner is blocked or NULL */
++ next_lock = task_blocked_on_lock(owner);
+
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+
+- if (!chain_walk)
++ if (!next_lock)
+ return;
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+@@ -671,7 +716,7 @@ static void remove_waiter(struct rt_mute
+
+ raw_spin_unlock(&lock->wait_lock);
+
+- rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
++ rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+
+ raw_spin_lock(&lock->wait_lock);
+ }
+@@ -684,6 +729,7 @@ static void remove_waiter(struct rt_mute
+ void rt_mutex_adjust_pi(struct task_struct *task)
+ {
+ struct rt_mutex_waiter *waiter;
++ struct rt_mutex *next_lock;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+@@ -694,12 +740,13 @@ void rt_mutex_adjust_pi(struct task_stru
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+-
++ next_lock = waiter->lock;
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+- rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
++
++ rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+ }
+
+ /**
--- /dev/null
+From 3d5c9340d1949733eb37616abd15db36aef9a57c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 5 Jun 2014 12:34:23 +0200
+Subject: rtmutex: Handle deadlock detection smarter
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 3d5c9340d1949733eb37616abd15db36aef9a57c upstream.
+
+Even in the case when deadlock detection is not requested by the
+caller, we can detect deadlocks. Right now the code stops the lock
+chain walk and keeps the waiter enqueued, even on itself. Silly not to
+yell when such a scenario is detected and to keep the waiter enqueued.
+
+Return -EDEADLK unconditionally and handle it at the call sites.
+
+The futex calls return -EDEADLK. The non futex ones dequeue the
+waiter, throw a warning and put the task into a schedule loop.
+
+Tagged for stable as it makes the code more robust.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Brad Mouring <bmouring@ni.com>
+Link: http://lkml.kernel.org/r/20140605152801.836501969@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/rtmutex-debug.h | 5 +++++
+ kernel/locking/rtmutex.c | 33 ++++++++++++++++++++++++++++-----
+ kernel/locking/rtmutex.h | 5 +++++
+ 3 files changed, 38 insertions(+), 5 deletions(-)
+
+--- a/kernel/locking/rtmutex-debug.h
++++ b/kernel/locking/rtmutex-debug.h
+@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_
+ {
+ return (waiter != NULL);
+ }
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++ debug_rt_mutex_print_deadlock(w);
++}
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -311,7 +311,7 @@ static int rt_mutex_adjust_prio_chain(st
+ }
+ put_task_struct(task);
+
+- return deadlock_detect ? -EDEADLK : 0;
++ return -EDEADLK;
+ }
+ retry:
+ /*
+@@ -386,7 +386,7 @@ static int rt_mutex_adjust_prio_chain(st
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ raw_spin_unlock(&lock->wait_lock);
+- ret = deadlock_detect ? -EDEADLK : 0;
++ ret = -EDEADLK;
+ goto out_unlock_pi;
+ }
+
+@@ -573,7 +573,7 @@ static int task_blocks_on_rt_mutex(struc
+ * which is wrong, as the other waiter is not in a deadlock
+ * situation.
+ */
+- if (detect_deadlock && owner == task)
++ if (owner == task)
+ return -EDEADLK;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+@@ -798,6 +798,26 @@ __rt_mutex_slowlock(struct rt_mutex *loc
+ return ret;
+ }
+
++static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
++ struct rt_mutex_waiter *w)
++{
++ /*
++ * If the result is not -EDEADLOCK or the caller requested
++ * deadlock detection, nothing to do here.
++ */
++ if (res != -EDEADLOCK || detect_deadlock)
++ return;
++
++ /*
++ * Yell lowdly and stop the task right here.
++ */
++ rt_mutex_print_deadlock(w);
++ while (1) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ }
++}
++
+ /*
+ * Slow path lock function:
+ */
+@@ -837,8 +857,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+
+ set_current_state(TASK_RUNNING);
+
+- if (unlikely(ret))
++ if (unlikely(ret)) {
+ remove_waiter(lock, &waiter);
++ rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
++ }
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+@@ -1147,7 +1169,8 @@ int rt_mutex_start_proxy_lock(struct rt_
+ return 1;
+ }
+
+- ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
++ /* We enforce deadlock detection for futexes */
++ ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+
+ if (ret && !rt_mutex_owner(lock)) {
+ /*
+--- a/kernel/locking/rtmutex.h
++++ b/kernel/locking/rtmutex.h
+@@ -24,3 +24,8 @@
+ #define debug_rt_mutex_print_deadlock(w) do { } while (0)
+ #define debug_rt_mutex_detect_deadlock(w,d) (d)
+ #define debug_rt_mutex_reset_waiter(w) do { } while (0)
++
++static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
++{
++ WARN(1, "rtmutex deadlock detected\n");
++}
--- /dev/null
+From 27e35715df54cbc4f2d044f681802ae30479e7fb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 11 Jun 2014 18:44:04 +0000
+Subject: rtmutex: Plug slow unlock race
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 27e35715df54cbc4f2d044f681802ae30479e7fb upstream.
+
+When the rtmutex fast path is enabled the slow unlock function can
+create the following situation:
+
+spin_lock(foo->m->wait_lock);
+foo->m->owner = NULL;
+ rt_mutex_lock(foo->m); <-- fast path
+ free = atomic_dec_and_test(foo->refcnt);
+ rt_mutex_unlock(foo->m); <-- fast path
+ if (free)
+ kfree(foo);
+
+spin_unlock(foo->m->wait_lock); <--- Use after free.
+
+Plug the race by changing the slow unlock to the following scheme:
+
+ while (!rt_mutex_has_waiters(m)) {
+ /* Clear the waiters bit in m->owner */
+ clear_rt_mutex_waiters(m);
+ owner = rt_mutex_owner(m);
+ spin_unlock(m->wait_lock);
+ if (cmpxchg(m->owner, owner, 0) == owner)
+ return;
+ spin_lock(m->wait_lock);
+ }
+
+So in case of a new waiter incoming while the owner tries the slow
+path unlock we have two situations:
+
+ unlock(wait_lock);
+ lock(wait_lock);
+ cmpxchg(p, owner, 0) == owner
+ mark_rt_mutex_waiters(lock);
+ acquire(lock);
+
+Or:
+
+ unlock(wait_lock);
+ lock(wait_lock);
+ mark_rt_mutex_waiters(lock);
+ cmpxchg(p, owner, 0) != owner
+ enqueue_waiter();
+ unlock(wait_lock);
+ lock(wait_lock);
+ wakeup_next waiter();
+ unlock(wait_lock);
+ lock(wait_lock);
+ acquire(lock);
+
+If the fast path is disabled, then the simple
+
+ m->owner = NULL;
+ unlock(m->wait_lock);
+
+is sufficient as all access to m->owner is serialized via
+m->wait_lock;
+
+Also document and clarify the wakeup_next_waiter function as suggested
+by Oleg Nesterov.
+
+Reported-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20140611183852.937945560@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/rtmutex.c | 115 ++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 109 insertions(+), 6 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters
+ owner = *p;
+ } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+ }
++
++/*
++ * Safe fastpath aware unlock:
++ * 1) Clear the waiters bit
++ * 2) Drop lock->wait_lock
++ * 3) Try to unlock the lock with cmpxchg
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++ __releases(lock->wait_lock)
++{
++ struct task_struct *owner = rt_mutex_owner(lock);
++
++ clear_rt_mutex_waiters(lock);
++ raw_spin_unlock(&lock->wait_lock);
++ /*
++ * If a new waiter comes in between the unlock and the cmpxchg
++ * we have two situations:
++ *
++ * unlock(wait_lock);
++ * lock(wait_lock);
++ * cmpxchg(p, owner, 0) == owner
++ * mark_rt_mutex_waiters(lock);
++ * acquire(lock);
++ * or:
++ *
++ * unlock(wait_lock);
++ * lock(wait_lock);
++ * mark_rt_mutex_waiters(lock);
++ *
++ * cmpxchg(p, owner, 0) != owner
++ * enqueue_waiter();
++ * unlock(wait_lock);
++ * lock(wait_lock);
++ * wake waiter();
++ * unlock(wait_lock);
++ * lock(wait_lock);
++ * acquire(lock);
++ */
++ return rt_mutex_cmpxchg(lock, owner, NULL);
++}
++
+ #else
+ # define rt_mutex_cmpxchg(l,c,n) (0)
+ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters
+ lock->owner = (struct task_struct *)
+ ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+ }
++
++/*
++ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
++ */
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++ __releases(lock->wait_lock)
++{
++ lock->owner = NULL;
++ raw_spin_unlock(&lock->wait_lock);
++ return true;
++}
+ #endif
+
+ static inline int
+@@ -638,7 +690,8 @@ static int task_blocks_on_rt_mutex(struc
+ /*
+ * Wake up the next waiter on the lock.
+ *
+- * Remove the top waiter from the current tasks waiter list and wake it up.
++ * Remove the top waiter from the current tasks pi waiter list and
++ * wake it up.
+ *
+ * Called with lock->wait_lock held.
+ */
+@@ -659,10 +712,23 @@ static void wakeup_next_waiter(struct rt
+ */
+ rt_mutex_dequeue_pi(current, waiter);
+
+- rt_mutex_set_owner(lock, NULL);
++ /*
++ * As we are waking up the top waiter, and the waiter stays
++ * queued on the lock until it gets the lock, this lock
++ * obviously has waiters. Just set the bit here and this has
++ * the added benefit of forcing all new tasks into the
++ * slow path making sure no task of lower priority than
++ * the top waiter can steal this lock.
++ */
++ lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+
+ raw_spin_unlock_irqrestore(¤t->pi_lock, flags);
+
++ /*
++ * It's safe to dereference waiter as it cannot go away as
++ * long as we hold lock->wait_lock. The waiter task needs to
++ * acquire it in order to dequeue the waiter.
++ */
+ wake_up_process(waiter->task);
+ }
+
+@@ -916,12 +982,49 @@ rt_mutex_slowunlock(struct rt_mutex *loc
+
+ rt_mutex_deadlock_account_unlock(current);
+
+- if (!rt_mutex_has_waiters(lock)) {
+- lock->owner = NULL;
+- raw_spin_unlock(&lock->wait_lock);
+- return;
++ /*
++ * We must be careful here if the fast path is enabled. If we
++ * have no waiters queued we cannot set owner to NULL here
++ * because of:
++ *
++ * foo->lock->owner = NULL;
++ * rtmutex_lock(foo->lock); <- fast path
++ * free = atomic_dec_and_test(foo->refcnt);
++ * rtmutex_unlock(foo->lock); <- fast path
++ * if (free)
++ * kfree(foo);
++ * raw_spin_unlock(foo->lock->wait_lock);
++ *
++ * So for the fastpath enabled kernel:
++ *
++ * Nothing can set the waiters bit as long as we hold
++ * lock->wait_lock. So we do the following sequence:
++ *
++ * owner = rt_mutex_owner(lock);
++ * clear_rt_mutex_waiters(lock);
++ * raw_spin_unlock(&lock->wait_lock);
++ * if (cmpxchg(&lock->owner, owner, 0) == owner)
++ * return;
++ * goto retry;
++ *
++ * The fastpath disabled variant is simple as all access to
++ * lock->owner is serialized by lock->wait_lock:
++ *
++ * lock->owner = NULL;
++ * raw_spin_unlock(&lock->wait_lock);
++ */
++ while (!rt_mutex_has_waiters(lock)) {
++ /* Drops lock->wait_lock ! */
++ if (unlock_rt_mutex_safe(lock) == true)
++ return;
++ /* Relock the rtmutex and try again */
++ raw_spin_lock(&lock->wait_lock);
+ }
+
++ /*
++ * The wakeup next waiter path does not suffer from the above
++ * race. See the comments there.
++ */
+ wakeup_next_waiter(lock);
+
+ raw_spin_unlock(&lock->wait_lock);
acpi-add-dynamic_debug-support.patch
acpica-utstring-check-array-index-bound-before-use.patch
acpi-fix-conflict-between-customized-dsdt-and-dsdt-local-copy.patch
+rtmutex-detect-changes-in-the-pi-lock-chain.patch
+rtmutex-handle-deadlock-detection-smarter.patch
+rtmutex-plug-slow-unlock-race.patch
+media-uvcvideo-fix-clock-param-realtime-setting.patch
+media-radio-bcm2048-fix-wrong-overflow-check.patch
+media-saa7134-fix-regression-with-tvtime.patch
+arm-stacktrace-avoid-listing-stacktrace-functions-in-stacktrace.patch
+arm-8037-1-mm-support-big-endian-page-tables.patch
+pm-opp-fix-incorrect-opp-count-handling-in-of_init_opp_table.patch
+bluetooth-hci_ldisc-fix-deadlock-condition.patch
+bluetooth-6lowpan-fix-mac-address-universal-local-bit-handling.patch
+bluetooth-fix-l2cap-deadlock.patch
+target-iser-bail-from-accept_np-if-np_thread-is-trying-to-close.patch
+target-iser-fix-hangs-in-connection-teardown.patch
--- /dev/null
+From e346ab343f4f58c12a96725c7b13df9cc2ad56f6 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Mon, 19 May 2014 17:44:22 +0300
+Subject: Target/iser: Bail from accept_np if np_thread is trying to close
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit e346ab343f4f58c12a96725c7b13df9cc2ad56f6 upstream.
+
+In case the np_thread state is RESET/SHUTDOWN/EXIT, there is
+no point for isert to stall there, as we may get a hang in
+case no one wakes it up later.
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2725,9 +2725,14 @@ accept_wait:
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+- pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
++ pr_debug("np_thread_state %d for isert_accept_np\n",
++ np->np_thread_state);
++ /**
++ * No point in stalling here when np_thread
++ * is in state RESET/SHUTDOWN/EXIT - bail
++ **/
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
--- /dev/null
+From 9d49f5e284e700576f3b65f1e28dea8539da6661 Mon Sep 17 00:00:00 2001
+From: Sagi Grimberg <sagig@mellanox.com>
+Date: Mon, 19 May 2014 17:44:23 +0300
+Subject: Target/iser: Fix hangs in connection teardown
+
+From: Sagi Grimberg <sagig@mellanox.com>
+
+commit 9d49f5e284e700576f3b65f1e28dea8539da6661 upstream.
+
+In ungraceful teardowns isert close flows seem racy such that
+isert_wait_conn hangs as RDMA_CM_EVENT_DISCONNECTED never
+gets invoked (no one called rdma_disconnect).
+
+Both graceful and ungraceful teardowns will have rx flush errors
+(isert posts a batch once the connection is established). Once all
+flush errors are consumed, we invoke isert_wait_conn and it will
+be responsible for calling rdma_disconnect. This way it can be
+sure that rdma_disconnect was called and it won't wait forever.
+
+This patch also removes the logout_posted indicator. Either the
+logout completion was consumed, in which case there is no problem
+decrementing post_send_buf_count, or it was consumed as a flush error.
+There is no point in keeping it for isert_wait_conn, as there is no danger
+that isert_conn will be accidentally removed while it is running.
+
+(Drop unnecessary sleep_on_conn_wait_comp check in
+ isert_cq_rx_comp_err - nab)
+
+Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 31 ++++++++++---------------------
+ drivers/infiniband/ulp/isert/ib_isert.h | 1 -
+ 2 files changed, 10 insertions(+), 22 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -705,14 +705,10 @@ isert_disconnect_work(struct work_struct
+ isert_put_conn(isert_conn);
+ return;
+ }
+- if (!isert_conn->logout_posted) {
+- pr_debug("Calling rdma_disconnect for !logout_posted from"
+- " isert_disconnect_work\n");
+- rdma_disconnect(isert_conn->conn_cm_id);
+- mutex_unlock(&isert_conn->conn_mutex);
+- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+- goto wake_up;
+- }
++
++ /* Send DREQ/DREP towards our initiator */
++ rdma_disconnect(isert_conn->conn_cm_id);
++
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ wake_up:
+@@ -1617,11 +1613,8 @@ isert_do_control_comp(struct work_struct
+ break;
+ case ISTATE_SEND_LOGOUTRSP:
+ pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+- /*
+- * Call atomic_dec(&isert_conn->post_send_buf_count)
+- * from isert_wait_conn()
+- */
+- isert_conn->logout_posted = true;
++
++ atomic_dec(&isert_conn->post_send_buf_count);
+ iscsit_logout_post_handler(cmd, cmd->conn);
+ break;
+ case ISTATE_SEND_TEXTRSP:
+@@ -1791,6 +1784,8 @@ isert_cq_rx_comp_err(struct isert_conn *
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
++ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
++
+ complete(&isert_conn->conn_wait_comp_err);
+ }
+
+@@ -2782,15 +2777,9 @@ static void isert_wait_conn(struct iscsi
+ struct isert_conn *isert_conn = conn->context;
+
+ pr_debug("isert_wait_conn: Starting \n");
+- /*
+- * Decrement post_send_buf_count for special case when called
+- * from isert_do_control_comp() -> iscsit_logout_post_handler()
+- */
+- mutex_lock(&isert_conn->conn_mutex);
+- if (isert_conn->logout_posted)
+- atomic_dec(&isert_conn->post_send_buf_count);
+
+- if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
++ mutex_lock(&isert_conn->conn_mutex);
++ if (isert_conn->conn_cm_id) {
+ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
+ rdma_disconnect(isert_conn->conn_cm_id);
+ }
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -93,7 +93,6 @@ struct isert_device;
+
+ struct isert_conn {
+ enum iser_conn_state state;
+- bool logout_posted;
+ int post_recv_buf_count;
+ atomic_t post_send_buf_count;
+ u32 responder_resources;