git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Apr 2019 14:10:39 +0000 (16:10 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 3 Apr 2019 14:10:39 +0000 (16:10 +0200)
added patches:
ext4-cleanup-bh-release-code-in-ext4_ind_remove_space.patch
i2c-core-smbus-prevent-stack-corruption-on-read-i2c_block_data.patch
lib-int_sqrt-optimize-initial-value-compute.patch
mm-mempolicy-make-mbind-return-eio-when-mpol_mf_strict-is-specified.patch
tty-serial-atmel-add-is_half_duplex-helper.patch

queue-4.9/ext4-cleanup-bh-release-code-in-ext4_ind_remove_space.patch [new file with mode: 0644]
queue-4.9/i2c-core-smbus-prevent-stack-corruption-on-read-i2c_block_data.patch [new file with mode: 0644]
queue-4.9/lib-int_sqrt-optimize-initial-value-compute.patch [new file with mode: 0644]
queue-4.9/mm-mempolicy-make-mbind-return-eio-when-mpol_mf_strict-is-specified.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/tty-serial-atmel-add-is_half_duplex-helper.patch [new file with mode: 0644]

diff --git a/queue-4.9/ext4-cleanup-bh-release-code-in-ext4_ind_remove_space.patch b/queue-4.9/ext4-cleanup-bh-release-code-in-ext4_ind_remove_space.patch
new file mode 100644 (file)
index 0000000..314554f
--- /dev/null
@@ -0,0 +1,163 @@
+From 5e86bdda41534e17621d5a071b294943cae4376e Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Sat, 23 Mar 2019 11:56:01 -0400
+Subject: ext4: cleanup bh release code in ext4_ind_remove_space()
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit 5e86bdda41534e17621d5a071b294943cae4376e upstream.
+
+Currently, we release the indirect buffer as soon as we are done with
+it in ext4_ind_remove_space(), so brelse() and BUFFER_TRACE() calls
+are scattered everywhere.  That is fragile and hard to read, and we
+may well forget to release a buffer some day.  This patch cleans up
+the code by moving the buffer-releasing code to the end of the
+function.
+
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Jari Ruusu <jari.ruusu@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ext4/indirect.c |   47 ++++++++++++++++++++++-------------------------
+ 1 file changed, 22 insertions(+), 25 deletions(-)
+
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -1217,6 +1217,7 @@ int ext4_ind_remove_space(handle_t *hand
+       ext4_lblk_t offsets[4], offsets2[4];
+       Indirect chain[4], chain2[4];
+       Indirect *partial, *partial2;
++      Indirect *p = NULL, *p2 = NULL;
+       ext4_lblk_t max_block;
+       __le32 nr = 0, nr2 = 0;
+       int n = 0, n2 = 0;
+@@ -1258,7 +1259,7 @@ int ext4_ind_remove_space(handle_t *hand
+               }
+-              partial = ext4_find_shared(inode, n, offsets, chain, &nr);
++              partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
+               if (nr) {
+                       if (partial == chain) {
+                               /* Shared branch grows from the inode */
+@@ -1283,13 +1284,11 @@ int ext4_ind_remove_space(handle_t *hand
+                               partial->p + 1,
+                               (__le32 *)partial->bh->b_data+addr_per_block,
+                               (chain+n-1) - partial);
+-                      BUFFER_TRACE(partial->bh, "call brelse");
+-                      brelse(partial->bh);
+                       partial--;
+               }
+ end_range:
+-              partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
++              partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+               if (nr2) {
+                       if (partial2 == chain2) {
+                               /*
+@@ -1319,16 +1318,14 @@ end_range:
+                                          (__le32 *)partial2->bh->b_data,
+                                          partial2->p,
+                                          (chain2+n2-1) - partial2);
+-                      BUFFER_TRACE(partial2->bh, "call brelse");
+-                      brelse(partial2->bh);
+                       partial2--;
+               }
+               goto do_indirects;
+       }
+       /* Punch happened within the same level (n == n2) */
+-      partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+-      partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
++      partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
++      partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+       /* Free top, but only if partial2 isn't its subtree. */
+       if (nr) {
+@@ -1385,15 +1382,7 @@ end_range:
+                                          partial->p + 1,
+                                          partial2->p,
+                                          (chain+n-1) - partial);
+-                      while (partial > chain) {
+-                              BUFFER_TRACE(partial->bh, "call brelse");
+-                              brelse(partial->bh);
+-                      }
+-                      while (partial2 > chain2) {
+-                              BUFFER_TRACE(partial2->bh, "call brelse");
+-                              brelse(partial2->bh);
+-                      }
+-                      return 0;
++                      goto cleanup;
+               }
+               /*
+@@ -1408,8 +1397,6 @@ end_range:
+                                          partial->p + 1,
+                                          (__le32 *)partial->bh->b_data+addr_per_block,
+                                          (chain+n-1) - partial);
+-                      BUFFER_TRACE(partial->bh, "call brelse");
+-                      brelse(partial->bh);
+                       partial--;
+               }
+               if (partial2 > chain2 && depth2 <= depth) {
+@@ -1417,11 +1404,21 @@ end_range:
+                                          (__le32 *)partial2->bh->b_data,
+                                          partial2->p,
+                                          (chain2+n2-1) - partial2);
+-                      BUFFER_TRACE(partial2->bh, "call brelse");
+-                      brelse(partial2->bh);
+                       partial2--;
+               }
+       }
++
++cleanup:
++      while (p && p > chain) {
++              BUFFER_TRACE(p->bh, "call brelse");
++              brelse(p->bh);
++              p--;
++      }
++      while (p2 && p2 > chain2) {
++              BUFFER_TRACE(p2->bh, "call brelse");
++              brelse(p2->bh);
++              p2--;
++      }
+       return 0;
+ do_indirects:
+@@ -1429,7 +1426,7 @@ do_indirects:
+       switch (offsets[0]) {
+       default:
+               if (++n >= n2)
+-                      return 0;
++                      break;
+               nr = i_data[EXT4_IND_BLOCK];
+               if (nr) {
+                       ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+@@ -1437,7 +1434,7 @@ do_indirects:
+               }
+       case EXT4_IND_BLOCK:
+               if (++n >= n2)
+-                      return 0;
++                      break;
+               nr = i_data[EXT4_DIND_BLOCK];
+               if (nr) {
+                       ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+@@ -1445,7 +1442,7 @@ do_indirects:
+               }
+       case EXT4_DIND_BLOCK:
+               if (++n >= n2)
+-                      return 0;
++                      break;
+               nr = i_data[EXT4_TIND_BLOCK];
+               if (nr) {
+                       ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+@@ -1454,5 +1451,5 @@ do_indirects:
+       case EXT4_TIND_BLOCK:
+               ;
+       }
+-      return 0;
++      goto cleanup;
+ }
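
For readers skimming the queue, here is a minimal standalone sketch of the single-exit cleanup pattern the patch above adopts. It is not the ext4 code itself: held_buf, release() and remove_space_sketch() are illustrative names, and release() merely stands in for BUFFER_TRACE()/brelse().

/*
 * Minimal sketch of the single-exit cleanup pattern adopted above.
 * All names are illustrative; release() stands in for brelse().
 */
struct held_buf {
        void *bh;
};

static void release(void *bh)
{
        /* stand-in for BUFFER_TRACE() + brelse() */
        (void)bh;
}

static int remove_space_sketch(struct held_buf *chain, struct held_buf *partial)
{
        struct held_buf *p = partial;   /* remember the deepest held element */
        int err = 0;

        if (!partial) {                 /* one of possibly many early exits */
                err = -1;
                goto cleanup;
        }

        /* ... main work; further early exits would also 'goto cleanup' ... */

cleanup:
        /* one place releases everything, walking back toward the base */
        while (p && p > chain) {
                release(p->bh);
                p--;
        }
        return err;
}

Every exit path funnels through the same label, so a buffer can no longer be leaked by a forgotten brelse() on one of the early returns.
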
diff --git a/queue-4.9/i2c-core-smbus-prevent-stack-corruption-on-read-i2c_block_data.patch b/queue-4.9/i2c-core-smbus-prevent-stack-corruption-on-read-i2c_block_data.patch
new file mode 100644 (file)
index 0000000..e58845a
--- /dev/null
@@ -0,0 +1,70 @@
+From 89c6efa61f5709327ecfa24bff18e57a4e80c7fa Mon Sep 17 00:00:00 2001
+From: Jeremy Compostella <jeremy.compostella@intel.com>
+Date: Wed, 15 Nov 2017 12:31:44 -0700
+Subject: i2c: core-smbus: prevent stack corruption on read I2C_BLOCK_DATA
+
+From: Jeremy Compostella <jeremy.compostella@intel.com>
+
+commit 89c6efa61f5709327ecfa24bff18e57a4e80c7fa upstream.
+
+On an I2C_SMBUS_I2C_BLOCK_DATA read request, if data->block[0] is
+greater than I2C_SMBUS_BLOCK_MAX + 1, the underlying I2C driver writes
+data out of the msgbuf1 array boundary.
+
+A user application can run into this issue by calling the I2C_SMBUS
+ioctl with data.block[0] set to a value greater than
+I2C_SMBUS_BLOCK_MAX + 1.
+
+This patch makes the code compliant with
+Documentation/i2c/dev-interface by raising an error when the requested
+size is larger than 32 bytes.
+
+Call Trace:
+ [<ffffffff8139f695>] dump_stack+0x67/0x92
+ [<ffffffff811802a4>] panic+0xc5/0x1eb
+ [<ffffffff810ecb5f>] ? vprintk_default+0x1f/0x30
+ [<ffffffff817456d3>] ? i2cdev_ioctl_smbus+0x303/0x320
+ [<ffffffff8109a68b>] __stack_chk_fail+0x1b/0x20
+ [<ffffffff817456d3>] i2cdev_ioctl_smbus+0x303/0x320
+ [<ffffffff81745aed>] i2cdev_ioctl+0x4d/0x1e0
+ [<ffffffff811f761a>] do_vfs_ioctl+0x2ba/0x490
+ [<ffffffff81336e43>] ? security_file_ioctl+0x43/0x60
+ [<ffffffff811f7869>] SyS_ioctl+0x79/0x90
+ [<ffffffff81a22e97>] entry_SYSCALL_64_fastpath+0x12/0x6a
+
+Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Cc: stable@kernel.org
+[connoro@google.com: 4.9 backport: adjust filename]
+Signed-off-by: Connor O'Brien <connoro@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/i2c-core.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/i2c/i2c-core.c
++++ b/drivers/i2c/i2c-core.c
+@@ -3250,16 +3250,16 @@ static s32 i2c_smbus_xfer_emulated(struc
+                                  the underlying bus driver */
+               break;
+       case I2C_SMBUS_I2C_BLOCK_DATA:
++              if (data->block[0] > I2C_SMBUS_BLOCK_MAX) {
++                      dev_err(&adapter->dev, "Invalid block %s size %d\n",
++                              read_write == I2C_SMBUS_READ ? "read" : "write",
++                              data->block[0]);
++                      return -EINVAL;
++              }
+               if (read_write == I2C_SMBUS_READ) {
+                       msg[1].len = data->block[0];
+               } else {
+                       msg[0].len = data->block[0] + 1;
+-                      if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) {
+-                              dev_err(&adapter->dev,
+-                                      "Invalid block write size %d\n",
+-                                      data->block[0]);
+-                              return -EINVAL;
+-                      }
+                       for (i = 1; i <= data->block[0]; i++)
+                               msgbuf0[i] = data->block[i];
+               }
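
A hedged sketch of the check the patch above moves in front of the read/write split. This is not the kernel's i2c-core code: SMBUS_BLOCK_MAX mirrors I2C_SMBUS_BLOCK_MAX (32) and check_block_len() is an illustrative name.

#include <errno.h>
#include <stdint.h>

#define SMBUS_BLOCK_MAX 32      /* mirrors the kernel's I2C_SMBUS_BLOCK_MAX */

/* Reject a user-supplied I2C_BLOCK_DATA length before it sizes any message. */
static int check_block_len(const uint8_t *block)
{
        /* block[0] carries the requested transfer length for reads and writes */
        if (block[0] > SMBUS_BLOCK_MAX)
                return -EINVAL; /* would overrun the on-stack message buffer */
        return 0;
}

Validating the length before it is ever used covers the read path as well, which is what the original write-only check missed.
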
diff --git a/queue-4.9/lib-int_sqrt-optimize-initial-value-compute.patch b/queue-4.9/lib-int_sqrt-optimize-initial-value-compute.patch
new file mode 100644 (file)
index 0000000..e7d2dcc
--- /dev/null
@@ -0,0 +1,91 @@
+From f8ae107eef209bff29a5816bc1aad40d5cd69a80 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 17 Nov 2017 15:28:08 -0800
+Subject: lib/int_sqrt: optimize initial value compute
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit f8ae107eef209bff29a5816bc1aad40d5cd69a80 upstream.
+
+The initial value (@m) compute is:
+
+       m = 1UL << (BITS_PER_LONG - 2);
+       while (m > x)
+               m >>= 2;
+
+This is a linear search for the highest even bit smaller than or equal
+to @x.  We can implement it as a binary search using __fls() (or better
+still, its hardware implementation):
+
+       m = 1UL << (__fls(x) & ~1UL);
+
+Especially for small values of @x, which are the more common arguments
+when doing a CDF on idle times, the linear search is close to the worst
+case, while the binary search of __fls() is a constant 6 (or 5 on 32-bit)
+branches.
+
+      cycles:                 branches:              branch-misses:
+
+PRE:
+
+hot:   43.633557 +- 0.034373  45.333132 +- 0.002277  0.023529 +- 0.000681
+cold: 207.438411 +- 0.125840  45.333132 +- 0.002277  6.976486 +- 0.004219
+
+SOFTWARE FLS:
+
+hot:   29.576176 +- 0.028850  26.666730 +- 0.004511  0.019463 +- 0.000663
+cold: 165.947136 +- 0.188406  26.666746 +- 0.004511  6.133897 +- 0.004386
+
+HARDWARE FLS:
+
+hot:   24.720922 +- 0.025161  20.666784 +- 0.004509  0.020836 +- 0.000677
+cold: 132.777197 +- 0.127471  20.666776 +- 0.004509  5.080285 +- 0.003874
+
+Averages computed over all values <128k, using an LFSR to generate order.
+Cold numbers have an LFSR-based branch trace buffer 'confuser' run between
+each int_sqrt() invocation.
+
+Link: http://lkml.kernel.org/r/20171020164644.936577234@infradead.org
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Suggested-by: Joe Perches <joe@perches.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Anshul Garg <aksgarg1989@gmail.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: David Miller <davem@davemloft.net>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Matthew Wilcox <mawilcox@microsoft.com>
+Cc: Michael Davidson <md@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Joe Perches <joe@perches.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/int_sqrt.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/lib/int_sqrt.c
++++ b/lib/int_sqrt.c
+@@ -7,6 +7,7 @@
+ #include <linux/kernel.h>
+ #include <linux/export.h>
++#include <linux/bitops.h>
+ /**
+  * int_sqrt - rough approximation to sqrt
+@@ -21,10 +22,7 @@ unsigned long int_sqrt(unsigned long x)
+       if (x <= 1)
+               return x;
+-      m = 1UL << (BITS_PER_LONG - 2);
+-      while (m > x)
+-              m >>= 2;
+-
++      m = 1UL << (__fls(x) & ~1UL);
+       while (m != 0) {
+               b = y + m;
+               y >>= 1;
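
As a rough userspace illustration of the change (a sketch, not the kernel's lib/int_sqrt.c; fls_long() merely stands in for the kernel's __fls()):

#include <limits.h>

/* index of the highest set bit; stand-in for the kernel's __fls() */
static unsigned long fls_long(unsigned long x)
{
        return (sizeof(x) * CHAR_BIT - 1) - (unsigned long)__builtin_clzl(x);
}

unsigned long int_sqrt_sketch(unsigned long x)
{
        unsigned long b, m, y = 0;

        if (x <= 1)
                return x;

        /* highest even bit <= x, found in one step instead of a linear scan */
        m = 1UL << (fls_long(x) & ~1UL);
        while (m != 0) {
                b = y + m;
                y >>= 1;
                if (x >= b) {
                        x -= b;
                        y += m;
                }
                m >>= 2;
        }
        return y;
}

For example, with x = 10, fls_long(x) is 3, so m starts directly at 1UL << 2 = 4 rather than being shifted down two bits at a time from 1UL << 62 on a 64-bit machine.
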
diff --git a/queue-4.9/mm-mempolicy-make-mbind-return-eio-when-mpol_mf_strict-is-specified.patch b/queue-4.9/mm-mempolicy-make-mbind-return-eio-when-mpol_mf_strict-is-specified.patch
new file mode 100644 (file)
index 0000000..dc2e052
--- /dev/null
@@ -0,0 +1,89 @@
+From a7f40cfe3b7ada57af9b62fd28430eeb4a7cfcb7 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@linux.alibaba.com>
+Date: Thu, 28 Mar 2019 20:43:55 -0700
+Subject: mm: mempolicy: make mbind() return -EIO when MPOL_MF_STRICT is specified
+
+From: Yang Shi <yang.shi@linux.alibaba.com>
+
+commit a7f40cfe3b7ada57af9b62fd28430eeb4a7cfcb7 upstream.
+
+When MPOL_MF_STRICT was specified and an existing page was already on a
+node that does not follow the policy, mbind() should return -EIO.  But
+commit 6f4576e3687b ("mempolicy: apply page table walker on
+queue_pages_range()") broke the rule.
+
+And commit c8633798497c ("mm: mempolicy: mbind and migrate_pages support
+thp migration") didn't return the correct value for THP mbind() too.
+
+If MPOL_MF_STRICT is set, ignore vma_migratable() to make sure it
+reaches queue_pages_to_pte_range() or queue_pages_pmd() to check if an
+existing page was already on a node that does not follow the policy.
+And, since a non-migratable vma may be used, also return -EIO if
+MPOL_MF_MOVE or MPOL_MF_MOVE_ALL was specified.
+
+Tested with https://github.com/metan-ucw/ltp/blob/master/testcases/kernel/syscalls/mbind/mbind02.c
+
+[akpm@linux-foundation.org: tweak code comment]
+Link: http://lkml.kernel.org/r/1553020556-38583-1-git-send-email-yang.shi@linux.alibaba.com
+Fixes: 6f4576e3687b ("mempolicy: apply page table walker on queue_pages_range()")
+Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
+Signed-off-by: Oscar Salvador <osalvador@suse.de>
+Reported-by: Cyril Hrubis <chrubis@suse.cz>
+Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
+Acked-by: Rafael Aquini <aquini@redhat.com>
+Reviewed-by: Oscar Salvador <osalvador@suse.de>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/mempolicy.c |   18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -547,11 +547,16 @@ retry:
+                       goto retry;
+               }
+-              migrate_page_add(page, qp->pagelist, flags);
++              if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
++                      if (!vma_migratable(vma))
++                              break;
++                      migrate_page_add(page, qp->pagelist, flags);
++              } else
++                      break;
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+-      return 0;
++      return addr != end ? -EIO : 0;
+ }
+ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+@@ -623,7 +628,12 @@ static int queue_pages_test_walk(unsigne
+       unsigned long endvma = vma->vm_end;
+       unsigned long flags = qp->flags;
+-      if (!vma_migratable(vma))
++      /*
++       * Need check MPOL_MF_STRICT to return -EIO if possible
++       * regardless of vma_migratable
++       */
++      if (!vma_migratable(vma) &&
++          !(flags & MPOL_MF_STRICT))
+               return 1;
+       if (endvma > end)
+@@ -650,7 +660,7 @@ static int queue_pages_test_walk(unsigne
+       }
+       /* queue pages from current vma */
+-      if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
++      if (flags & MPOL_MF_VALID)
+               return 0;
+       return 1;
+ }
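
A hedged userspace illustration of the semantics the patch above restores: with MPOL_MF_STRICT and no MPOL_MF_MOVE*, mbind() must fail with EIO if any already-faulted page sits outside the requested nodemask. The node number is an assumption and the program only makes sense on a multi-node NUMA machine (needs the libnuma headers; link with -lnuma).

#include <errno.h>
#include <numaif.h>             /* mbind(), MPOL_BIND, MPOL_MF_STRICT */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2UL * 1024 * 1024;
        unsigned long nodemask = 1UL << 1;      /* bind to node 1 only (assumption) */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        memset(p, 0, len);      /* fault the pages in on whatever node is local */

        /* If any of those pages is not on node 1, this should now fail with
         * errno == EIO instead of reporting success. */
        if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
                  MPOL_MF_STRICT) == -1)
                perror("mbind");

        return 0;
}
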
diff --git a/queue-4.9/series b/queue-4.9/series
index 43ec6919ab97081818644aae14c3d8b3427d8edf..c16607f74dfdae8b59b741333a0dae08641f43b8 100644 (file)
@@ -1,2 +1,7 @@
 arm64-debug-don-t-propagate-unknown-far-into-si_code-for-debug-signals.patch
 arm64-debug-ensure-debug-handlers-check-triggering-exception-level.patch
+ext4-cleanup-bh-release-code-in-ext4_ind_remove_space.patch
+lib-int_sqrt-optimize-initial-value-compute.patch
+tty-serial-atmel-add-is_half_duplex-helper.patch
+mm-mempolicy-make-mbind-return-eio-when-mpol_mf_strict-is-specified.patch
+i2c-core-smbus-prevent-stack-corruption-on-read-i2c_block_data.patch
diff --git a/queue-4.9/tty-serial-atmel-add-is_half_duplex-helper.patch b/queue-4.9/tty-serial-atmel-add-is_half_duplex-helper.patch
new file mode 100644 (file)
index 0000000..44f4688
--- /dev/null
@@ -0,0 +1,80 @@
+From f3040983132bf3477acd45d2452a906e67c2fec9 Mon Sep 17 00:00:00 2001
+From: Razvan Stefanescu <razvan.stefanescu@microchip.com>
+Date: Tue, 19 Mar 2019 15:20:34 +0200
+Subject: tty/serial: atmel: Add is_half_duplex helper
+
+From: Razvan Stefanescu <razvan.stefanescu@microchip.com>
+
+commit f3040983132bf3477acd45d2452a906e67c2fec9 upstream.
+
+Use a helper function to check whether a port needs to use half-duplex
+communication, replacing several occurrences of multi-line bit checking.
+
+Fixes: b389f173aaa1 ("tty/serial: atmel: RS485 half duplex w/DMA: enable RX after TX is done")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Razvan Stefanescu <razvan.stefanescu@microchip.com>
+Acked-by: Richard Genoud <richard.genoud@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/tty/serial/atmel_serial.c |   19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -241,6 +241,12 @@ static inline void atmel_uart_write_char
+ #endif
++static inline int atmel_uart_is_half_duplex(struct uart_port *port)
++{
++      return (port->rs485.flags & SER_RS485_ENABLED) &&
++              !(port->rs485.flags & SER_RS485_RX_DURING_TX);
++}
++
+ #ifdef CONFIG_SERIAL_ATMEL_PDC
+ static bool atmel_use_pdc_rx(struct uart_port *port)
+ {
+@@ -492,9 +498,9 @@ static void atmel_stop_tx(struct uart_po
+       /* Disable interrupts */
+       atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+-      if ((port->rs485.flags & SER_RS485_ENABLED) &&
+-          !(port->rs485.flags & SER_RS485_RX_DURING_TX))
++      if (atmel_uart_is_half_duplex(port))
+               atmel_start_rx(port);
++
+ }
+ /*
+@@ -511,8 +517,7 @@ static void atmel_start_tx(struct uart_p
+               return;
+       if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
+-              if ((port->rs485.flags & SER_RS485_ENABLED) &&
+-                  !(port->rs485.flags & SER_RS485_RX_DURING_TX))
++              if (atmel_uart_is_half_duplex(port))
+                       atmel_stop_rx(port);
+       if (atmel_use_pdc_tx(port))
+@@ -809,8 +814,7 @@ static void atmel_complete_tx_dma(void *
+        */
+       if (!uart_circ_empty(xmit))
+               atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
+-      else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+-               !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
++      else if (atmel_uart_is_half_duplex(port)) {
+               /* DMA done, stop TX, start RX for RS485 */
+               atmel_start_rx(port);
+       }
+@@ -1386,8 +1390,7 @@ static void atmel_tx_pdc(struct uart_por
+               atmel_uart_writel(port, ATMEL_US_IER,
+                                 atmel_port->tx_done_mask);
+       } else {
+-              if ((port->rs485.flags & SER_RS485_ENABLED) &&
+-                  !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
++              if (atmel_uart_is_half_duplex(port)) {
+                       /* DMA done, stop TX, start RX for RS485 */
+                       atmel_start_rx(port);
+               }
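
For context, a hedged userspace sketch of the RS485 configuration under which the new atmel_uart_is_half_duplex() helper evaluates true: RS485 enabled with the receiver not kept on during transmit. The device node is an assumption; use whatever tty node the port is exposed as.

#include <fcntl.h>
#include <linux/serial.h>       /* struct serial_rs485, SER_RS485_* flags */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct serial_rs485 rs485;
        int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY); /* illustrative port */

        if (fd < 0)
                return 1;

        memset(&rs485, 0, sizeof(rs485));
        rs485.flags = SER_RS485_ENABLED;        /* SER_RS485_RX_DURING_TX left clear */

        if (ioctl(fd, TIOCSRS485, &rs485) < 0)
                perror("TIOCSRS485");

        close(fd);
        return 0;
}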