}
}
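+/* Check an RX buffer descriptor's status bits for errors, bumping the
+ * matching ndev->stats counters.  Returns -EIO if the frame must be
+ * dropped, 0 if it is clean.
+ */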
+static int fec_rx_error_check(struct net_device *ndev, u16 status)
+{
+	if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+		      BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+		      BD_ENET_RX_CL)) {
+		ndev->stats.rx_errors++;
+
+		if (status & BD_ENET_RX_OV) {
+			/* FIFO overrun */
+			ndev->stats.rx_fifo_errors++;
+			return -EIO;
+		}
+
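+		/* The caller XORs BD_ENET_RX_LAST into status beforehand,
+		 * so a set bit here means the descriptor was *not* marked
+		 * as the last one of the frame.
+		 */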
+		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH |
+			      BD_ENET_RX_LAST)) {
+			/* Frame too long or too short. */
+			ndev->stats.rx_length_errors++;
+			if ((status & BD_ENET_RX_LAST) && net_ratelimit())
+				netdev_err(ndev, "rcv is not +last\n");
+		}
+
+		/* CRC Error */
+		if (status & BD_ENET_RX_CR)
+			ndev->stats.rx_crc_errors++;
+
+		/* Report late collisions as a frame error. */
+		if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+			ndev->stats.rx_frame_errors++;
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
-		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
-			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
-			   BD_ENET_RX_CL)) {
-			ndev->stats.rx_errors++;
-			if (status & BD_ENET_RX_OV) {
-				/* FIFO overrun */
-				ndev->stats.rx_fifo_errors++;
-				goto rx_processing_done;
-			}
-			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
-				   | BD_ENET_RX_LAST)) {
-				/* Frame too long or too short. */
-				ndev->stats.rx_length_errors++;
-				if (status & BD_ENET_RX_LAST)
-					netdev_err(ndev, "rcv is not +last\n");
-			}
-			if (status & BD_ENET_RX_CR)	/* CRC Error */
-				ndev->stats.rx_crc_errors++;
-			/* Report late collisions as a frame error. */
-			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
-				ndev->stats.rx_frame_errors++;
+		if (unlikely(fec_rx_error_check(ndev, status)))
			goto rx_processing_done;
-		}
		/* Process the incoming frame. */
		ndev->stats.rx_packets++;