/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

/* The following driver-local headers are needed for the register accessors
 * and transport definitions used below (assumed, matching upstream layout). */
#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   8 buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has initial pool in the size of num_queues*(8-2) - the
 *   maximum missing RBDs per allocation request (request posted with 2
 *   empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supplies the recycle of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 *
 */
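/*
 * Illustrative sizing example (not part of the original source): assuming the
 * constants used later in this file, RX_CLAIM_REQ_ALLOC = 8 and
 * RX_POST_REQ_ALLOC = 2, a request is posted to the allocator once 2 used
 * RBDs are handed over, so up to 8 - 2 = 6 RBDs per queue can be "missing"
 * while a request is in flight.  With, say, num_rx_queues = 4 the initial
 * allocator pool would therefore be 4 * (8 - 2) = 24 RBDs, which is the
 * num_queues*(8-2) figure described above.
 */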
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
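/*
 * Worked example (illustrative, not from the original source): with
 * queue_size = 256, read = 10 and write = 200 the expression above yields
 * (10 - 200 - 1) & 255 = (-191) & 255 = 65 free slots, i.e. the same result
 * as (10 - 200 - 1) mod 256 even though the dividend is negative.
 */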
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
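/*
 * Illustrative example (not from the original source): the legacy descriptor
 * holds the DMA address shifted right by 8 bits, so a receive page mapped at
 * 0x12345600 is written to the RBD as 0x00123456.  The dropped low 8 bits are
 * expected to be zero, i.e. the buffer is at least 256-byte aligned.
 */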
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}
static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		bd[rxq->write].type_n_size =
			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* 12 first bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
						     PAGE_SIZE << trans_pcie->rx_page_order,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;
	/*
	 * W/A 22560 device step Z0 must be non zero bug
	 * TODO: remove this when stop supporting Z0
	 */
	*rxq->cr_tail = cpu_to_le16(500);

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}
int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->cfg->device_family >=
			      IWL_DEVICE_FAMILY_22560 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues continuously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
			dma_alloc_coherent(trans->dev,
					   rb_stts_size * trans->num_rx_queues,
					   &trans_pcie->base_rb_stts_dma,
					   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rxq);

	return ret;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device indice tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
			MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;
	size_t rb_stts_size = trans->cfg->device_family >=
			      IWL_DEVICE_FAMILY_22560 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
					  struct iwl_rb_allocator *rba)
{
	spin_lock(&rba->lock);
	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when a RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
	 * after but we still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion
		 */
		iwl_pcie_rx_move_to_allocator(rxq, rba);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rxq *rxq,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency,
				  int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			rxcb.status = rxq->cd[i].status;

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = iwl_pcie_get_cmd_index(txq, index);

		if (rxq->id == trans_pcie->def_rx_queue)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	if (!trans->cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
	else
		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;

	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	if (rxb->invalid)
		goto out_err;

	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;

	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq;
	u32 r, i, count = 0;
	bool emergency = false;

	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
		return;

	rxq = &trans_pcie->rxq[queue];

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rb_allocator *rba = &trans_pcie->rba;
		struct iwl_rx_mem_buffer *rxb;
		/* number of RBDs still waiting for page allocation */
		u32 rb_pending_alloc =
			atomic_read(&trans_pcie->rba.req_pending) *
			RX_CLAIM_REQ_ALLOC;

		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
			     !emergency)) {
			iwl_pcie_rx_move_to_allocator(rxq, rba);
			emergency = true;
			IWL_DEBUG_TPT(trans,
				      "RX path is in emergency. Pending allocations %d\n",
				      rb_pending_alloc);
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);

		rxb = iwl_pcie_get_rxb(trans, rxq, i);
		if (!rxb)
			goto out;

		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success.
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			/* Add the remaining empty RBDs for allocator use */
			iwl_pcie_rx_move_to_allocator(rxq, rba);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rb_pending_alloc < rxq->queue_size / 3) {
					IWL_DEBUG_TPT(trans,
						      "RX path exited emergency. Pending allocations %d\n",
						      rb_pending_alloc);
					emergency = false;
				}

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	/* update cr tail with the rxq read pointer */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
/*
 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		if (!trans_pcie->txq[i])
			continue;
		del_timer(&trans_pcie->txq[i]->stuck_timer);
	}

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * re-enable INTA.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	bool hw_rfkill, prev, report;

	mutex_lock(&trans_pcie->mutex);
	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
		set_bit(STATUS_RFKILL_HW, &trans->status);
	}
	if (trans_pcie->opmode_down)
		report = hw_rfkill;
	else
		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
		 hw_rfkill ? "disable radio" : "enable radio");

	isr_stats->rfkill++;

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);
	mutex_unlock(&trans_pcie->mutex);

	if (hw_rfkill) {
		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
				       &trans->status))
			IWL_DEBUG_RF_KILL(trans,
					  "Rfkill while SYNC HCMD in flight\n");
		wake_up(&trans_pcie->wait_command_queue);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
}
1739 irqreturn_t
iwl_pcie_irq_handler(int irq
, void *dev_id
)
1741 struct iwl_trans
*trans
= dev_id
;
1742 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1743 struct isr_statistics
*isr_stats
= &trans_pcie
->isr_stats
;
1747 lock_map_acquire(&trans
->sync_cmd_lockdep_map
);
1749 spin_lock(&trans_pcie
->irq_lock
);
1751 /* dram interrupt table not set yet,
1752 * use legacy interrupt.
1754 if (likely(trans_pcie
->use_ict
))
1755 inta
= iwl_pcie_int_cause_ict(trans
);
1757 inta
= iwl_pcie_int_cause_non_ict(trans
);
1759 if (iwl_have_debug_level(IWL_DL_ISR
)) {
1760 IWL_DEBUG_ISR(trans
,
1761 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1762 inta
, trans_pcie
->inta_mask
,
1763 iwl_read32(trans
, CSR_INT_MASK
),
1764 iwl_read32(trans
, CSR_FH_INT_STATUS
));
1765 if (inta
& (~trans_pcie
->inta_mask
))
1766 IWL_DEBUG_ISR(trans
,
1767 "We got a masked interrupt (0x%08x)\n",
1768 inta
& (~trans_pcie
->inta_mask
));
1771 inta
&= trans_pcie
->inta_mask
;
1774 * Ignore interrupt if there's nothing in NIC to service.
1775 * This may be due to IRQ shared with another device,
1776 * or due to sporadic interrupts thrown from our NIC.
1778 if (unlikely(!inta
)) {
1779 IWL_DEBUG_ISR(trans
, "Ignore interrupt, inta == 0\n");
1781 * Re-enable interrupts here since we don't
1782 * have anything to service
1784 if (test_bit(STATUS_INT_ENABLED
, &trans
->status
))
1785 _iwl_enable_interrupts(trans
);
1786 spin_unlock(&trans_pcie
->irq_lock
);
1787 lock_map_release(&trans
->sync_cmd_lockdep_map
);
1791 if (unlikely(inta
== 0xFFFFFFFF || (inta
& 0xFFFFFFF0) == 0xa5a5a5a0)) {
1793 * Hardware disappeared. It might have
1794 * already raised an interrupt.
1796 IWL_WARN(trans
, "HARDWARE GONE?? INTA == 0x%08x\n", inta
);
1797 spin_unlock(&trans_pcie
->irq_lock
);
1801 /* Ack/clear/reset pending uCode interrupts.
1802 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1804 /* There is a hardware bug in the interrupt mask function that some
1805 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1806 * they are disabled in the CSR_INT_MASK register. Furthermore the
1807 * ICT interrupt handling mechanism has another bug that might cause
1808 * these unmasked interrupts fail to be detected. We workaround the
1809 * hardware bugs here by ACKing all the possible interrupts so that
1810 * interrupt coalescing can still be achieved.
1812 iwl_write32(trans
, CSR_INT
, inta
| ~trans_pcie
->inta_mask
);
1814 if (iwl_have_debug_level(IWL_DL_ISR
))
1815 IWL_DEBUG_ISR(trans
, "inta 0x%08x, enabled 0x%08x\n",
1816 inta
, iwl_read32(trans
, CSR_INT_MASK
));
1818 spin_unlock(&trans_pcie
->irq_lock
);
1820 /* Now service all interrupt bits discovered above. */
1821 if (inta
& CSR_INT_BIT_HW_ERR
) {
1822 IWL_ERR(trans
, "Hardware error detected. Restarting.\n");
1824 /* Tell the device to stop sending interrupts */
1825 iwl_disable_interrupts(trans
);
1828 iwl_pcie_irq_handle_error(trans
);
1830 handled
|= CSR_INT_BIT_HW_ERR
;
1835 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1836 if (inta
& CSR_INT_BIT_SCD
) {
1837 IWL_DEBUG_ISR(trans
,
1838 "Scheduler finished to transmit the frame/frames.\n");
1842 /* Alive notification via Rx interrupt will do the real work */
1843 if (inta
& CSR_INT_BIT_ALIVE
) {
1844 IWL_DEBUG_ISR(trans
, "Alive interrupt\n");
1846 if (trans
->cfg
->gen2
) {
1848 * We can restock, since firmware configured
1851 iwl_pcie_rxmq_restock(trans
, trans_pcie
->rxq
);
1854 handled
|= CSR_INT_BIT_ALIVE
;
1857 /* Safely ignore these bits for debug checks below */
1858 inta
&= ~(CSR_INT_BIT_SCD
| CSR_INT_BIT_ALIVE
);
1860 /* HW RF KILL switch toggled */
1861 if (inta
& CSR_INT_BIT_RF_KILL
) {
1862 iwl_pcie_handle_rfkill_irq(trans
);
1863 handled
|= CSR_INT_BIT_RF_KILL
;
1866 /* Chip got too hot and stopped itself */
1867 if (inta
& CSR_INT_BIT_CT_KILL
) {
1868 IWL_ERR(trans
, "Microcode CT kill error detected.\n");
1869 isr_stats
->ctkill
++;
1870 handled
|= CSR_INT_BIT_CT_KILL
;
1873 /* Error detected by uCode */
1874 if (inta
& CSR_INT_BIT_SW_ERR
) {
1875 IWL_ERR(trans
, "Microcode SW error detected. "
1876 " Restarting 0x%X.\n", inta
);
1878 iwl_pcie_irq_handle_error(trans
);
1879 handled
|= CSR_INT_BIT_SW_ERR
;
1882 /* uCode wakes up after power-down sleep */
1883 if (inta
& CSR_INT_BIT_WAKEUP
) {
1884 IWL_DEBUG_ISR(trans
, "Wakeup interrupt\n");
1885 iwl_pcie_rxq_check_wrptr(trans
);
1886 iwl_pcie_txq_check_wrptrs(trans
);
1888 isr_stats
->wakeup
++;
1890 handled
|= CSR_INT_BIT_WAKEUP
;
1893 /* All uCode command responses, including Tx command responses,
1894 * Rx "responses" (frame-received notification), and other
1895 * notifications from uCode come through here*/
1896 if (inta
& (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
|
1897 CSR_INT_BIT_RX_PERIODIC
)) {
1898 IWL_DEBUG_ISR(trans
, "Rx interrupt\n");
1899 if (inta
& (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
)) {
1900 handled
|= (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
);
1901 iwl_write32(trans
, CSR_FH_INT_STATUS
,
1902 CSR_FH_INT_RX_MASK
);
1904 if (inta
& CSR_INT_BIT_RX_PERIODIC
) {
1905 handled
|= CSR_INT_BIT_RX_PERIODIC
;
1907 CSR_INT
, CSR_INT_BIT_RX_PERIODIC
);
1909 /* Sending RX interrupt require many steps to be done in the
1911 * 1- write interrupt to current index in ICT table.
1913 * 3- update RX shared data to indicate last write index.
1914 * 4- send interrupt.
1915 * This could lead to RX race, driver could receive RX interrupt
1916 * but the shared data changes does not reflect this;
1917 * periodic interrupt will detect any dangling Rx activity.
1920 /* Disable periodic interrupt; we use it as just a one-shot. */
1921 iwl_write8(trans
, CSR_INT_PERIODIC_REG
,
1922 CSR_INT_PERIODIC_DIS
);
1925 * Enable periodic interrupt in 8 msec only if we received
1926 * real RX interrupt (instead of just periodic int), to catch
1927 * any dangling Rx interrupt. If it was just the periodic
1928 * interrupt, there was no dangling Rx activity, and no need
1929 * to extend the periodic interrupt; one-shot is enough.
1931 if (inta
& (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
))
1932 iwl_write8(trans
, CSR_INT_PERIODIC_REG
,
1933 CSR_INT_PERIODIC_ENA
);
1938 iwl_pcie_rx_handle(trans
, 0);
1942 /* This "Tx" DMA channel is used only for loading uCode */
1943 if (inta
& CSR_INT_BIT_FH_TX
) {
1944 iwl_write32(trans
, CSR_FH_INT_STATUS
, CSR_FH_INT_TX_MASK
);
1945 IWL_DEBUG_ISR(trans
, "uCode load interrupt\n");
1947 handled
|= CSR_INT_BIT_FH_TX
;
1948 /* Wake up uCode load routine, now that load is complete */
1949 trans_pcie
->ucode_write_complete
= true;
1950 wake_up(&trans_pcie
->ucode_write_waitq
);

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}
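
	/*
	 * Note: re-enabling is deliberately conservative.  Unless the core
	 * still has interrupts enabled, only the specific cause that was
	 * just handled (firmware load, rfkill, or alive/Rx) is unmasked
	 * again; everything else stays masked until the driver re-enables it.
	 */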
	spin_lock(&trans_pcie->irq_lock);
	/* only re-enable all interrupts if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	/* we are loading the firmware, enable FH_TX interrupt only */
	else if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
	/* Re-enable the ALIVE / Rx interrupt if it occurred */
	else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
		iwl_enable_fw_load_int_ctx_info(trans);
	spin_unlock(&trans_pcie->irq_lock);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
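
/*
 * Note on the ICT (Interrupt Cause Table): instead of the driver reading the
 * interrupt cause out of a CSR register on every interrupt, the device writes
 * the cause information into a shared table in host DRAM.  The interrupt
 * handler then consumes entries from the last index it read, which is much
 * cheaper than a register read across PCIe.
 */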

/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * Allocate the DRAM shared table; it is an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}

/*
 * Device is going up: inform it that we are using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
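
/*
 * Legacy (non-MSI-X) hard IRQ handler.  Returning IRQ_WAKE_THREAD wakes the
 * threaded handler (iwl_pcie_irq_handler) registered together with this
 * function via request_threaded_irq(); all of the real work happens there.
 */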
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
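
/*
 * MSI-X hard IRQ handler: nothing to mask here.  The per-vector causes are
 * read (and cleared) from the CSR_MSIX_*_INT_CAUSES_AD registers by the
 * threaded handler below, so we simply wake it up.
 */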
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the causes registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}

	inta_fh &= trans_pcie->fh_mask;

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking the FH register, check the HW register */
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}

	inta_hw &= trans_pcie->hw_mask;

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		/* uCode wakes up after power-down sleep */
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}