/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
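
/*
 * Illustrative example (not from the original source): a legacy TFD carries
 * 20 TBs, so after reserving 3 TBs as described above, IWL_PCIE_MAX_FRAGS()
 * leaves 17 TBs for skb fragments; TFH-based devices with 25 TBs get 22.
 */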

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @size: size used from the buffer
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 size;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

#define IWL_RX_TD_TYPE_MSK	0xff000000
#define IWL_RX_TD_SIZE_MSK	0x00ffffff
#define IWL_RX_TD_SIZE_2K	BIT(11)
#define IWL_RX_TD_TYPE		0

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @type_n_size: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 *	and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le32 type_n_size;
	__le64 addr;
	__le16 rbid;
	__le16 reserved;
} __packed;

#define IWL_RX_CD_SIZE		0xffffff00

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @type: buffer type (bit 0: external buff valid,
 *	bit 1: optional footer valid, bit 2-7: reserved)
 * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @size: buffer size, masked by IWL_RX_CD_SIZE
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	u8 type;
	u8 status;
	__le16 reserved1;
	__le16 rbid;
	__le32 size;
	u8 reserved2[22];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for reuse
 * @write_actual: last write index actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue state and the RBD lists
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
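
/*
 * Worked example (illustrative): the two wrap helpers above rely on
 * max_tfd_queue_size being a power of two, so masking replaces a modulo.
 * With a 256-entry ring:
 *
 *	iwl_queue_inc_wrap(trans, 255) == 0
 *	iwl_queue_dec_wrap(trans, 0)   == 255
 */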

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, that makes 20 bytes.
 * If we make it bigger then allocations will be bigger and copies slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
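
/*
 * Illustrative arithmetic (not from the original source): 12 bytes of
 * scratch/header data plus the 8-byte PN give the 20 bytes above, and
 * ALIGN(20, 64) rounds each entry up to 64 bytes, which presumably keeps
 * every iwl_pcie_first_tb_buf 64-byte aligned in the DMA array.
 */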

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * future HW changes this). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	bool debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq_memory;
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 page_offs, dev_cmd_offs;

	u8 cmd_queue;
	u8 def_rx_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u8 max_tbs;
	u16 tfd_size;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared by writing 1
	 * to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 * Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int _iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
int iwl_pcie_rx_alloc(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
				   struct iwl_txq *txq, u16 byte_cnt,
				   int num_tbs);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->tb_len);
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];

		return le16_to_cpu(tb->hi_n_len) >> 4;
	}
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}
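
/*
 * Illustrative usage sketch (not part of the original header): a caller
 * loading a firmware image would typically count the sections of one CPU
 * with iwl_pcie_get_num_sections() and then copy each one into DMA memory:
 *
 *	int num = iwl_pcie_get_num_sections(fw, 0);
 *
 *	for (i = 0; i < num; i++) {
 *		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
 *						   &dram->fw[i]);
 *		if (ret)
 *			return ret;
 *		dram->fw_cnt++;
 *	}
 */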

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keeps all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When the ALIVE interrupt is received, the ISR calls
		 * iwl_enable_fw_load_int_ctx_info() again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt, which allows us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
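
/*
 * Worked example (illustrative): this mask implements the SW window described
 * above struct iwl_txq.  For the command queue, n_window is TFD_CMD_SLOTS
 * (32) while the HW ring still has 256 TFDs, so HW index 0x47 maps to SW
 * entry 0x47 & 31 == 7; for data queues n_window equals the ring size and
 * the mapping is the identity.
 */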

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->cfg->use_tfh)
		idx = iwl_pcie_get_cmd_index(txq, idx);

	return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	int index = iwl_pcie_get_cmd_index(q, i);
	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

	return w >= r ?
		(index >= r && index < w) :
		!(index < r && index >= w);
}
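
/*
 * Worked example (illustrative): the check above is wrap-aware.  With a
 * 256-entry window, read_ptr == 250 and write_ptr == 5, the used region is
 * 250..255 plus 0..4, so iwl_queue_used(q, 252) and iwl_queue_used(q, 2) are
 * true while iwl_queue_used(q, 100) is false.
 */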

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg_dest_tlv || trans->ini_valid);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
				     bool low_power);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
#endif /* __iwl_trans_int_pcie_h__ */