/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};
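
/*
 * Usage sketch (illustrative, not part of the original file): the core never
 * touches registers directly; it goes through the bus_ops installed by the
 * bus backend (e.g. the MMIO helpers set up by mt76_mmio_init() below).
 * The rmw() callback is expected to clear the bits in @mask and then set the
 * bits in @val, roughly:
 *
 *	static u32 my_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
 *	{
 *		val |= dev->bus->rr(dev, offset) & ~mask;
 *		dev->bus->wr(dev, offset, val);
 *		return val;
 *	}
 *
 * my_rmw() is a hypothetical example; the mt76_set()/mt76_clear() macros
 * later in this header rely on exactly these semantics.
 */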

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	struct mt76_txwi_cache *txwi;
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

struct mt76_wcid {
	u8 idx;
	u8 hw_key_idx;

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
};
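
/*
 * Illustrative sketch (hypothetical chip driver, not part of this header):
 * a chip driver fills in mt76_driver_ops and hands it to the core.  The core
 * calls tx_prepare_skb() to build the per-frame TXWI before DMA,
 * tx_complete_skb() when a TX descriptor is reclaimed, and rx_skb() for every
 * frame pulled off an RX ring.
 *
 *	static const struct mt76_driver_ops mychip_drv_ops = {
 *		.txwi_size = sizeof(struct mychip_txwi),
 *		.update_survey = mychip_update_channel,
 *		.tx_prepare_skb = mychip_tx_prepare_skb,
 *		.tx_complete_skb = mychip_tx_complete_skb,
 *		.rx_skb = mychip_queue_rx_skb,
 *		.rx_poll_complete = mychip_rx_poll_complete,
 *	};
 *
 * All mychip_* names above are placeholders.
 */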

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};
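
/*
 * Note (editorial): the anonymous struct and the flat array overlay the same
 * 38 bytes (4 CCK + 8 OFDM + 16 HT + 10 VHT entries), so per-rate TX power
 * can be written by modulation group or adjusted uniformly in one loop, e.g.:
 *
 *	struct mt76_rate_power rp = {};
 *	int i;
 *
 *	rp.cck[0] = 16;			// per-rate value, units are chip-specific
 *	for (i = 0; i < ARRAY_SIZE(rp.all); i++)
 *		rp.all[i] -= 2;		// back every rate off by the same amount
 */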

struct mt76_rx_status {
	u32 flag;
	u16 freq;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
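
/*
 * The struct above is an intermediate, driver-filled copy of per-frame RX
 * status; its fields mirror the corresponding members of
 * struct ieee80211_rx_status and it is small enough to live in skb->cb, so
 * the chip driver can fill it while parsing the RX descriptor and let the
 * core translate it when the frame is delivered.  Sketch of the intended
 * flow (the mychip_* helper name is hypothetical):
 *
 *	struct mt76_rx_status *status = (void *)skb->cb;
 *
 *	status->freq = dev->chandef.chan->center_freq;
 *	status->band = dev->chandef.chan->band;
 *	status->signal = mychip_rssi_from_rxwi(rxwi);
 *	mt76_rx(dev, MT_RXQ_MAIN, skb);
 */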

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
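
/*
 * Example use of the register helpers (illustrative; MT_EXAMPLE_* are
 * hypothetical register/field names, the real ones live in the per-chip
 * register headers):
 *
 *	u32 val = mt76_rr(dev, MT_EXAMPLE_CFG);
 *	mt76_set(dev, MT_EXAMPLE_CFG, BIT(0));
 *	mt76_clear(dev, MT_EXAMPLE_CFG, BIT(1));
 *	mt76_rmw_field(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_TIMEOUT, 8);
 *
 * Note the helpers expect the wrapping chip-driver structure (which embeds
 * struct mt76_dev as "mt76"), not a bare struct mt76_dev pointer.
 */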

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}
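
/*
 * dev->rev packs the chip ID in the upper 16 bits and the hardware revision
 * in the lower 16 bits, so for example (values purely illustrative):
 *
 *	dev->rev == 0x76020044
 *	mt76_chip(dev) == 0x7602
 *	mt76_rev(dev) == 0x0044
 */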

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
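
/*
 * Illustrative TX-path sketch using the queue wrappers (error handling and
 * descriptor setup omitted; "buf", "nbufs", "tx_info" and "txwi" would come
 * from DMA-mapping the frame, see mt76_tx_queue_skb() declared below for the
 * real thing):
 *
 *	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_BE];
 *
 *	spin_lock_bh(&q->lock);
 *	mt76_queue_add_buf(dev, q, buf, nbufs, tx_info, skb, txwi);
 *	mt76_queue_kick(dev, q);
 *	spin_unlock_bh(&q->lock);
 */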

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}
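
/*
 * struct mt76_txq lives in the drv_priv area of mac80211's ieee80211_txq, so
 * the two can be converted back and forth; the reverse direction is a plain
 * cast of txq->drv_priv.  Illustrative:
 *
 *	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
 *	struct ieee80211_txq *txq2 = mtxq_to_txq(mtxq);
 */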

int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, enum mt76_rxq_id q);

#endif
374#endif