]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/net/wireless/iwlwifi/mvm/mac80211.c
iwlwifi: avoid read/write operations if the bus is dead
[thirdparty/linux.git] / drivers / net / wireless / iwlwifi / mvm / mac80211.c
CommitLineData
8ca151b5
JB
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
51368bf7 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
8b4139dc 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
8ca151b5
JB
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23 * USA
24 *
25 * The full GNU General Public License is included in this distribution
410dc5aa 26 * in the file called COPYING.
8ca151b5
JB
27 *
28 * Contact Information:
29 * Intel Linux Wireless <ilw@linux.intel.com>
30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 *
32 * BSD LICENSE
33 *
51368bf7 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
8b4139dc 35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
8ca151b5
JB
36 * All rights reserved.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 *
42 * * Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * * Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in
46 * the documentation and/or other materials provided with the
47 * distribution.
48 * * Neither the name Intel Corporation nor the names of its
49 * contributors may be used to endorse or promote products derived
50 * from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *
64 *****************************************************************************/
65#include <linux/kernel.h>
66#include <linux/slab.h>
67#include <linux/skbuff.h>
68#include <linux/netdevice.h>
69#include <linux/etherdevice.h>
f0c2646a 70#include <linux/ip.h>
2ee8f021 71#include <linux/if_arp.h>
aadede6e 72#include <linux/devcoredump.h>
8ca151b5 73#include <net/mac80211.h>
7b1dd048 74#include <net/ieee80211_radiotap.h>
f0c2646a 75#include <net/tcp.h>
8ca151b5
JB
76
77#include "iwl-op-mode.h"
78#include "iwl-io.h"
79#include "mvm.h"
80#include "sta.h"
81#include "time-event.h"
82#include "iwl-eeprom-parse.h"
8ca151b5 83#include "iwl-phy-db.h"
507cadf2 84#include "testmode.h"
655e6d6d
EG
85#include "iwl-fw-error-dump.h"
86#include "iwl-prph.h"
363039be 87#include "iwl-csr.h"
88931cc9 88#include "iwl-nvm-parse.h"
8ca151b5
JB
89
90static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 {
92 .max = 1,
8eb38710 93 .types = BIT(NL80211_IFTYPE_STATION),
8ca151b5 94 },
3c15a0fb
JB
95 {
96 .max = 1,
8eb38710
IP
97 .types = BIT(NL80211_IFTYPE_AP) |
98 BIT(NL80211_IFTYPE_P2P_CLIENT) |
3c15a0fb
JB
99 BIT(NL80211_IFTYPE_P2P_GO),
100 },
101 {
102 .max = 1,
103 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
104 },
8ca151b5
JB
105};
106
107static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
108 {
2624a5ca 109 .num_different_channels = 2,
8ca151b5
JB
110 .max_interfaces = 3,
111 .limits = iwl_mvm_limits,
112 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
113 },
114};
115
f0c2646a
JB
116#ifdef CONFIG_PM_SLEEP
117static const struct nl80211_wowlan_tcp_data_token_feature
118iwl_mvm_wowlan_tcp_token_feature = {
119 .min_len = 0,
120 .max_len = 255,
121 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
122};
123
124static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
125 .tok = &iwl_mvm_wowlan_tcp_token_feature,
126 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
127 sizeof(struct ethhdr) -
128 sizeof(struct iphdr) -
129 sizeof(struct tcphdr),
130 .data_interval_max = 65535, /* __le16 in API */
131 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
132 sizeof(struct ethhdr) -
133 sizeof(struct iphdr) -
134 sizeof(struct tcphdr),
135 .seq = true,
136};
137#endif
138
77736923 139#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
2ee8f021
EP
140/*
141 * Use the reserved field to indicate magic values.
142 * these values will only be used internally by the driver,
143 * and won't make it to the fw (reserved will be 0).
144 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
145 * be the vif's ip address. in case there is not a single
146 * ip address (0, or more than 1), this attribute will
147 * be skipped.
148 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
149 * the LSB bytes of the vif's mac address
150 */
151enum {
152 BC_FILTER_MAGIC_NONE = 0,
153 BC_FILTER_MAGIC_IP,
154 BC_FILTER_MAGIC_MAC,
155};
156
77736923
EP
157static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
158 {
159 /* arp */
160 .discard = 0,
161 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
162 .attrs = {
163 {
164 /* frame type - arp, hw type - ethernet */
165 .offset_type =
166 BCAST_FILTER_OFFSET_PAYLOAD_START,
167 .offset = sizeof(rfc1042_header),
168 .val = cpu_to_be32(0x08060001),
169 .mask = cpu_to_be32(0xffffffff),
170 },
2ee8f021
EP
171 {
172 /* arp dest ip */
173 .offset_type =
174 BCAST_FILTER_OFFSET_PAYLOAD_START,
175 .offset = sizeof(rfc1042_header) + 2 +
176 sizeof(struct arphdr) +
177 ETH_ALEN + sizeof(__be32) +
178 ETH_ALEN,
179 .mask = cpu_to_be32(0xffffffff),
180 /* mark it as special field */
181 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
182 },
183 },
184 },
185 {
186 /* dhcp offer bcast */
187 .discard = 0,
188 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
189 .attrs = {
190 {
191 /* udp dest port - 68 (bootp client)*/
192 .offset_type = BCAST_FILTER_OFFSET_IP_END,
193 .offset = offsetof(struct udphdr, dest),
194 .val = cpu_to_be32(0x00440000),
195 .mask = cpu_to_be32(0xffff0000),
196 },
197 {
198 /* dhcp - lsb bytes of client hw address */
199 .offset_type = BCAST_FILTER_OFFSET_IP_END,
200 .offset = 38,
201 .mask = cpu_to_be32(0xffffffff),
202 /* mark it as special field */
203 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
204 },
77736923
EP
205 },
206 },
207 /* last filter must be empty */
208 {},
209};
210#endif
211
7498cf4c
EP
212void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
213{
7bb426ea 214 if (!iwl_mvm_is_d0i3_supported(mvm))
7498cf4c
EP
215 return;
216
217 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
576eeee9
EP
218 spin_lock_bh(&mvm->refs_lock);
219 mvm->refs[ref_type]++;
220 spin_unlock_bh(&mvm->refs_lock);
7498cf4c
EP
221 iwl_trans_ref(mvm->trans);
222}
223
224void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
225{
7bb426ea 226 if (!iwl_mvm_is_d0i3_supported(mvm))
7498cf4c
EP
227 return;
228
229 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
576eeee9
EP
230 spin_lock_bh(&mvm->refs_lock);
231 WARN_ON(!mvm->refs[ref_type]--);
232 spin_unlock_bh(&mvm->refs_lock);
7498cf4c
EP
233 iwl_trans_unref(mvm->trans);
234}
235
576eeee9
EP
236static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 enum iwl_mvm_ref_type except_ref)
7498cf4c 238{
576eeee9 239 int i, j;
7498cf4c 240
7bb426ea 241 if (!iwl_mvm_is_d0i3_supported(mvm))
7498cf4c
EP
242 return;
243
576eeee9
EP
244 spin_lock_bh(&mvm->refs_lock);
245 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 if (except_ref == i || !mvm->refs[i])
7498cf4c
EP
247 continue;
248
576eeee9
EP
249 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 i, mvm->refs[i]);
251 for (j = 0; j < mvm->refs[i]; j++)
252 iwl_trans_unref(mvm->trans);
253 mvm->refs[i] = 0;
7498cf4c 254 }
576eeee9 255 spin_unlock_bh(&mvm->refs_lock);
7498cf4c
EP
256}
257
f4cf8680
EP
258bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259{
260 int i;
261 bool taken = false;
262
263 if (!iwl_mvm_is_d0i3_supported(mvm))
264 return true;
265
266 spin_lock_bh(&mvm->refs_lock);
267 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 if (mvm->refs[i]) {
269 taken = true;
270 break;
271 }
272 }
273 spin_unlock_bh(&mvm->refs_lock);
274
275 return taken;
276}
277
576eeee9 278int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
d40fc489
GG
279{
280 iwl_mvm_ref(mvm, ref_type);
281
282 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 HZ)) {
285 WARN_ON_ONCE(1);
286 iwl_mvm_unref(mvm, ref_type);
287 return -EIO;
288 }
289
290 return 0;
291}
292
fe0f2de3
IP
293static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294{
295 int i;
296
297 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 for (i = 0; i < NUM_PHY_CTX; i++) {
299 mvm->phy_ctxts[i].id = i;
300 mvm->phy_ctxts[i].ref = 0;
301 }
302}
303
88931cc9 304struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
8ba2d7a1 305 const char *alpha2,
47c8b154
JD
306 enum iwl_mcc_source src_id,
307 bool *changed)
88931cc9
AN
308{
309 struct ieee80211_regdomain *regd = NULL;
310 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
311 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
312 struct iwl_mcc_update_resp *resp;
313
314 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
315
8ba2d7a1 316 lockdep_assert_held(&mvm->mutex);
88931cc9 317
8ba2d7a1 318 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
88931cc9
AN
319 if (IS_ERR_OR_NULL(resp)) {
320 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
b8c474d9 321 PTR_ERR_OR_ZERO(resp));
8ba2d7a1 322 goto out;
88931cc9
AN
323 }
324
47c8b154
JD
325 if (changed)
326 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
327
162ee3c9 328 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
88931cc9
AN
329 __le32_to_cpu(resp->n_channels),
330 resp->channels,
331 __le16_to_cpu(resp->mcc));
8ba2d7a1
EH
332 /* Store the return source id */
333 src_id = resp->source_id;
88931cc9
AN
334 kfree(resp);
335 if (IS_ERR_OR_NULL(regd)) {
336 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
b8c474d9 337 PTR_ERR_OR_ZERO(regd));
8ba2d7a1 338 goto out;
88931cc9
AN
339 }
340
8ba2d7a1
EH
341 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
342 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
88931cc9 343 mvm->lar_regdom_set = true;
8ba2d7a1 344 mvm->mcc_src = src_id;
88931cc9 345
8ba2d7a1 346out:
88931cc9
AN
347 return regd;
348}
349
47c8b154
JD
350void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
351{
352 bool changed;
353 struct ieee80211_regdomain *regd;
354
355 if (!iwl_mvm_is_lar_supported(mvm))
356 return;
357
358 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
359 if (!IS_ERR_OR_NULL(regd)) {
360 /* only update the regulatory core if changed */
361 if (changed)
362 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
363
364 kfree(regd);
365 }
366}
367
368struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
369 bool *changed)
8ba2d7a1
EH
370{
371 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
372 iwl_mvm_is_wifi_mcc_supported(mvm) ?
373 MCC_SOURCE_GET_CURRENT :
47c8b154 374 MCC_SOURCE_OLD_FW, changed);
8ba2d7a1
EH
375}
376
377int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
378{
379 enum iwl_mcc_source used_src;
380 struct ieee80211_regdomain *regd;
b6e160ab
AN
381 int ret;
382 bool changed;
8ba2d7a1
EH
383 const struct ieee80211_regdomain *r =
384 rtnl_dereference(mvm->hw->wiphy->regd);
385
386 if (!r)
b6e160ab 387 return -ENOENT;
8ba2d7a1
EH
388
389 /* save the last source in case we overwrite it below */
390 used_src = mvm->mcc_src;
391 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
392 /* Notify the firmware we support wifi location updates */
47c8b154 393 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
8ba2d7a1
EH
394 if (!IS_ERR_OR_NULL(regd))
395 kfree(regd);
396 }
397
398 /* Now set our last stored MCC and source */
b6e160ab
AN
399 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
400 &changed);
8ba2d7a1
EH
401 if (IS_ERR_OR_NULL(regd))
402 return -EIO;
403
b6e160ab
AN
404 /* update cfg80211 if the regdomain was changed */
405 if (changed)
406 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
407 else
408 ret = 0;
8ba2d7a1 409
b6e160ab
AN
410 kfree(regd);
411 return ret;
8ba2d7a1
EH
412}
413
8ca151b5
JB
414int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
415{
416 struct ieee80211_hw *hw = mvm->hw;
831e85f3 417 int num_mac, ret, i;
5f4c02e2
JB
418 static const u32 mvm_ciphers[] = {
419 WLAN_CIPHER_SUITE_WEP40,
420 WLAN_CIPHER_SUITE_WEP104,
421 WLAN_CIPHER_SUITE_TKIP,
422 WLAN_CIPHER_SUITE_CCMP,
423 };
8ca151b5
JB
424
425 /* Tell mac80211 our characteristics */
30686bf7
JB
426 ieee80211_hw_set(hw, SIGNAL_DBM);
427 ieee80211_hw_set(hw, SPECTRUM_MGMT);
428 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
429 ieee80211_hw_set(hw, QUEUE_CONTROL);
430 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
431 ieee80211_hw_set(hw, SUPPORTS_PS);
432 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
433 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
434 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
435 ieee80211_hw_set(hw, CONNECTION_MONITOR);
436 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
437 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
438 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
8ca151b5 439
19e737c9 440 hw->queues = mvm->first_agg_queue;
398e8c6c 441 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
7b1dd048
EG
442 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
443 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
339b3086
ES
444 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
445 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
8ca151b5 446 hw->rate_control_algorithm = "iwl-mvm-rs";
848955cc
JB
447 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
448 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
8ca151b5 449
5f4c02e2
JB
450 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
451 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
452 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
453 hw->wiphy->cipher_suites = mvm->ciphers;
454
8ca151b5
JB
455 /*
456 * Enable 11w if advertised by firmware and software crypto
457 * is not enabled (as the firmware will interpret some mgmt
458 * packets, so enabling it with software crypto isn't safe)
459 */
460 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
5f4c02e2 461 !iwlwifi_mod_params.sw_crypto) {
30686bf7 462 ieee80211_hw_set(hw, MFP_CAPABLE);
5f4c02e2
JB
463 mvm->ciphers[hw->wiphy->n_cipher_suites] =
464 WLAN_CIPHER_SUITE_AES_CMAC;
465 hw->wiphy->n_cipher_suites++;
466 }
467
468 /* currently FW API supports only one optional cipher scheme */
469 if (mvm->fw->cs[0].cipher) {
470 mvm->hw->n_cipher_schemes = 1;
471 mvm->hw->cipher_schemes = &mvm->fw->cs[0];
472 mvm->ciphers[hw->wiphy->n_cipher_suites] =
473 mvm->fw->cs[0].cipher;
474 hw->wiphy->n_cipher_suites++;
475 }
8ca151b5 476
30686bf7 477 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
1f940386
LC
478 hw->wiphy->features |=
479 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
3db93420
JB
480 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
481 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
fb98be5e 482
8ca151b5
JB
483 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
484 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
fe0f2de3 485 hw->chanctx_data_size = sizeof(u16);
8ca151b5
JB
486
487 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
3c15a0fb
JB
488 BIT(NL80211_IFTYPE_P2P_CLIENT) |
489 BIT(NL80211_IFTYPE_AP) |
490 BIT(NL80211_IFTYPE_P2P_GO) |
c13b1725
EG
491 BIT(NL80211_IFTYPE_P2P_DEVICE) |
492 BIT(NL80211_IFTYPE_ADHOC);
5023d966 493
a2f73b6c 494 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
8ba2d7a1
EH
495 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
496 if (iwl_mvm_is_lar_supported(mvm))
497 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
498 else
499 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
500 REGULATORY_DISABLE_BEACON_HINTS;
8ca151b5 501
3e56eadf
JB
502 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
503 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
504
94bbed72 505 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
bd3398e2 506
8ca151b5
JB
507 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
508 hw->wiphy->n_iface_combinations =
509 ARRAY_SIZE(iwl_mvm_iface_combinations);
510
c451e6d4 511 hw->wiphy->max_remain_on_channel_duration = 10000;
8ca151b5 512 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
f1a68542
EG
513 /* we can compensate an offset of up to 3 channels = 15 MHz */
514 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
8ca151b5
JB
515
516 /* Extract MAC address */
517 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
518 hw->wiphy->addresses = mvm->addresses;
519 hw->wiphy->n_addresses = 1;
831e85f3
IP
520
521 /* Extract additional MAC addresses if available */
522 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
523 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
524
525 for (i = 1; i < num_mac; i++) {
526 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
8ca151b5 527 ETH_ALEN);
831e85f3 528 mvm->addresses[i].addr[5]++;
8ca151b5
JB
529 hw->wiphy->n_addresses++;
530 }
531
fe0f2de3
IP
532 iwl_mvm_reset_phy_ctxts(mvm);
533
999d2568 534 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
20f1a5de 535
8ca151b5
JB
536 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
537
c7d42480 538 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
507e4cda
LC
539 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
540 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
541
859d914c 542 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
507e4cda
LC
543 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
544 else
545 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
546
8ca151b5
JB
547 if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
548 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
549 &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
3d44eebf 550 if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
8ca151b5
JB
551 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
552 &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
553
859d914c
JB
554 if (fw_has_capa(&mvm->fw->ucode_capa,
555 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
556 fw_has_api(&mvm->fw->ucode_capa,
557 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
3d44eebf
ES
558 hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
559 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
560 }
561
8ca151b5
JB
562 hw->wiphy->hw_version = mvm->trans->hw_id;
563
ade50652 564 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
8ca151b5
JB
565 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
566 else
567 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
568
9954b37c
EG
569 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
570 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
571 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
572 /* we create the 802.11 header and zero length SSID IE. */
573 hw->wiphy->max_sched_scan_ie_len =
574 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
cd55ccea
AS
575 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
576 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
577
578 /*
579 * the firmware uses u8 for num of iterations, but 0xff is saved for
580 * infinite loop, so the maximum number of iterations is actually 254.
581 */
582 hw->wiphy->max_sched_scan_plan_iterations = 254;
35a000b7 583
8ca151b5 584 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
ab480030 585 NL80211_FEATURE_LOW_PRIORITY_SCAN |
0d8614b4
EP
586 NL80211_FEATURE_P2P_GO_OPPPS |
587 NL80211_FEATURE_DYNAMIC_SMPS |
9b5452fd
EG
588 NL80211_FEATURE_STATIC_SMPS |
589 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
8ca151b5 590
859d914c
JB
591 if (fw_has_capa(&mvm->fw->ucode_capa,
592 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
f1daa00e 593 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
859d914c
JB
594 if (fw_has_capa(&mvm->fw->ucode_capa,
595 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
226bcd48 596 hw->wiphy->features |= NL80211_FEATURE_QUIET;
f1daa00e 597
859d914c
JB
598 if (fw_has_capa(&mvm->fw->ucode_capa,
599 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
73897bd1
AO
600 hw->wiphy->features |=
601 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
602
859d914c
JB
603 if (fw_has_capa(&mvm->fw->ucode_capa,
604 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
73897bd1
AO
605 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
606
8ca151b5
JB
607 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
608
609#ifdef CONFIG_PM_SLEEP
d15a747f
EP
610 if (iwl_mvm_is_d0i3_supported(mvm) &&
611 device_can_wakeup(mvm->trans->dev)) {
612 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
613 hw->wiphy->wowlan = &mvm->wowlan;
91742449
EP
614 }
615
616 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
8ca151b5
JB
617 mvm->trans->ops->d3_suspend &&
618 mvm->trans->ops->d3_resume &&
619 device_can_wakeup(mvm->trans->dev)) {
91742449
EP
620 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
621 WIPHY_WOWLAN_DISCONNECT |
622 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
623 WIPHY_WOWLAN_RFKILL_RELEASE |
624 WIPHY_WOWLAN_NET_DETECT;
8ca151b5 625 if (!iwlwifi_mod_params.sw_crypto)
964dc9e2
JB
626 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
627 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
628 WIPHY_WOWLAN_4WAY_HANDSHAKE;
629
630 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
631 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
632 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
c55385f5 633 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
964dc9e2
JB
634 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
635 hw->wiphy->wowlan = &mvm->wowlan;
8ca151b5
JB
636 }
637#endif
638
77736923
EP
639#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
640 /* assign default bcast filtering configuration */
641 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
642#endif
643
8ca151b5
JB
644 ret = iwl_mvm_leds_init(mvm);
645 if (ret)
646 return ret;
647
859d914c
JB
648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
d8f1c515
AN
650 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
651 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
7c4f0843 652 ieee80211_hw_set(hw, TDLS_WIDER_BW);
d8f1c515
AN
653 }
654
859d914c
JB
655 if (fw_has_capa(&mvm->fw->ucode_capa,
656 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
1d3c3f63
AN
657 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
658 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
659 }
660
93190fb0
AA
661 hw->netdev_features |= mvm->cfg->features;
662 if (!iwl_mvm_is_csum_supported(mvm))
663 hw->netdev_features &= ~NETIF_F_RXCSUM;
664
b7327d89
EG
665 ret = ieee80211_register_hw(mvm->hw);
666 if (ret)
667 iwl_mvm_leds_exit(mvm);
668
669 return ret;
8ca151b5
JB
670}
671
b2492501
AN
672static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
673 struct ieee80211_sta *sta,
674 struct sk_buff *skb)
675{
676 struct iwl_mvm_sta *mvmsta;
677 bool defer = false;
678
679 /*
680 * double check the IN_D0I3 flag both before and after
681 * taking the spinlock, in order to prevent taking
682 * the spinlock when not needed.
683 */
684 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
685 return false;
686
687 spin_lock(&mvm->d0i3_tx_lock);
688 /*
689 * testing the flag again ensures the skb dequeue
690 * loop (on d0i3 exit) hasn't run yet.
691 */
692 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
693 goto out;
694
695 mvmsta = iwl_mvm_sta_from_mac80211(sta);
696 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
697 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
698 goto out;
699
700 __skb_queue_tail(&mvm->d0i3_tx, skb);
701 ieee80211_stop_queues(mvm->hw);
702
703 /* trigger wakeup */
704 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
705 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
706
707 defer = true;
708out:
709 spin_unlock(&mvm->d0i3_tx_lock);
710 return defer;
711}
712
8ca151b5
JB
713static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
714 struct ieee80211_tx_control *control,
715 struct sk_buff *skb)
716{
717 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3e56eadf
JB
718 struct ieee80211_sta *sta = control->sta;
719 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
720 struct ieee80211_hdr *hdr = (void *)skb->data;
8ca151b5 721
9ee718aa
EL
722 if (iwl_mvm_is_radio_killed(mvm)) {
723 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
8ca151b5
JB
724 goto drop;
725 }
726
398e8c6c 727 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
a6cc5163
MG
728 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
729 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
8ca151b5
JB
730 goto drop;
731
3e56eadf
JB
732 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
733 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
734 ieee80211_is_mgmt(hdr->frame_control) &&
735 !ieee80211_is_deauth(hdr->frame_control) &&
736 !ieee80211_is_disassoc(hdr->frame_control) &&
737 !ieee80211_is_action(hdr->frame_control)))
738 sta = NULL;
739
740 if (sta) {
b2492501
AN
741 if (iwl_mvm_defer_tx(mvm, sta, skb))
742 return;
3e56eadf 743 if (iwl_mvm_tx_skb(mvm, skb, sta))
8ca151b5
JB
744 goto drop;
745 return;
746 }
747
748 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
749 goto drop;
750 return;
751 drop:
752 ieee80211_free_txskb(hw, skb);
753}
754
205e2210
EG
755static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
756{
757 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
758 return false;
759 return true;
760}
761
762static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
763{
764 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
765 return false;
766 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
767 return true;
768
769 /* enabled by default */
770 return true;
771}
772
4203263d
EG
773#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
774 do { \
775 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
776 break; \
777 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
778 } while (0)
779
780static void
781iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
782 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
783 enum ieee80211_ampdu_mlme_action action)
784{
785 struct iwl_fw_dbg_trigger_tlv *trig;
786 struct iwl_fw_dbg_trigger_ba *ba_trig;
787
788 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
789 return;
790
791 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
792 ba_trig = (void *)trig->data;
793
794 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
795 return;
796
797 switch (action) {
798 case IEEE80211_AMPDU_TX_OPERATIONAL: {
799 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
800 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
801
802 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
803 "TX AGG START: MAC %pM tid %d ssn %d\n",
804 sta->addr, tid, tid_data->ssn);
805 break;
806 }
807 case IEEE80211_AMPDU_TX_STOP_CONT:
808 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
809 "TX AGG STOP: MAC %pM tid %d\n",
810 sta->addr, tid);
811 break;
812 case IEEE80211_AMPDU_RX_START:
813 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
814 "RX AGG START: MAC %pM tid %d ssn %d\n",
815 sta->addr, tid, rx_ba_ssn);
816 break;
817 case IEEE80211_AMPDU_RX_STOP:
818 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
819 "RX AGG STOP: MAC %pM tid %d\n",
820 sta->addr, tid);
821 break;
822 default:
823 break;
824 }
825}
826
8ca151b5
JB
827static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
828 struct ieee80211_vif *vif,
829 enum ieee80211_ampdu_mlme_action action,
830 struct ieee80211_sta *sta, u16 tid,
e3abc8ff 831 u16 *ssn, u8 buf_size, bool amsdu)
8ca151b5
JB
832{
833 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
834 int ret;
b2492501 835 bool tx_agg_ref = false;
8ca151b5
JB
836
837 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
838 sta->addr, tid, action);
839
840 if (!(mvm->nvm_data->sku_cap_11n_enable))
841 return -EACCES;
842
b2492501 843 /* return from D0i3 before starting a new Tx aggregation */
9256c205
EP
844 switch (action) {
845 case IEEE80211_AMPDU_TX_START:
846 case IEEE80211_AMPDU_TX_STOP_CONT:
847 case IEEE80211_AMPDU_TX_STOP_FLUSH:
848 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
849 case IEEE80211_AMPDU_TX_OPERATIONAL:
b2492501 850 /*
9256c205
EP
851 * for tx start, wait synchronously until D0i3 exit to
852 * get the correct sequence number for the tid.
853 * additionally, some other ampdu actions use direct
854 * target access, which is not handled automatically
855 * by the trans layer (unlike commands), so wait for
856 * d0i3 exit in these cases as well.
b2492501 857 */
d40fc489
GG
858 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
859 if (ret)
860 return ret;
861
862 tx_agg_ref = true;
9256c205
EP
863 break;
864 default:
865 break;
b2492501
AN
866 }
867
8ca151b5
JB
868 mutex_lock(&mvm->mutex);
869
870 switch (action) {
871 case IEEE80211_AMPDU_RX_START:
205e2210 872 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
8ca151b5
JB
873 ret = -EINVAL;
874 break;
875 }
876 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
877 break;
878 case IEEE80211_AMPDU_RX_STOP:
879 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
880 break;
881 case IEEE80211_AMPDU_TX_START:
205e2210 882 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
5d158efa
EG
883 ret = -EINVAL;
884 break;
885 }
8ca151b5
JB
886 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
887 break;
888 case IEEE80211_AMPDU_TX_STOP_CONT:
e3d9e7ce
EG
889 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
890 break;
8ca151b5
JB
891 case IEEE80211_AMPDU_TX_STOP_FLUSH:
892 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
e3d9e7ce 893 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
8ca151b5
JB
894 break;
895 case IEEE80211_AMPDU_TX_OPERATIONAL:
896 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
897 break;
898 default:
899 WARN_ON_ONCE(1);
900 ret = -EINVAL;
901 break;
902 }
4203263d
EG
903
904 if (!ret) {
905 u16 rx_ba_ssn = 0;
906
907 if (action == IEEE80211_AMPDU_RX_START)
908 rx_ba_ssn = *ssn;
909
910 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
911 rx_ba_ssn, action);
912 }
8ca151b5
JB
913 mutex_unlock(&mvm->mutex);
914
b2492501
AN
915 /*
916 * If the tid is marked as started, we won't use it for offloaded
917 * traffic on the next D0i3 entry. It's safe to unref.
918 */
919 if (tx_agg_ref)
920 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
921
8ca151b5
JB
922 return ret;
923}
924
925static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
926 struct ieee80211_vif *vif)
927{
928 struct iwl_mvm *mvm = data;
929 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
930
931 mvmvif->uploaded = false;
932 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
933
8ca151b5
JB
934 spin_lock_bh(&mvm->time_event_lock);
935 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
936 spin_unlock_bh(&mvm->time_event_lock);
937
fe0f2de3 938 mvmvif->phy_ctxt = NULL;
8a275bad 939 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
8ca151b5
JB
940}
941
aadede6e
JB
942static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
943 const void *data, size_t datalen)
944{
945 const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
946 ssize_t bytes_read;
947 ssize_t bytes_read_trans;
948
949 if (offset < dump_ptrs->op_mode_len) {
950 bytes_read = min_t(ssize_t, count,
951 dump_ptrs->op_mode_len - offset);
952 memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
953 bytes_read);
954 offset += bytes_read;
955 count -= bytes_read;
956
957 if (count == 0)
958 return bytes_read;
959 } else {
960 bytes_read = 0;
961 }
962
963 if (!dump_ptrs->trans_ptr)
964 return bytes_read;
965
966 offset -= dump_ptrs->op_mode_len;
967 bytes_read_trans = min_t(ssize_t, count,
968 dump_ptrs->trans_ptr->len - offset);
969 memcpy(buffer + bytes_read,
970 (u8 *)dump_ptrs->trans_ptr->data + offset,
971 bytes_read_trans);
972
973 return bytes_read + bytes_read_trans;
974}
975
976static void iwl_mvm_free_coredump(const void *data)
977{
978 const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
979
980 vfree(fw_error_dump->op_mode_ptr);
981 vfree(fw_error_dump->trans_ptr);
982 kfree(fw_error_dump);
983}
984
04fd2c28
LK
985static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
986 struct iwl_fw_error_dump_data **dump_data)
987{
988 struct iwl_fw_error_dump_fifo *fifo_hdr;
989 u32 *fifo_data;
990 u32 fifo_len;
991 unsigned long flags;
992 int i, j;
993
994 if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
995 return;
996
997 /* Pull RXF data from all RXFs */
998 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
999 /*
1000 * Keep aside the additional offset that might be needed for
1001 * next RXF
1002 */
1003 u32 offset_diff = RXF_DIFF_FROM_PREV * i;
1004
1005 fifo_hdr = (void *)(*dump_data)->data;
1006 fifo_data = (void *)fifo_hdr->data;
1007 fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
1008
1009 /* No need to try to read the data if the length is 0 */
1010 if (fifo_len == 0)
1011 continue;
1012
1013 /* Add a TLV for the RXF */
1014 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
1015 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1016
1017 fifo_hdr->fifo_num = cpu_to_le32(i);
1018 fifo_hdr->available_bytes =
1019 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1020 RXF_RD_D_SPACE +
1021 offset_diff));
1022 fifo_hdr->wr_ptr =
1023 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1024 RXF_RD_WR_PTR +
1025 offset_diff));
1026 fifo_hdr->rd_ptr =
1027 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1028 RXF_RD_RD_PTR +
1029 offset_diff));
1030 fifo_hdr->fence_ptr =
1031 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1032 RXF_RD_FENCE_PTR +
1033 offset_diff));
1034 fifo_hdr->fence_mode =
1035 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1036 RXF_SET_FENCE_MODE +
1037 offset_diff));
1038
1039 /* Lock fence */
1040 iwl_trans_write_prph(mvm->trans,
1041 RXF_SET_FENCE_MODE + offset_diff, 0x1);
1042 /* Set fence pointer to the same place like WR pointer */
1043 iwl_trans_write_prph(mvm->trans,
1044 RXF_LD_WR2FENCE + offset_diff, 0x1);
1045 /* Set fence offset */
1046 iwl_trans_write_prph(mvm->trans,
1047 RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
1048 0x0);
1049
1050 /* Read FIFO */
1051 fifo_len /= sizeof(u32); /* Size in DWORDS */
1052 for (j = 0; j < fifo_len; j++)
1053 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1054 RXF_FIFO_RD_FENCE_INC +
1055 offset_diff);
1056 *dump_data = iwl_fw_error_next_data(*dump_data);
1057 }
1058
1059 /* Pull TXF data from all TXFs */
1060 for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
1061 /* Mark the number of TXF we're pulling now */
1062 iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
1063
1064 fifo_hdr = (void *)(*dump_data)->data;
1065 fifo_data = (void *)fifo_hdr->data;
1066 fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
1067
1068 /* No need to try to read the data if the length is 0 */
1069 if (fifo_len == 0)
1070 continue;
1071
1072 /* Add a TLV for the FIFO */
1073 (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
1074 (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1075
1076 fifo_hdr->fifo_num = cpu_to_le32(i);
1077 fifo_hdr->available_bytes =
1078 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1079 TXF_FIFO_ITEM_CNT));
1080 fifo_hdr->wr_ptr =
1081 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1082 TXF_WR_PTR));
1083 fifo_hdr->rd_ptr =
1084 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1085 TXF_RD_PTR));
1086 fifo_hdr->fence_ptr =
1087 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1088 TXF_FENCE_PTR));
1089 fifo_hdr->fence_mode =
1090 cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1091 TXF_LOCK_FENCE));
1092
1093 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1094 iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
1095 TXF_WR_PTR);
1096
1097 /* Dummy-read to advance the read pointer to the head */
1098 iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
1099
1100 /* Read FIFO */
1101 fifo_len /= sizeof(u32); /* Size in DWORDS */
1102 for (j = 0; j < fifo_len; j++)
1103 fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1104 TXF_READ_MODIFY_DATA);
1105 *dump_data = iwl_fw_error_next_data(*dump_data);
1106 }
1107
1108 iwl_trans_release_nic_access(mvm->trans, &flags);
1109}
1110
b6eaa45a
EG
1111void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1112{
1113 if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1114 !mvm->fw_dump_desc)
1115 return;
1116
1117 kfree(mvm->fw_dump_desc);
1118 mvm->fw_dump_desc = NULL;
1119}
1120
e539761d
LK
1121#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
1122#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
1123
4bfa47f3 1124void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
655e6d6d
EG
1125{
1126 struct iwl_fw_error_dump_file *dump_file;
1127 struct iwl_fw_error_dump_data *dump_data;
1128 struct iwl_fw_error_dump_info *dump_info;
a549b296 1129 struct iwl_fw_error_dump_mem *dump_mem;
b6eaa45a 1130 struct iwl_fw_error_dump_trigger_desc *dump_trig;
48eb7b34 1131 struct iwl_mvm_dump_ptrs *fw_error_dump;
655e6d6d 1132 u32 sram_len, sram_ofs;
04fd2c28 1133 u32 file_len, fifo_data_len = 0;
addfaada 1134 u32 smem_len = mvm->cfg->smem_len;
86138324 1135 u32 sram2_len = mvm->cfg->dccm2_len;
36fb9017 1136 bool monitor_dump_only = false;
655e6d6d
EG
1137
1138 lockdep_assert_held(&mvm->mutex);
1139
053225de
EP
1140 /* there's no point in fw dump if the bus is dead */
1141 if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1142 IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
1143 return;
1144 }
1145
36fb9017
OG
1146 if (mvm->fw_dump_trig &&
1147 mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
1148 monitor_dump_only = true;
1149
aadede6e 1150 fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
48eb7b34
EG
1151 if (!fw_error_dump)
1152 return;
1153
f53bf4c7
LK
1154 /* SRAM - include stack CCM if driver knows the values for it */
1155 if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1156 const struct fw_img *img;
1157
1158 img = &mvm->fw->img[mvm->cur_ucode];
1159 sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1160 sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1161 } else {
1162 sram_ofs = mvm->cfg->dccm_offset;
1163 sram_len = mvm->cfg->dccm_len;
1164 }
655e6d6d 1165
04fd2c28
LK
1166 /* reading RXF/TXF sizes */
1167 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1168 struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1169 int i;
1170
1171 fifo_data_len = 0;
1172
1173 /* Count RXF size */
1174 for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1175 if (!mem_cfg->rxfifo_size[i])
1176 continue;
1177
1178 /* Add header info */
1179 fifo_data_len += mem_cfg->rxfifo_size[i] +
1180 sizeof(*dump_data) +
1181 sizeof(struct iwl_fw_error_dump_fifo);
1182 }
1183
1184 for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1185 if (!mem_cfg->txfifo_size[i])
1186 continue;
655e6d6d 1187
04fd2c28
LK
1188 /* Add header info */
1189 fifo_data_len += mem_cfg->txfifo_size[i] +
1190 sizeof(*dump_data) +
1191 sizeof(struct iwl_fw_error_dump_fifo);
1192 }
1193 }
655e6d6d
EG
1194
1195 file_len = sizeof(*dump_file) +
04fd2c28 1196 sizeof(*dump_data) * 2 +
a549b296 1197 sram_len + sizeof(*dump_mem) +
04fd2c28 1198 fifo_data_len +
655e6d6d
EG
1199 sizeof(*dump_info);
1200
36fb9017
OG
1201 /* Make room for the SMEM, if it exists */
1202 if (smem_len)
1203 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1204
1205 /* Make room for the secondary SRAM, if it exists */
1206 if (sram2_len)
1207 file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1208
f0afea54
MG
1209 /* Make room for fw's virtual image pages, if it exists */
1210 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
1211 file_len += mvm->num_of_paging_blk *
1212 (sizeof(*dump_data) +
1213 sizeof(struct iwl_fw_error_dump_paging) +
1214 PAGING_BLOCK_SIZE);
1215
36fb9017
OG
1216 /* If we only want a monitor dump, reset the file length */
1217 if (monitor_dump_only) {
1218 file_len = sizeof(*dump_file) + sizeof(*dump_data) +
1219 sizeof(*dump_info);
1220 }
1221
e539761d
LK
1222 /*
1223 * In 8000 HW family B-step include the ICCM (which resides separately)
1224 */
1225 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1226 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1227 file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1228 IWL8260_ICCM_LEN;
1229
b6eaa45a
EG
1230 if (mvm->fw_dump_desc)
1231 file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1232 mvm->fw_dump_desc->len;
1233
5bfe6f53 1234 dump_file = vzalloc(file_len);
48eb7b34
EG
1235 if (!dump_file) {
1236 kfree(fw_error_dump);
b6eaa45a 1237 iwl_mvm_free_fw_dump_desc(mvm);
655e6d6d 1238 return;
48eb7b34 1239 }
655e6d6d 1240
48eb7b34 1241 fw_error_dump->op_mode_ptr = dump_file;
655e6d6d
EG
1242
1243 dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
655e6d6d
EG
1244 dump_data = (void *)dump_file->data;
1245
1246 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1247 dump_data->len = cpu_to_le32(sizeof(*dump_info));
1248 dump_info = (void *) dump_data->data;
1249 dump_info->device_family =
1250 mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1251 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1252 cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
435da2ce 1253 dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
655e6d6d
EG
1254 memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1255 sizeof(dump_info->fw_human_readable));
1256 strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1257 sizeof(dump_info->dev_human_readable));
1258 strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1259 sizeof(dump_info->bus_human_readable));
1260
1261 dump_data = iwl_fw_error_next_data(dump_data);
04fd2c28
LK
1262 /* We only dump the FIFOs if the FW is in error state */
1263 if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1264 iwl_mvm_dump_fifos(mvm, &dump_data);
655e6d6d 1265
b6eaa45a
EG
1266 if (mvm->fw_dump_desc) {
1267 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1268 dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1269 mvm->fw_dump_desc->len);
1270 dump_trig = (void *)dump_data->data;
1271 memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1272 sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1273
1274 /* now we can free this copy */
1275 iwl_mvm_free_fw_dump_desc(mvm);
1276 dump_data = iwl_fw_error_next_data(dump_data);
1277 }
1278
36fb9017
OG
1279 /* In case we only want monitor dump, skip to dump trasport data */
1280 if (monitor_dump_only)
1281 goto dump_trans_data;
1282
a549b296
EG
1283 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1284 dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1285 dump_mem = (void *)dump_data->data;
1286 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1287 dump_mem->offset = cpu_to_le32(sram_ofs);
1288 iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
655e6d6d
EG
1289 sram_len);
1290
addfaada
LK
1291 if (smem_len) {
1292 dump_data = iwl_fw_error_next_data(dump_data);
e06d8437
EG
1293 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1294 dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1295 dump_mem = (void *)dump_data->data;
1296 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1297 dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
addfaada 1298 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
e06d8437 1299 dump_mem->data, smem_len);
addfaada
LK
1300 }
1301
86138324
IY
1302 if (sram2_len) {
1303 dump_data = iwl_fw_error_next_data(dump_data);
1304 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1305 dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1306 dump_mem = (void *)dump_data->data;
1307 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1308 dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1309 iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1310 dump_mem->data, sram2_len);
1311 }
1312
e539761d
LK
1313 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1314 CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1315 dump_data = iwl_fw_error_next_data(dump_data);
1316 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1317 dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1318 sizeof(*dump_mem));
1319 dump_mem = (void *)dump_data->data;
1320 dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1321 dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1322 iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1323 dump_mem->data, IWL8260_ICCM_LEN);
1324 }
1325
f0afea54
MG
1326 /* Dump fw's virtual image */
1327 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
1328 u32 i;
1329
1330 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
1331 struct iwl_fw_error_dump_paging *paging;
1332 struct page *pages =
1333 mvm->fw_paging_db[i].fw_paging_block;
1334
1335 dump_data = iwl_fw_error_next_data(dump_data);
1336 dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
1337 dump_data->len = cpu_to_le32(sizeof(*paging) +
1338 PAGING_BLOCK_SIZE);
1339 paging = (void *)dump_data->data;
1340 paging->index = cpu_to_le32(i);
1341 memcpy(paging->data, page_address(pages),
1342 PAGING_BLOCK_SIZE);
1343 }
1344 }
1345
36fb9017
OG
1346dump_trans_data:
1347 fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
1348 mvm->fw_dump_trig);
48eb7b34
EG
1349 fw_error_dump->op_mode_len = file_len;
1350 if (fw_error_dump->trans_ptr)
1351 file_len += fw_error_dump->trans_ptr->len;
1352 dump_file->file_len = cpu_to_le32(file_len);
4bfa47f3 1353
aadede6e
JB
1354 dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1355 GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
d2709ad7 1356
36fb9017 1357 mvm->fw_dump_trig = NULL;
d2709ad7 1358 clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
655e6d6d 1359}
655e6d6d 1360
b6eaa45a
EG
1361struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1362 .trig_desc = {
1363 .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1364 },
1365};
1366
8ca151b5
JB
1367static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1368{
58629d9d
JB
1369 /* clear the D3 reconfig, we only need it to avoid dumping a
1370 * firmware coredump on reconfiguration, we shouldn't do that
1371 * on D3->D0 transition
1372 */
b6eaa45a
EG
1373 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1374 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
58629d9d 1375 iwl_mvm_fw_error_dump(mvm);
b6eaa45a 1376 }
1bd3cbc1 1377
744cb695
EP
1378 /* cleanup all stale references (scan, roc), but keep the
1379 * ucode_down ref until reconfig is complete
1380 */
1381 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1382
8ca151b5 1383 iwl_trans_stop_device(mvm->trans);
8ca151b5 1384
9af91f46 1385 mvm->scan_status = 0;
b1873300 1386 mvm->ps_disabled = false;
31b8b343 1387 mvm->calibrating = false;
8ca151b5
JB
1388
1389 /* just in case one was running */
1390 ieee80211_remain_on_channel_expired(mvm->hw);
1391
737719fe
AN
1392 /*
1393 * cleanup all interfaces, even inactive ones, as some might have
1394 * gone down during the HW restart
1395 */
1396 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
8ca151b5 1397
fe0f2de3 1398 mvm->p2p_device_vif = NULL;
37577fe2 1399 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
fe0f2de3
IP
1400
1401 iwl_mvm_reset_phy_ctxts(mvm);
8ca151b5 1402 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
a0f6bf2a 1403 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
8a275bad
EG
1404 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1405 memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1406 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1407 memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1408 memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1409 memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
8ca151b5
JB
1410
1411 ieee80211_wake_queues(mvm->hw);
1412
228670b2
EP
1413 /* clear any stale d0i3 state */
1414 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1415
8ca151b5 1416 mvm->vif_count = 0;
113a0447 1417 mvm->rx_ba_sessions = 0;
d2709ad7 1418 mvm->fw_dbg_conf = FW_DBG_INVALID;
91a8bcde
JB
1419
1420 /* keep statistics ticking */
1421 iwl_mvm_accu_radio_stats(mvm);
8ca151b5
JB
1422}
1423
a0a09243 1424int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
8ca151b5 1425{
8ca151b5
JB
1426 int ret;
1427
a0a09243 1428 lockdep_assert_held(&mvm->mutex);
8ca151b5
JB
1429
1430 /* Clean up some internal and mac80211 state on restart */
1431 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1432 iwl_mvm_restart_cleanup(mvm);
1433
1434 ret = iwl_mvm_up(mvm);
c47af22a
JB
1435
1436 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1437 /* Something went wrong - we need to finish some cleanup
1438 * that normally iwl_mvm_mac_restart_complete() below
1439 * would do.
1440 */
1441 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1442 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1443 }
1444
a0a09243
LC
1445 return ret;
1446}
1447
1448static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1449{
1450 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1451 int ret;
1452
37948fcf
EP
1453 /* Some hw restart cleanups must not hold the mutex */
1454 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1455 /*
1456 * Make sure we are out of d0i3. This is needed
1457 * to make sure the reference accounting is correct
1458 * (and there is no stale d0i3_exit_work).
1459 */
1460 wait_event_timeout(mvm->d0i3_exit_waitq,
1461 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1462 &mvm->status),
1463 HZ);
1464 }
1465
a0a09243
LC
1466 mutex_lock(&mvm->mutex);
1467 ret = __iwl_mvm_mac_start(mvm);
8ca151b5
JB
1468 mutex_unlock(&mvm->mutex);
1469
1470 return ret;
1471}
1472
cf2c92d8 1473static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
8ca151b5 1474{
8ca151b5
JB
1475 int ret;
1476
1477 mutex_lock(&mvm->mutex);
1478
1479 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
b2492501 1480 iwl_mvm_d0i3_enable_tx(mvm, NULL);
e7afe89f 1481 ret = iwl_mvm_update_quotas(mvm, true, NULL);
8ca151b5
JB
1482 if (ret)
1483 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1484 ret);
1485
7498cf4c
EP
1486 /* allow transport/FW low power modes */
1487 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1488
cbd2ae2d
AN
1489 /*
1490 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1491 * of packets the FW sent out, so we must reconnect.
1492 */
1493 iwl_mvm_teardown_tdls_peers(mvm);
1494
8ca151b5
JB
1495 mutex_unlock(&mvm->mutex);
1496}
1497
088070a2
EP
1498static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1499{
088070a2
EP
1500 if (!iwl_mvm_is_d0i3_supported(mvm))
1501 return;
1502
6735943f
EP
1503 if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1504 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1505 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1506 &mvm->status),
1507 HZ))
1508 WARN_ONCE(1, "D0i3 exit on resume timed out\n");
088070a2
EP
1509}
1510
cf2c92d8
EP
1511static void
1512iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1513 enum ieee80211_reconfig_type reconfig_type)
1514{
1515 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1516
1517 switch (reconfig_type) {
1518 case IEEE80211_RECONFIG_TYPE_RESTART:
1519 iwl_mvm_restart_complete(mvm);
1520 break;
1521 case IEEE80211_RECONFIG_TYPE_SUSPEND:
088070a2 1522 iwl_mvm_resume_complete(mvm);
cf2c92d8
EP
1523 break;
1524 }
1525}
1526
a0a09243 1527void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
8ca151b5 1528{
a0a09243 1529 lockdep_assert_held(&mvm->mutex);
7498cf4c 1530
91a8bcde
JB
1531 /* firmware counters are obviously reset now, but we shouldn't
1532 * partially track so also clear the fw_reset_accu counters.
1533 */
1534 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1535
0a79a0c0
EP
1536 /*
1537 * Disallow low power states when the FW is down by taking
1538 * the UCODE_DOWN ref. in case of ongoing hw restart the
1539 * ref is already taken, so don't take it again.
1540 */
1541 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1542 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
7498cf4c 1543
8ca151b5
JB
1544 /* async_handlers_wk is now blocked */
1545
1546 /*
1547 * The work item could be running or queued if the
1548 * ROC time event stops just as we get here.
1549 */
c779273b 1550 flush_work(&mvm->roc_done_wk);
8ca151b5
JB
1551
1552 iwl_trans_stop_device(mvm->trans);
8ca151b5
JB
1553
1554 iwl_mvm_async_handlers_purge(mvm);
1555 /* async_handlers_list is empty and will stay empty: HW is stopped */
1556
1557 /* the fw is stopped, the aux sta is dead: clean up driver state */
712b24ad 1558 iwl_mvm_del_aux_sta(mvm);
8ca151b5 1559
0a79a0c0
EP
1560 /*
1561 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1562 * won't be called in this case).
8b2b9fbf
AN
1563 * But make sure to cleanup interfaces that have gone down before/during
1564 * HW restart was requested.
0a79a0c0 1565 */
8b2b9fbf
AN
1566 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1567 ieee80211_iterate_interfaces(mvm->hw, 0,
1568 iwl_mvm_cleanup_iterator, mvm);
0a79a0c0 1569
963221be
AB
1570 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1571 * make sure there's nothing left there and warn if any is found.
1572 */
859d914c 1573 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
963221be
AB
1574 int i;
1575
507e4cda 1576 for (i = 0; i < mvm->max_scans; i++) {
6185af2a
LC
1577 if (WARN_ONCE(mvm->scan_uid_status[i],
1578 "UMAC scan UID %d status was not cleaned\n",
1579 i))
1580 mvm->scan_uid_status[i] = 0;
963221be
AB
1581 }
1582 }
1583
bc44886d 1584 mvm->ucode_loaded = false;
a0a09243 1585}
bc44886d 1586
a0a09243
LC
1587static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1588{
1589 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1590
1591 flush_work(&mvm->d0i3_exit_work);
1592 flush_work(&mvm->async_handlers_wk);
d2709ad7 1593 cancel_delayed_work_sync(&mvm->fw_dump_wk);
b6eaa45a 1594 iwl_mvm_free_fw_dump_desc(mvm);
a0a09243
LC
1595
1596 mutex_lock(&mvm->mutex);
1597 __iwl_mvm_mac_stop(mvm);
8ca151b5
JB
1598 mutex_unlock(&mvm->mutex);
1599
1600 /*
1601 * The worker might have been waiting for the mutex, let it run and
1602 * discover that its list is now empty.
1603 */
1604 cancel_work_sync(&mvm->async_handlers_wk);
1605}
1606
fe0f2de3
IP
1607static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1608{
1609 u16 i;
1610
1611 lockdep_assert_held(&mvm->mutex);
1612
1613 for (i = 0; i < NUM_PHY_CTX; i++)
1614 if (!mvm->phy_ctxts[i].ref)
1615 return &mvm->phy_ctxts[i];
1616
1617 IWL_ERR(mvm, "No available PHY context\n");
1618 return NULL;
1619}
1620
d44c3fe6
AA
1621static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1622 s16 tx_power)
1623{
1624 struct iwl_dev_tx_power_cmd cmd = {
da03f029
JB
1625 .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1626 .v2.mac_context_id =
d44c3fe6 1627 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
da03f029 1628 .v2.pwr_restriction = cpu_to_le16(8 * tx_power),
d44c3fe6 1629 };
da03f029 1630 int len = sizeof(cmd);
d44c3fe6 1631
d44c3fe6 1632 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
da03f029 1633 cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
d44c3fe6 1634
da03f029
JB
1635 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1636 len = sizeof(cmd.v2);
1637
1638 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
d44c3fe6
AA
1639}
1640
8ca151b5
JB
1641static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1642 struct ieee80211_vif *vif)
1643{
1644 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1645 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1646 int ret;
1647
aa5e1832
EG
1648 mvmvif->mvm = mvm;
1649
d40fc489
GG
1650 /*
1651 * make sure D0i3 exit is completed, otherwise a target access
1652 * during tx queue configuration could be done when still in
1653 * D0i3 state.
1654 */
1655 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1656 if (ret)
1657 return ret;
1658
8ca151b5
JB
1659 /*
1660 * Not much to do here. The stack will not allow interface
1661 * types or combinations that we didn't advertise, so we
1662 * don't really have to check the types.
1663 */
1664
1665 mutex_lock(&mvm->mutex);
1666
33cef925
JB
1667 /* make sure that beacon statistics don't go backwards with FW reset */
1668 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1669 mvmvif->beacon_stats.accu_num_beacons +=
1670 mvmvif->beacon_stats.num_beacons;
1671
e89044d7 1672 /* Allocate resources for the MAC context, and add it to the fw */
8ca151b5
JB
1673 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1674 if (ret)
1675 goto out_unlock;
1676
1c2abf72 1677 /* Counting number of interfaces is needed for legacy PM */
ea183d02
IP
1678 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1679 mvm->vif_count++;
ea183d02 1680
8ca151b5
JB
1681 /*
1682 * The AP binding flow can be done only after the beacon
1683 * template is configured (which happens only in the mac80211
1684 * start_ap() flow), and adding the broadcast station can happen
1685 * only after the binding.
1686 * In addition, since modifying the MAC before adding a bcast
 1687 * station is not allowed by the FW, delay adding the MAC context to
1688 * the point where we can also add the bcast station.
1689 * In short: there's not much we can do at this point, other than
1690 * allocating resources :)
1691 */
5023d966
JB
1692 if (vif->type == NL80211_IFTYPE_AP ||
1693 vif->type == NL80211_IFTYPE_ADHOC) {
013290aa 1694 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
8ca151b5
JB
1695 if (ret) {
1696 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1697 goto out_release;
1698 }
1699
77740cb4 1700 iwl_mvm_vif_dbgfs_register(mvm, vif);
8ca151b5
JB
1701 goto out_unlock;
1702 }
1703
93190fb0
AA
1704 mvmvif->features |= hw->netdev_features;
1705
8ca151b5
JB
1706 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1707 if (ret)
1708 goto out_release;
1709
999609f1 1710 ret = iwl_mvm_power_update_mac(mvm);
e5e7aa8e 1711 if (ret)
fd66fc1c 1712 goto out_remove_mac;
8ca151b5 1713
7df15b1e 1714 /* beacon filtering */
a1022927 1715 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
bd3351ba
EP
1716 if (ret)
1717 goto out_remove_mac;
1718
7df15b1e 1719 if (!mvm->bf_allowed_vif &&
73e5f2c5 1720 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
7df15b1e 1721 mvm->bf_allowed_vif = mvmvif;
a20fd398
AO
1722 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1723 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
7df15b1e
HG
1724 }
1725
8ca151b5
JB
1726 /*
1727 * P2P_DEVICE interface does not have a channel context assigned to it,
1728 * so a dedicated PHY context is allocated to it and the corresponding
1729 * MAC context is bound to it at this stage.
1730 */
1731 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
8ca151b5 1732
fe0f2de3
IP
1733 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1734 if (!mvmvif->phy_ctxt) {
1735 ret = -ENOSPC;
bd3351ba 1736 goto out_free_bf;
fe0f2de3 1737 }
8ca151b5 1738
53a9d61e 1739 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
8ca151b5
JB
1740 ret = iwl_mvm_binding_add_vif(mvm, vif);
1741 if (ret)
53a9d61e 1742 goto out_unref_phy;
8ca151b5 1743
013290aa 1744 ret = iwl_mvm_add_bcast_sta(mvm, vif);
8ca151b5
JB
1745 if (ret)
1746 goto out_unbind;
1747
1748 /* Save a pointer to p2p device vif, so it can later be used to
1749 * update the p2p device MAC when a GO is started/stopped */
1750 mvm->p2p_device_vif = vif;
1751 }
1752
63494374 1753 iwl_mvm_vif_dbgfs_register(mvm, vif);
8ca151b5
JB
1754 goto out_unlock;
1755
1756 out_unbind:
1757 iwl_mvm_binding_remove_vif(mvm, vif);
53a9d61e 1758 out_unref_phy:
fe0f2de3 1759 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
bd3351ba
EP
1760 out_free_bf:
1761 if (mvm->bf_allowed_vif == mvmvif) {
1762 mvm->bf_allowed_vif = NULL;
a20fd398
AO
1763 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1764 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
bd3351ba 1765 }
8ca151b5
JB
1766 out_remove_mac:
1767 mvmvif->phy_ctxt = NULL;
1768 iwl_mvm_mac_ctxt_remove(mvm, vif);
1769 out_release:
5ee2b215
AB
1770 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1771 mvm->vif_count--;
1c2abf72 1772
8ca151b5
JB
1773 iwl_mvm_mac_ctxt_release(mvm, vif);
1774 out_unlock:
1775 mutex_unlock(&mvm->mutex);
1776
d40fc489
GG
1777 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1778
8ca151b5
JB
1779 return ret;
1780}
1781
38a12b5b
JB
1782static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1783 struct ieee80211_vif *vif)
8ca151b5 1784{
d92b732e 1785 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
8ca151b5
JB
1786
1787 if (tfd_msk) {
fe92e32a
EG
1788 /*
1789 * mac80211 first removes all the stations of the vif and
1790 * then removes the vif. When it removes a station it also
1791 * flushes the AMPDU session. So by now, all the AMPDU sessions
1792 * of all the stations of this vif are closed, and the queues
1793 * of these AMPDU sessions are properly closed.
1794 * We still need to take care of the shared queues of the vif.
1795 * Flush them here.
1796 */
8ca151b5 1797 mutex_lock(&mvm->mutex);
5888a40c 1798 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
8ca151b5 1799 mutex_unlock(&mvm->mutex);
fe92e32a
EG
1800
1801 /*
1802 * There are transports that buffer a few frames in the host.
1803 * For these, the flush above isn't enough since while we were
1804 * flushing, the transport might have sent more frames to the
1805 * device. To solve this, wait here until the transport is
1806 * empty. Technically, this could have replaced the flush
1807 * above, but flush is much faster than draining. So flush
1808 * first, and drain to make sure we have no frames in the
1809 * transport anymore.
1810 * If a station still had frames on the shared queues, it is
1811 * already marked as draining, so to complete the draining, we
1812 * just need to wait until the transport is empty.
1813 */
1814 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
8ca151b5
JB
1815 }
1816
1817 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1818 /*
1819 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1820 * We assume here that all the packets sent to the OFFCHANNEL
 1821 * queue are sent in a ROC session.
1822 */
1823 flush_work(&mvm->roc_done_wk);
1824 } else {
1825 /*
1826 * By now, all the AC queues are empty. The AGG queues are
1827 * empty too. We already got all the Tx responses for all the
 2828 * packets in the queues. The drain work may have been
0742a75a 1829 * triggered. Flush it.
8ca151b5
JB
1830 */
1831 flush_work(&mvm->sta_drained_wk);
1832 }
38a12b5b
JB
1833}
1834
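/*
 * Editor's note, illustrative only: the helper above first flushes the TX
 * path (fast) and then waits for the transport queues to drain, because some
 * transports keep a few frames buffered on the host side.  A toy sketch of
 * the "flush first, then wait until empty" ordering; struct toy_queue and the
 * helpers are made-up names, not driver symbols.
 */
#if 0 /* example only, never compiled into the driver */
#include <stdio.h>

struct toy_queue {
	int in_device;	/* frames already handed to the device */
	int in_host;	/* frames still buffered by the transport */
};

/* Fast path: drop whatever the device is currently holding. */
static void toy_flush(struct toy_queue *q)
{
	q->in_device = 0;
}

/* Slow path: keep going until the host-side buffers are empty too. */
static void toy_wait_empty(struct toy_queue *q)
{
	while (q->in_host) {
		q->in_device++;	/* transport pushes a buffered frame ... */
		q->in_host--;
		q->in_device--;	/* ... and the device sends or drops it */
	}
}

int main(void)
{
	struct toy_queue q = { .in_device = 5, .in_host = 2 };

	toy_flush(&q);		/* cheap, but host-buffered frames remain */
	toy_wait_empty(&q);	/* now the queue is really empty */
	printf("device=%d host=%d\n", q.in_device, q.in_host);
	return 0;
}
#endif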
1835static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1836 struct ieee80211_vif *vif)
1837{
1838 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1839 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1840
1841 iwl_mvm_prepare_mac_removal(mvm, vif);
8ca151b5
JB
1842
1843 mutex_lock(&mvm->mutex);
1844
7df15b1e
HG
1845 if (mvm->bf_allowed_vif == mvmvif) {
1846 mvm->bf_allowed_vif = NULL;
a20fd398
AO
1847 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1848 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
7df15b1e
HG
1849 }
1850
63494374
JB
1851 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1852
8ca151b5
JB
1853 /*
1854 * For AP/GO interface, the tear down of the resources allocated to the
38a12b5b 1855 * interface is handled as part of the stop_ap flow.
8ca151b5 1856 */
5023d966
JB
1857 if (vif->type == NL80211_IFTYPE_AP ||
1858 vif->type == NL80211_IFTYPE_ADHOC) {
507cadf2
DS
1859#ifdef CONFIG_NL80211_TESTMODE
1860 if (vif == mvm->noa_vif) {
1861 mvm->noa_vif = NULL;
1862 mvm->noa_duration = 0;
1863 }
1864#endif
013290aa 1865 iwl_mvm_dealloc_bcast_sta(mvm, vif);
8ca151b5
JB
1866 goto out_release;
1867 }
1868
1869 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1870 mvm->p2p_device_vif = NULL;
013290aa 1871 iwl_mvm_rm_bcast_sta(mvm, vif);
8ca151b5 1872 iwl_mvm_binding_remove_vif(mvm, vif);
fe0f2de3 1873 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
8ca151b5
JB
1874 mvmvif->phy_ctxt = NULL;
1875 }
1876
5ee2b215 1877 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
8ca151b5 1878 mvm->vif_count--;
1c2abf72 1879
999609f1 1880 iwl_mvm_power_update_mac(mvm);
8ca151b5
JB
1881 iwl_mvm_mac_ctxt_remove(mvm, vif);
1882
1883out_release:
1884 iwl_mvm_mac_ctxt_release(mvm, vif);
1885 mutex_unlock(&mvm->mutex);
1886}
1887
1888static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
88f2fd73 1889{
8ca151b5
JB
1890 return 0;
1891}
1892
e59647ea
EP
1893struct iwl_mvm_mc_iter_data {
1894 struct iwl_mvm *mvm;
1895 int port_id;
1896};
1897
1898static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1899 struct ieee80211_vif *vif)
1900{
1901 struct iwl_mvm_mc_iter_data *data = _data;
1902 struct iwl_mvm *mvm = data->mvm;
1903 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1904 int ret, len;
1905
1906 /* if we don't have free ports, mcast frames will be dropped */
1907 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1908 return;
1909
1910 if (vif->type != NL80211_IFTYPE_STATION ||
1911 !vif->bss_conf.assoc)
1912 return;
1913
1914 cmd->port_id = data->port_id++;
1915 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1916 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1917
1c4abec0 1918 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
e59647ea
EP
1919 if (ret)
1920 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1921}
1922
1923static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1924{
1925 struct iwl_mvm_mc_iter_data iter_data = {
1926 .mvm = mvm,
88f2fd73
MG
1927 };
1928
e59647ea
EP
1929 lockdep_assert_held(&mvm->mutex);
1930
1931 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1932 return;
1933
1c4abec0 1934 ieee80211_iterate_active_interfaces_atomic(
e59647ea
EP
1935 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1936 iwl_mvm_mc_iface_iterator, &iter_data);
88f2fd73
MG
1937}
1938
e59647ea
EP
1939static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1940 struct netdev_hw_addr_list *mc_list)
8ca151b5 1941{
e59647ea
EP
1942 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1943 struct iwl_mcast_filter_cmd *cmd;
1944 struct netdev_hw_addr *addr;
f3bd58f4
MS
1945 int addr_count;
1946 bool pass_all;
e59647ea
EP
1947 int len;
1948
f3bd58f4
MS
1949 addr_count = netdev_hw_addr_list_count(mc_list);
1950 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1951 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1952 if (pass_all)
e59647ea 1953 addr_count = 0;
e59647ea
EP
1954
1955 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1956 cmd = kzalloc(len, GFP_ATOMIC);
1957 if (!cmd)
1958 return 0;
1959
1960 if (pass_all) {
1961 cmd->pass_all = 1;
1962 return (u64)(unsigned long)cmd;
1963 }
1964
1965 netdev_hw_addr_list_for_each(addr, mc_list) {
1966 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1967 cmd->count, addr->addr);
1968 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1969 addr->addr, ETH_ALEN);
1970 cmd->count++;
1971 }
1972
1973 return (u64)(unsigned long)cmd;
8ca151b5
JB
1974}
1975
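/*
 * Editor's note, illustrative only: iwl_mvm_prepare_multicast() above builds
 * a variable-length command - a fixed header followed by count * ETH_ALEN
 * address bytes - and rounds the total up to a multiple of 4.  A stand-alone
 * sketch of that length computation; struct toy_mcast_cmd is a made-up
 * stand-in, the real layout lives in the firmware API headers.
 */
#if 0 /* example only, never compiled into the driver */
#include <stdio.h>
#include <stddef.h>

#define TOY_ETH_ALEN 6
#define TOY_ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

struct toy_mcast_cmd {
	unsigned char pass_all;
	unsigned char count;
	unsigned char bssid[TOY_ETH_ALEN];
	unsigned char addr_list[];	/* count * TOY_ETH_ALEN bytes follow */
};

int main(void)
{
	size_t count = 3;	/* three multicast addresses */
	size_t len = TOY_ROUNDUP(sizeof(struct toy_mcast_cmd) +
				 count * TOY_ETH_ALEN, 4);

	printf("command length: %zu bytes\n", len);	/* 8 + 18 -> 28 */
	return 0;
}
#endif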
1976static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1977 unsigned int changed_flags,
1978 unsigned int *total_flags,
1979 u64 multicast)
1980{
e59647ea
EP
1981 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1982 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
8ca151b5 1983
e59647ea 1984 mutex_lock(&mvm->mutex);
51b6b9e0 1985
e59647ea
EP
1986 /* replace previous configuration */
1987 kfree(mvm->mcast_filter_cmd);
1988 mvm->mcast_filter_cmd = cmd;
51b6b9e0 1989
e59647ea
EP
1990 if (!cmd)
1991 goto out;
51b6b9e0 1992
e59647ea
EP
1993 iwl_mvm_recalc_multicast(mvm);
1994out:
1995 mutex_unlock(&mvm->mutex);
1996 *total_flags = 0;
51b6b9e0
EG
1997}
1998
c87163b9
EP
1999#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
2000struct iwl_bcast_iter_data {
2001 struct iwl_mvm *mvm;
2002 struct iwl_bcast_filter_cmd *cmd;
2003 u8 current_filter;
2004};
2005
2006static void
2007iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
2008 const struct iwl_fw_bcast_filter *in_filter,
2009 struct iwl_fw_bcast_filter *out_filter)
2010{
2011 struct iwl_fw_bcast_filter_attr *attr;
2012 int i;
2013
2014 memcpy(out_filter, in_filter, sizeof(*out_filter));
2015
2016 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
2017 attr = &out_filter->attrs[i];
2018
2019 if (!attr->mask)
2020 break;
2021
2ee8f021
EP
2022 switch (attr->reserved1) {
2023 case cpu_to_le16(BC_FILTER_MAGIC_IP):
2024 if (vif->bss_conf.arp_addr_cnt != 1) {
2025 attr->mask = 0;
2026 continue;
2027 }
2028
2029 attr->val = vif->bss_conf.arp_addr_list[0];
2030 break;
2031 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
2032 attr->val = *(__be32 *)&vif->addr[2];
2033 break;
2034 default:
2035 break;
2036 }
2037 attr->reserved1 = 0;
c87163b9
EP
2038 out_filter->num_attrs++;
2039 }
2040}
2041
2042static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
2043 struct ieee80211_vif *vif)
2044{
2045 struct iwl_bcast_iter_data *data = _data;
2046 struct iwl_mvm *mvm = data->mvm;
2047 struct iwl_bcast_filter_cmd *cmd = data->cmd;
2048 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2049 struct iwl_fw_bcast_mac *bcast_mac;
2050 int i;
2051
2052 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
2053 return;
2054
2055 bcast_mac = &cmd->macs[mvmvif->id];
2056
e48393e8
IP
2057 /*
2058 * enable filtering only for associated stations, but not for P2P
2059 * Clients
2060 */
2061 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
2062 !vif->bss_conf.assoc)
c87163b9
EP
2063 return;
2064
2065 bcast_mac->default_discard = 1;
2066
2067 /* copy all configured filters */
2068 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
2069 /*
2070 * Make sure we don't exceed our filters limit.
 2071 * If there is still a valid filter to be configured,
2072 * be on the safe side and just allow bcast for this mac.
2073 */
2074 if (WARN_ON_ONCE(data->current_filter >=
2075 ARRAY_SIZE(cmd->filters))) {
2076 bcast_mac->default_discard = 0;
2077 bcast_mac->attached_filters = 0;
2078 break;
2079 }
2080
2081 iwl_mvm_set_bcast_filter(vif,
2082 &mvm->bcast_filters[i],
2083 &cmd->filters[data->current_filter]);
2084
2085 /* skip current filter if it contains no attributes */
2086 if (!cmd->filters[data->current_filter].num_attrs)
2087 continue;
2088
2089 /* attach the filter to current mac */
2090 bcast_mac->attached_filters |=
2091 cpu_to_le16(BIT(data->current_filter));
2092
2093 data->current_filter++;
2094 }
2095}
2096
de06a59e
EP
2097bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
2098 struct iwl_bcast_filter_cmd *cmd)
c87163b9 2099{
c87163b9
EP
2100 struct iwl_bcast_iter_data iter_data = {
2101 .mvm = mvm,
de06a59e 2102 .cmd = cmd,
c87163b9
EP
2103 };
2104
3b8983b1
MS
2105 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
2106 return false;
2107
de06a59e
EP
2108 memset(cmd, 0, sizeof(*cmd));
2109 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2110 cmd->max_macs = ARRAY_SIZE(cmd->macs);
2111
2112#ifdef CONFIG_IWLWIFI_DEBUGFS
2113 /* use debugfs filters/macs if override is configured */
2114 if (mvm->dbgfs_bcast_filtering.override) {
2115 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2116 sizeof(cmd->filters));
2117 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2118 sizeof(cmd->macs));
2119 return true;
2120 }
2121#endif
c87163b9
EP
2122
2123 /* if no filters are configured, do nothing */
2124 if (!mvm->bcast_filters)
de06a59e 2125 return false;
c87163b9
EP
2126
2127 /* configure and attach these filters for each associated sta vif */
2128 ieee80211_iterate_active_interfaces(
2129 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2130 iwl_mvm_bcast_filter_iterator, &iter_data);
2131
de06a59e
EP
2132 return true;
2133}
2134static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2135 struct ieee80211_vif *vif)
2136{
2137 struct iwl_bcast_filter_cmd cmd;
2138
2139 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2140 return 0;
2141
2142 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2143 return 0;
2144
a1022927 2145 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
c87163b9
EP
2146 sizeof(cmd), &cmd);
2147}
2148#else
2149static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2150 struct ieee80211_vif *vif)
2151{
2152 return 0;
2153}
2154#endif
2155
8ca151b5
JB
2156static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2157 struct ieee80211_vif *vif,
2158 struct ieee80211_bss_conf *bss_conf,
2159 u32 changes)
2160{
2161 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2162 int ret;
2163
6e97b0d2
IP
2164 /*
2165 * Re-calculate the tsf id, as the master-slave relations depend on the
2166 * beacon interval, which was not known when the station interface was
2167 * added.
2168 */
2169 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2170 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2171
3dfd3a97
JB
2172 /*
2173 * If we're not associated yet, take the (new) BSSID before associating
2174 * so the firmware knows. If we're already associated, then use the old
2175 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2176 * branch for disassociation below.
2177 */
2178 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2179 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2180
2181 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
8ca151b5
JB
2182 if (ret)
2183 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2184
3dfd3a97
JB
2185 /* after sending it once, adopt mac80211 data */
2186 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2187 mvmvif->associated = bss_conf->assoc;
2188
8ca151b5
JB
2189 if (changes & BSS_CHANGED_ASSOC) {
2190 if (bss_conf->assoc) {
33cef925
JB
2191 /* clear statistics to get clean beacon counter */
2192 iwl_mvm_request_statistics(mvm, true);
2193 memset(&mvmvif->beacon_stats, 0,
2194 sizeof(mvmvif->beacon_stats));
2195
8ca151b5 2196 /* add quota for this interface */
7754ae79 2197 ret = iwl_mvm_update_quotas(mvm, true, NULL);
8ca151b5
JB
2198 if (ret) {
2199 IWL_ERR(mvm, "failed to update quotas\n");
2200 return;
2201 }
016d27e1
JB
2202
2203 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2204 &mvm->status)) {
2205 /*
2206 * If we're restarting then the firmware will
2207 * obviously have lost synchronisation with
2208 * the AP. It will attempt to synchronise by
2209 * itself, but we can make it more reliable by
2210 * scheduling a session protection time event.
2211 *
2212 * The firmware needs to receive a beacon to
2213 * catch up with synchronisation, use 110% of
2214 * the beacon interval.
2215 *
2216 * Set a large maximum delay to allow for more
2217 * than a single interface.
2218 */
2219 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2220 iwl_mvm_protect_session(mvm, vif, dur, dur,
d20d37bc 2221 5 * dur, false);
016d27e1 2222 }
1f3b0ff8
LE
2223
2224 iwl_mvm_sf_update(mvm, vif, false);
175a70b7 2225 iwl_mvm_power_vif_assoc(mvm, vif);
697162a1 2226 if (vif->p2p) {
29a90a49 2227 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
697162a1
EG
2228 iwl_mvm_update_smps(mvm, vif,
2229 IWL_MVM_SMPS_REQ_PROT,
2230 IEEE80211_SMPS_DYNAMIC);
2231 }
8ca151b5 2232 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1f3b0ff8
LE
2233 /*
2234 * If update fails - SF might be running in associated
2235 * mode while disassociated - which is forbidden.
2236 */
2237 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2238 "Failed to update SF upon disassociation\n");
2239
8ca151b5
JB
2240 /* remove AP station now that the MAC is unassoc */
2241 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2242 if (ret)
2243 IWL_ERR(mvm, "failed to remove AP station\n");
37577fe2
EP
2244
2245 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2246 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
8ca151b5
JB
2247 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2248 /* remove quota for this interface */
7754ae79 2249 ret = iwl_mvm_update_quotas(mvm, false, NULL);
8ca151b5
JB
2250 if (ret)
2251 IWL_ERR(mvm, "failed to update quotas\n");
29a90a49
EP
2252
2253 if (vif->p2p)
2254 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
3dfd3a97
JB
2255
2256 /* this will take the cleared BSSID from bss_conf */
2257 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2258 if (ret)
2259 IWL_ERR(mvm,
2260 "failed to update MAC %pM (clear after unassoc)\n",
2261 vif->addr);
8ca151b5 2262 }
a20fd398 2263
e59647ea 2264 iwl_mvm_recalc_multicast(mvm);
c87163b9 2265 iwl_mvm_configure_bcast_filter(mvm, vif);
e59647ea 2266
a20fd398
AO
2267 /* reset rssi values */
2268 mvmvif->bf_data.ave_beacon_signal = 0;
2269
8e484f0b 2270 iwl_mvm_bt_coex_vif_change(mvm);
f94045ed
EG
2271 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2272 IEEE80211_SMPS_AUTOMATIC);
989c6505 2273 } else if (changes & BSS_CHANGED_BEACON_INFO) {
210a544e
JB
2274 /*
2275 * We received a beacon _after_ association so
2276 * remove the session protection.
2277 */
2278 iwl_mvm_remove_time_event(mvm, mvmvif,
2279 &mvmvif->time_event_data);
8ca151b5 2280 }
cc87d322
EH
2281
2282 if (changes & BSS_CHANGED_BEACON_INFO) {
2283 iwl_mvm_sf_update(mvm, vif, false);
2284 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2285 }
2286
1bc10d3b
JB
2287 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2288 ret = iwl_mvm_power_update_mac(mvm);
2289 if (ret)
2290 IWL_ERR(mvm, "failed to update power mode\n");
2291 }
2292
88f2fd73
MG
2293 if (changes & BSS_CHANGED_TXPOWER) {
2294 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2295 bss_conf->txpower);
2296 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2297 }
a20fd398
AO
2298
2299 if (changes & BSS_CHANGED_CQM) {
3c6acb61 2300 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
a20fd398
AO
2301 /* reset cqm events tracking */
2302 mvmvif->bf_data.last_cqm_event = 0;
fa7b2e7f
AA
2303 if (mvmvif->bf_data.bf_enabled) {
2304 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2305 if (ret)
2306 IWL_ERR(mvm,
2307 "failed to update CQM thresholds\n");
2308 }
a20fd398 2309 }
2ee8f021
EP
2310
2311 if (changes & BSS_CHANGED_ARP_FILTER) {
3c6acb61 2312 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2ee8f021
EP
2313 iwl_mvm_configure_bcast_filter(mvm, vif);
2314 }
8ca151b5
JB
2315}
2316
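/*
 * Editor's note, illustrative only: after an HW restart the station flow
 * above schedules a session protection time event of 110% of the beacon
 * interval, with a maximum delay of five times that duration.  A quick
 * stand-alone check of the arithmetic for a typical 100 TU beacon interval;
 * session_prot_dur() is a made-up helper, not a driver symbol.
 */
#if 0 /* example only, never compiled into the driver */
#include <stdio.h>

/* 110% of the beacon interval, using the same integer math as the driver */
static unsigned int session_prot_dur(unsigned int beacon_int)
{
	return (11 * beacon_int) / 10;
}

int main(void)
{
	unsigned int dur = session_prot_dur(100);

	/* beacon_int = 100 TU -> dur = 110 TU, max delay = 550 TU */
	printf("dur=%u max_delay=%u\n", dur, 5 * dur);
	return 0;
}
#endif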
5023d966
JB
2317static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2318 struct ieee80211_vif *vif)
8ca151b5
JB
2319{
2320 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2321 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2322 int ret;
2323
576eeee9
EP
2324 /*
2325 * iwl_mvm_mac_ctxt_add() might read directly from the device
2326 * (the system time), so make sure it is available.
2327 */
2328 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2329 if (ret)
2330 return ret;
2331
8ca151b5
JB
2332 mutex_lock(&mvm->mutex);
2333
2334 /* Send the beacon template */
2335 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2336 if (ret)
2337 goto out_unlock;
2338
6e97b0d2
IP
2339 /*
2340 * Re-calculate the tsf id, as the master-slave relations depend on the
2341 * beacon interval, which was not known when the AP interface was added.
2342 */
2343 if (vif->type == NL80211_IFTYPE_AP)
2344 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2345
94939080
GG
2346 mvmvif->ap_assoc_sta_count = 0;
2347
8ca151b5
JB
2348 /* Add the mac context */
2349 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2350 if (ret)
2351 goto out_unlock;
2352
2353 /* Perform the binding */
2354 ret = iwl_mvm_binding_add_vif(mvm, vif);
2355 if (ret)
2356 goto out_remove;
2357
8ca151b5
JB
2358 /* Send the bcast station. At this stage the TBTT and DTIM time events
2359 * are added and applied to the scheduler */
013290aa 2360 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
8ca151b5
JB
2361 if (ret)
2362 goto out_unbind;
2363
5691e218
IP
2364 /* must be set before quota calculations */
2365 mvmvif->ap_ibss_active = true;
2366
a11e144e 2367 /* power update needs to be done before quotas */
999609f1 2368 iwl_mvm_power_update_mac(mvm);
a11e144e 2369
7754ae79 2370 ret = iwl_mvm_update_quotas(mvm, false, NULL);
8ca151b5 2371 if (ret)
a11e144e 2372 goto out_quota_failed;
8ca151b5 2373
5023d966 2374 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
8ca151b5 2375 if (vif->p2p && mvm->p2p_device_vif)
3dfd3a97 2376 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
8ca151b5 2377
29a90a49
EP
2378 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2379
8e484f0b 2380 iwl_mvm_bt_coex_vif_change(mvm);
dac94da8 2381
f697267f
AN
2382 /* we don't support TDLS during DCM */
2383 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2384 iwl_mvm_teardown_tdls_peers(mvm);
2385
939e4904 2386 goto out_unlock;
8ca151b5 2387
a11e144e 2388out_quota_failed:
999609f1 2389 iwl_mvm_power_update_mac(mvm);
5691e218 2390 mvmvif->ap_ibss_active = false;
013290aa 2391 iwl_mvm_send_rm_bcast_sta(mvm, vif);
8ca151b5
JB
2392out_unbind:
2393 iwl_mvm_binding_remove_vif(mvm, vif);
2394out_remove:
2395 iwl_mvm_mac_ctxt_remove(mvm, vif);
2396out_unlock:
2397 mutex_unlock(&mvm->mutex);
576eeee9 2398 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
8ca151b5
JB
2399 return ret;
2400}
2401
5023d966
JB
2402static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2403 struct ieee80211_vif *vif)
8ca151b5
JB
2404{
2405 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2406 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2407
38a12b5b
JB
2408 iwl_mvm_prepare_mac_removal(mvm, vif);
2409
8ca151b5
JB
2410 mutex_lock(&mvm->mutex);
2411
664322fa 2412 /* Handle AP stop while in CSA */
7f0a7c67
AO
2413 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2414 iwl_mvm_remove_time_event(mvm, mvmvif,
2415 &mvmvif->time_event_data);
664322fa 2416 RCU_INIT_POINTER(mvm->csa_vif, NULL);
7f0a7c67 2417 }
664322fa 2418
003e5236
AO
2419 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2420 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2421 mvm->csa_tx_block_bcn_timeout = 0;
2422 }
2423
5023d966 2424 mvmvif->ap_ibss_active = false;
1c87bbad 2425 mvm->ap_last_beacon_gp2 = 0;
8ca151b5 2426
8e484f0b 2427 iwl_mvm_bt_coex_vif_change(mvm);
dac94da8 2428
29a90a49
EP
2429 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2430
5023d966 2431 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
8ca151b5 2432 if (vif->p2p && mvm->p2p_device_vif)
3dfd3a97 2433 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
8ca151b5 2434
7754ae79 2435 iwl_mvm_update_quotas(mvm, false, NULL);
013290aa 2436 iwl_mvm_send_rm_bcast_sta(mvm, vif);
8ca151b5 2437 iwl_mvm_binding_remove_vif(mvm, vif);
a11e144e 2438
999609f1 2439 iwl_mvm_power_update_mac(mvm);
a11e144e 2440
8ca151b5
JB
2441 iwl_mvm_mac_ctxt_remove(mvm, vif);
2442
2443 mutex_unlock(&mvm->mutex);
2444}
2445
5023d966
JB
2446static void
2447iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2448 struct ieee80211_vif *vif,
2449 struct ieee80211_bss_conf *bss_conf,
2450 u32 changes)
8ca151b5 2451{
be2056fc 2452 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
8a5e3660 2453
be2056fc
IP
2454 /* Changes will be applied when the AP/IBSS is started */
2455 if (!mvmvif->ap_ibss_active)
2456 return;
2457
863230da 2458 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
f7d8b702 2459 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
3dfd3a97 2460 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
863230da 2461 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
8a5e3660 2462
8ca151b5 2463 /* Need to send a new beacon template to the FW */
863230da
JB
2464 if (changes & BSS_CHANGED_BEACON &&
2465 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2466 IWL_WARN(mvm, "Failed updating beacon data\n");
79b7a69d
HD
2467
2468 if (changes & BSS_CHANGED_TXPOWER) {
2469 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2470 bss_conf->txpower);
2471 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2472 }
2473
8ca151b5
JB
2474}
2475
2476static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2477 struct ieee80211_vif *vif,
2478 struct ieee80211_bss_conf *bss_conf,
2479 u32 changes)
2480{
2481 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2482
576eeee9
EP
2483 /*
2484 * iwl_mvm_bss_info_changed_station() might call
2485 * iwl_mvm_protect_session(), which reads directly from
2486 * the device (the system time), so make sure it is available.
2487 */
2488 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2489 return;
2490
8ca151b5
JB
2491 mutex_lock(&mvm->mutex);
2492
723f02ed 2493 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
c7d42480 2494 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
723f02ed 2495
8ca151b5
JB
2496 switch (vif->type) {
2497 case NL80211_IFTYPE_STATION:
2498 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2499 break;
2500 case NL80211_IFTYPE_AP:
5023d966
JB
2501 case NL80211_IFTYPE_ADHOC:
2502 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
8ca151b5
JB
2503 break;
2504 default:
2505 /* shouldn't happen */
2506 WARN_ON_ONCE(1);
2507 }
2508
2509 mutex_unlock(&mvm->mutex);
576eeee9 2510 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
8ca151b5
JB
2511}
2512
2513static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2514 struct ieee80211_vif *vif,
c56ef672 2515 struct ieee80211_scan_request *hw_req)
8ca151b5
JB
2516{
2517 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2518 int ret;
2519
6749dd80
LC
2520 if (hw_req->req.n_channels == 0 ||
2521 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
8ca151b5
JB
2522 return -EINVAL;
2523
2524 mutex_lock(&mvm->mutex);
6749dd80 2525 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
8ca151b5 2526 mutex_unlock(&mvm->mutex);
6749dd80 2527
8ca151b5
JB
2528 return ret;
2529}
2530
2531static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2532 struct ieee80211_vif *vif)
2533{
2534 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2535
2536 mutex_lock(&mvm->mutex);
2537
e7d3abab
LC
2538 /* Due to a race condition, it's possible that mac80211 asks
2539 * us to stop a hw_scan when it's already stopped. This can
2540 * happen, for instance, if we stopped the scan ourselves,
2541 * called ieee80211_scan_completed() and the userspace called
 2542 * cancel scan before ieee80211_scan_work() could run.
2543 * To handle that, simply return if the scan is not running.
2544 */
262888fc 2545 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
c7d42480 2546 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
8ca151b5
JB
2547
2548 mutex_unlock(&mvm->mutex);
2549}
2550
2551static void
2552iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
3e56eadf 2553 struct ieee80211_sta *sta, u16 tids,
8ca151b5
JB
2554 int num_frames,
2555 enum ieee80211_frame_release_type reason,
2556 bool more_data)
2557{
2558 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
8ca151b5 2559
3e56eadf 2560 /* Called when we need to transmit (a) frame(s) from mac80211 */
8ca151b5 2561
3e56eadf
JB
2562 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2563 tids, more_data, false);
2564}
2565
2566static void
2567iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2568 struct ieee80211_sta *sta, u16 tids,
2569 int num_frames,
2570 enum ieee80211_frame_release_type reason,
2571 bool more_data)
2572{
2573 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2574
2575 /* Called when we need to transmit (a) frame(s) from agg queue */
2576
2577 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2578 tids, more_data, true);
8ca151b5
JB
2579}
2580
2581static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2582 struct ieee80211_vif *vif,
2583 enum sta_notify_cmd cmd,
2584 struct ieee80211_sta *sta)
2585{
2586 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
5b577a90 2587 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
c22b0ff5 2588 unsigned long txqs = 0, tids = 0;
3e56eadf 2589 int tid;
8ca151b5 2590
c22b0ff5
EG
2591 spin_lock_bh(&mvmsta->lock);
2592 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2593 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2594
2595 if (tid_data->state != IWL_AGG_ON &&
2596 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2597 continue;
2598
2599 __set_bit(tid_data->txq_id, &txqs);
2600
2601 if (iwl_mvm_tid_queued(tid_data) == 0)
2602 continue;
2603
2604 __set_bit(tid, &tids);
2605 }
2606
8ca151b5
JB
2607 switch (cmd) {
2608 case STA_NOTIFY_SLEEP:
e3d4bc8c 2609 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
8ca151b5 2610 ieee80211_sta_block_awake(hw, sta, true);
3e56eadf 2611
c22b0ff5 2612 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
3e56eadf 2613 ieee80211_sta_set_buffered(sta, tid, true);
c22b0ff5
EG
2614
2615 if (txqs)
2616 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
8ca151b5
JB
2617 /*
2618 * The fw updates the STA to be asleep. Tx packets on the Tx
2619 * queues to this station will not be transmitted. The fw will
2620 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2621 */
2622 break;
2623 case STA_NOTIFY_AWAKE:
881acd89 2624 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
8ca151b5 2625 break;
c22b0ff5
EG
2626
2627 if (txqs)
2628 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
9cc40712 2629 iwl_mvm_sta_modify_ps_wake(mvm, sta);
8ca151b5
JB
2630 break;
2631 default:
2632 break;
2633 }
c22b0ff5 2634 spin_unlock_bh(&mvmsta->lock);
8ca151b5
JB
2635}
2636
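/*
 * Editor's note, illustrative only: iwl_mvm_mac_sta_notify() above collects
 * the active TX queues and TIDs into two unsigned long bitmaps and then walks
 * the set bits.  A stand-alone sketch of that bitmap idiom in plain C;
 * toy_set_bit()/toy_test_bit() stand in for the kernel's __set_bit() and
 * for_each_set_bit(), they are not driver symbols.
 */
#if 0 /* example only, never compiled into the driver */
#include <stdio.h>

#define MAX_TID 8

static void toy_set_bit(int nr, unsigned long *map)
{
	*map |= 1UL << nr;
}

static int toy_test_bit(int nr, unsigned long map)
{
	return (map >> nr) & 1UL;
}

int main(void)
{
	unsigned long tids = 0;
	int tid;

	/* pretend TIDs 0 and 5 have an aggregation session */
	toy_set_bit(0, &tids);
	toy_set_bit(5, &tids);

	/* walk the set bits, as for_each_set_bit() would */
	for (tid = 0; tid < MAX_TID; tid++)
		if (toy_test_bit(tid, tids))
			printf("TID %d is buffered\n", tid);
	return 0;
}
#endif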
1ddbbb0c
JB
2637static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2638 struct ieee80211_vif *vif,
2639 struct ieee80211_sta *sta)
2640{
2641 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
94939080 2642 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
9d8ce6af 2643 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1ddbbb0c
JB
2644
2645 /*
2646 * This is called before mac80211 does RCU synchronisation,
2647 * so here we already invalidate our internal RCU-protected
2648 * station pointer. The rest of the code will thus no longer
2649 * be able to find the station this way, and we don't rely
2650 * on further RCU synchronisation after the sta_state()
2651 * callback deleted the station.
2652 */
2653 mutex_lock(&mvm->mutex);
2654 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2655 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2656 ERR_PTR(-ENOENT));
94939080
GG
2657
2658 if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
2659 mvmvif->ap_assoc_sta_count--;
f82c8339 2660 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
94939080
GG
2661 }
2662
1ddbbb0c
JB
2663 mutex_unlock(&mvm->mutex);
2664}
2665
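/*
 * Editor's note, illustrative only: the callback above poisons the station
 * pointer with ERR_PTR(-ENOENT) so that any later lookup sees an error value
 * instead of a stale station.  A stand-alone sketch of the ERR_PTR/IS_ERR
 * encoding trick (negative errnos squeezed into the top of the pointer
 * space); toy_err_ptr()/toy_is_err() only mimic the kernel helpers.
 */
#if 0 /* example only, never compiled into the driver */
#include <stdio.h>

#define TOY_ENOENT 2
#define TOY_MAX_ERRNO 4095UL

static void *toy_err_ptr(long err)
{
	return (void *)err;	/* e.g. -2 becomes 0xffff...fffe */
}

static int toy_is_err(const void *ptr)
{
	return (unsigned long)ptr >= -TOY_MAX_ERRNO;
}

int main(void)
{
	void *sta = toy_err_ptr(-TOY_ENOENT);	/* poisoned entry */

	if (toy_is_err(sta))
		printf("lookup fails cleanly instead of using a stale pointer\n");
	return 0;
}
#endif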
bd1ba664
JB
2666static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2667 const u8 *bssid)
2668{
2669 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2670 return;
2671
2672 if (iwlwifi_mod_params.uapsd_disable) {
2673 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2674 return;
2675 }
2676
2677 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2678}
2679
8ca151b5
JB
2680static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2681 struct ieee80211_vif *vif,
2682 struct ieee80211_sta *sta,
2683 enum ieee80211_sta_state old_state,
2684 enum ieee80211_sta_state new_state)
2685{
2686 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2687 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2688 int ret;
2689
2690 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2691 sta->addr, old_state, new_state);
2692
2693 /* this would be a mac80211 bug ... but don't crash */
2694 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2695 return -EINVAL;
2696
2697 /* if a STA is being removed, reuse its ID */
2698 flush_work(&mvm->sta_drained_wk);
2699
2700 mutex_lock(&mvm->mutex);
2701 if (old_state == IEEE80211_STA_NOTEXIST &&
2702 new_state == IEEE80211_STA_NONE) {
48bc1307
JB
2703 /*
2704 * Firmware bug - it'll crash if the beacon interval is less
2705 * than 16. We can't avoid connecting at all, so refuse the
 2706 * station state change; this will cause mac80211 to abandon
2707 * attempts to connect to this AP, and eventually wpa_s will
2708 * blacklist the AP...
2709 */
2710 if (vif->type == NL80211_IFTYPE_STATION &&
2711 vif->bss_conf.beacon_int < 16) {
2712 IWL_ERR(mvm,
2713 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2714 sta->addr, vif->bss_conf.beacon_int);
2715 ret = -EINVAL;
2716 goto out_unlock;
2717 }
cf7b491d
AN
2718
2719 if (sta->tdls &&
2720 (vif->p2p ||
fa3d07e4
AN
2721 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2722 IWL_MVM_TDLS_STA_COUNT ||
cf7b491d
AN
2723 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2724 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2725 ret = -EBUSY;
2726 goto out_unlock;
2727 }
2728
8ca151b5 2729 ret = iwl_mvm_add_sta(mvm, vif, sta);
fa3d07e4
AN
2730 if (sta->tdls && ret == 0)
2731 iwl_mvm_recalc_tdls_state(mvm, vif, true);
8ca151b5
JB
2732 } else if (old_state == IEEE80211_STA_NONE &&
2733 new_state == IEEE80211_STA_AUTH) {
e820c2da
HD
2734 /*
2735 * EBS may be disabled due to previous failures reported by FW.
2736 * Reset EBS status here assuming environment has been changed.
2737 */
2738 mvm->last_ebs_successful = true;
bd1ba664 2739 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
8ca151b5
JB
2740 ret = 0;
2741 } else if (old_state == IEEE80211_STA_AUTH &&
2742 new_state == IEEE80211_STA_ASSOC) {
7a453973
JB
2743 ret = iwl_mvm_update_sta(mvm, vif, sta);
2744 if (ret == 0)
2745 iwl_mvm_rs_rate_init(mvm, sta,
b87c2179
ES
2746 mvmvif->phy_ctxt->channel->band,
2747 true);
8ca151b5
JB
2748 } else if (old_state == IEEE80211_STA_ASSOC &&
2749 new_state == IEEE80211_STA_AUTHORIZED) {
f59e0e3c
AN
2750
2751 /* we don't support TDLS during DCM */
2752 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2753 iwl_mvm_teardown_tdls_peers(mvm);
2754
7df15b1e 2755 /* enable beacon filtering */
fa7b2e7f 2756 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
8ca151b5
JB
2757 ret = 0;
2758 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2759 new_state == IEEE80211_STA_ASSOC) {
7df15b1e 2760 /* disable beacon filtering */
a1022927 2761 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
8ca151b5
JB
2762 ret = 0;
2763 } else if (old_state == IEEE80211_STA_ASSOC &&
2764 new_state == IEEE80211_STA_AUTH) {
2765 ret = 0;
2766 } else if (old_state == IEEE80211_STA_AUTH &&
2767 new_state == IEEE80211_STA_NONE) {
2768 ret = 0;
2769 } else if (old_state == IEEE80211_STA_NONE &&
2770 new_state == IEEE80211_STA_NOTEXIST) {
2771 ret = iwl_mvm_rm_sta(mvm, vif, sta);
fa3d07e4
AN
2772 if (sta->tdls)
2773 iwl_mvm_recalc_tdls_state(mvm, vif, false);
8ca151b5
JB
2774 } else {
2775 ret = -EIO;
2776 }
48bc1307 2777 out_unlock:
8ca151b5
JB
2778 mutex_unlock(&mvm->mutex);
2779
9c126cd6
LK
2780 if (sta->tdls && ret == 0) {
2781 if (old_state == IEEE80211_STA_NOTEXIST &&
2782 new_state == IEEE80211_STA_NONE)
2783 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2784 else if (old_state == IEEE80211_STA_NONE &&
2785 new_state == IEEE80211_STA_NOTEXIST)
2786 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2787 }
2788
8ca151b5
JB
2789 return ret;
2790}
2791
2792static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2793{
2794 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2795
2796 mvm->rts_threshold = value;
2797
2798 return 0;
2799}
2800
1f3b0ff8
LE
2801static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2802 struct ieee80211_vif *vif,
2803 struct ieee80211_sta *sta, u32 changed)
2804{
2805 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2806
2807 if (vif->type == NL80211_IFTYPE_STATION &&
2808 changed & IEEE80211_RC_NSS_CHANGED)
2809 iwl_mvm_sf_update(mvm, vif, false);
2810}
2811
8ca151b5
JB
2812static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2813 struct ieee80211_vif *vif, u16 ac,
2814 const struct ieee80211_tx_queue_params *params)
2815{
2816 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2817 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2818
2819 mvmvif->queue_params[ac] = *params;
2820
2821 /*
2822 * No need to update right away, we'll get BSS_CHANGED_QOS
2823 * The exception is P2P_DEVICE interface which needs immediate update.
2824 */
2825 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2826 int ret;
2827
2828 mutex_lock(&mvm->mutex);
3dfd3a97 2829 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
8ca151b5
JB
2830 mutex_unlock(&mvm->mutex);
2831 return ret;
2832 }
2833 return 0;
2834}
2835
2836static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2837 struct ieee80211_vif *vif)
2838{
2839 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2840 u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2841 200 + vif->bss_conf.beacon_int);
2842 u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2843 100 + vif->bss_conf.beacon_int);
2844
2845 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2846 return;
2847
576eeee9
EP
2848 /*
2849 * iwl_mvm_protect_session() reads directly from the device
2850 * (the system time), so make sure it is available.
2851 */
2852 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2853 return;
2854
8ca151b5
JB
2855 mutex_lock(&mvm->mutex);
2856 /* Try really hard to protect the session and hear a beacon */
d20d37bc 2857 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
8ca151b5 2858 mutex_unlock(&mvm->mutex);
576eeee9
EP
2859
2860 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
8ca151b5
JB
2861}
2862
35a000b7
DS
2863static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2864 struct ieee80211_vif *vif,
2865 struct cfg80211_sched_scan_request *req,
633e2713 2866 struct ieee80211_scan_ies *ies)
35a000b7
DS
2867{
2868 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
35a000b7 2869
35a000b7 2870 int ret;
4660dfbb 2871
35a000b7
DS
2872 mutex_lock(&mvm->mutex);
2873
1f940386 2874 if (!vif->bss_conf.idle) {
bd5e4744
DS
2875 ret = -EBUSY;
2876 goto out;
2877 }
2878
19945dfb 2879 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
d2496221 2880
35a000b7
DS
2881out:
2882 mutex_unlock(&mvm->mutex);
2883 return ret;
2884}
2885
37e3308c
JB
2886static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2887 struct ieee80211_vif *vif)
35a000b7
DS
2888{
2889 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
33ea27f6 2890 int ret;
35a000b7
DS
2891
2892 mutex_lock(&mvm->mutex);
e7d3abab
LC
2893
2894 /* Due to a race condition, it's possible that mac80211 asks
2895 * us to stop a sched_scan when it's already stopped. This
2896 * can happen, for instance, if we stopped the scan ourselves,
2897 * called ieee80211_sched_scan_stopped() and the userspace called
 2898 * stop sched scan before ieee80211_sched_scan_stopped_work()
2899 * could run. To handle this, simply return if the scan is
2900 * not running.
2901 */
262888fc 2902 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
e7d3abab
LC
2903 mutex_unlock(&mvm->mutex);
2904 return 0;
2905 }
2906
c7d42480 2907 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
35a000b7 2908 mutex_unlock(&mvm->mutex);
33ea27f6 2909 iwl_mvm_wait_for_async_handlers(mvm);
37e3308c 2910
33ea27f6 2911 return ret;
35a000b7
DS
2912}
2913
8ca151b5
JB
2914static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2915 enum set_key_cmd cmd,
2916 struct ieee80211_vif *vif,
2917 struct ieee80211_sta *sta,
2918 struct ieee80211_key_conf *key)
2919{
2920 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2921 int ret;
2922
2923 if (iwlwifi_mod_params.sw_crypto) {
2924 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2925 return -EOPNOTSUPP;
2926 }
2927
2928 switch (key->cipher) {
2929 case WLAN_CIPHER_SUITE_TKIP:
2930 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
8ca151b5
JB
2931 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2932 break;
ca8c0f4b
JB
2933 case WLAN_CIPHER_SUITE_CCMP:
2934 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2935 break;
8ca151b5 2936 case WLAN_CIPHER_SUITE_AES_CMAC:
30686bf7 2937 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
8ca151b5
JB
2938 break;
2939 case WLAN_CIPHER_SUITE_WEP40:
2940 case WLAN_CIPHER_SUITE_WEP104:
ba3943b0
JB
2941 /* For non-client mode, only use WEP keys for TX as we probably
2942 * don't have a station yet anyway and would then have to keep
2943 * track of the keys, linking them to each of the clients/peers
 2944 * as they appear. For now, don't do that: for performance, WEP
2945 * offload doesn't really matter much, but we need it for some
2946 * other offload features in client mode.
8ca151b5 2947 */
ba3943b0
JB
2948 if (vif->type != NL80211_IFTYPE_STATION)
2949 return 0;
2950 break;
8ca151b5 2951 default:
e36e5433
MS
2952 /* currently FW supports only one optional cipher scheme */
2953 if (hw->n_cipher_schemes &&
2954 hw->cipher_schemes->cipher == key->cipher)
2955 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2956 else
2957 return -EOPNOTSUPP;
8ca151b5
JB
2958 }
2959
2960 mutex_lock(&mvm->mutex);
2961
2962 switch (cmd) {
2963 case SET_KEY:
5023d966
JB
2964 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2965 vif->type == NL80211_IFTYPE_AP) && !sta) {
2966 /*
2967 * GTK on AP interface is a TX-only key, return 0;
2968 * on IBSS they're per-station and because we're lazy
2969 * we don't support them for RX, so do the same.
2970 */
6caffd4f
JB
2971 ret = 0;
2972 key->hw_key_idx = STA_KEY_IDX_INVALID;
2973 break;
2974 }
2975
b546dcd6
JB
2976 /* During FW restart, in order to restore the state as it was,
2977 * don't try to reprogram keys we previously failed for.
2978 */
2979 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2980 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2981 IWL_DEBUG_MAC80211(mvm,
2982 "skip invalid idx key programming during restart\n");
2983 ret = 0;
2984 break;
2985 }
2986
8ca151b5 2987 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
b546dcd6
JB
2988 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
2989 test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2990 &mvm->status));
8ca151b5
JB
2991 if (ret) {
2992 IWL_WARN(mvm, "set key failed\n");
2993 /*
2994 * can't add key for RX, but we don't need it
2995 * in the device for TX so still return 0
2996 */
6caffd4f 2997 key->hw_key_idx = STA_KEY_IDX_INVALID;
8ca151b5
JB
2998 ret = 0;
2999 }
3000
3001 break;
3002 case DISABLE_KEY:
6caffd4f
JB
3003 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3004 ret = 0;
3005 break;
3006 }
3007
8ca151b5
JB
3008 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3009 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3010 break;
3011 default:
3012 ret = -EINVAL;
3013 }
3014
3015 mutex_unlock(&mvm->mutex);
3016 return ret;
3017}
3018
3019static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3020 struct ieee80211_vif *vif,
3021 struct ieee80211_key_conf *keyconf,
3022 struct ieee80211_sta *sta,
3023 u32 iv32, u16 *phase1key)
3024{
3025 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3026
5023d966
JB
3027 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3028 return;
3029
8ca151b5
JB
3030 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3031}
3032
3033
b112889c
AM
3034static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3035 struct iwl_rx_packet *pkt, void *data)
3036{
3037 struct iwl_mvm *mvm =
3038 container_of(notif_wait, struct iwl_mvm, notif_wait);
3039 struct iwl_hs20_roc_res *resp;
3040 int resp_len = iwl_rx_packet_payload_len(pkt);
3041 struct iwl_mvm_time_event_data *te_data = data;
3042
3043 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3044 return true;
3045
3046 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3047 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3048 return true;
3049 }
3050
3051 resp = (void *)pkt->data;
3052
3053 IWL_DEBUG_TE(mvm,
3054 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
3055 resp->status, resp->event_unique_id);
3056
3057 te_data->uid = le32_to_cpu(resp->event_unique_id);
3058 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3059 te_data->uid);
3060
3061 spin_lock_bh(&mvm->time_event_lock);
3062 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3063 spin_unlock_bh(&mvm->time_event_lock);
3064
3065 return true;
3066}
3067
35d3dab5 3068#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
b112889c
AM
3069static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3070 struct ieee80211_channel *channel,
3071 struct ieee80211_vif *vif,
3072 int duration)
3073{
3074 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3075 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3076 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
6eb031d2 3077 static const u16 time_event_response[] = { HOT_SPOT_CMD };
b112889c
AM
3078 struct iwl_notification_wait wait_time_event;
3079 struct iwl_hs20_roc_req aux_roc_req = {
3080 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3081 .id_and_color =
3082 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3083 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3084 /* Set the channel info data */
3085 .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3086 PHY_BAND_24 : PHY_BAND_5,
3087 .channel_info.channel = channel->hw_value,
3088 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3089 /* Set the time and duration */
3090 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3091 .apply_time_max_delay =
3092 cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3093 .duration = cpu_to_le32(MSEC_TO_TU(duration)),
3094 };
3095
3096 /* Set the node address */
3097 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3098
a6cc5163
MG
3099 lockdep_assert_held(&mvm->mutex);
3100
3101 spin_lock_bh(&mvm->time_event_lock);
3102
3103 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3104 spin_unlock_bh(&mvm->time_event_lock);
3105 return -EIO;
3106 }
3107
b112889c
AM
3108 te_data->vif = vif;
3109 te_data->duration = duration;
3110 te_data->id = HOT_SPOT_CMD;
3111
b112889c
AM
3112 spin_unlock_bh(&mvm->time_event_lock);
3113
3114 /*
3115 * Use a notification wait, which really just processes the
3116 * command response and doesn't wait for anything, in order
3117 * to be able to process the response and get the UID inside
3118 * the RX path. Using CMD_WANT_SKB doesn't work because it
3119 * stores the buffer and then wakes up this thread, by which
3120 * time another notification (that the time event started)
3121 * might already be processed unsuccessfully.
3122 */
3123 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3124 time_event_response,
3125 ARRAY_SIZE(time_event_response),
3126 iwl_mvm_rx_aux_roc, te_data);
3127
3128 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3129 &aux_roc_req);
3130
3131 if (res) {
3132 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3133 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3134 goto out_clear_te;
3135 }
3136
3137 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3138 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3139 /* should never fail */
3140 WARN_ON_ONCE(res);
3141
3142 if (res) {
3143 out_clear_te:
3144 spin_lock_bh(&mvm->time_event_lock);
3145 iwl_mvm_te_clear_data(mvm, te_data);
3146 spin_unlock_bh(&mvm->time_event_lock);
3147 }
3148
3149 return res;
3150}
3151
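/*
 * Editor's note, illustrative only: the aux ROC request above converts
 * millisecond durations with MSEC_TO_TU().  Assuming the usual 802.11
 * definition of one time unit (TU) = 1024 microseconds, the 200 ms
 * max-delay-on-channel comes out at roughly 195 TU.  Stand-alone sketch;
 * msec_to_tu() is a made-up helper, not the driver macro itself.
 */
#if 0 /* example only, never compiled into the driver */
#include <stdio.h>

/* 1 TU = 1024 us, so TU = ms * 1000 / 1024 (integer math, rounds down) */
static unsigned int msec_to_tu(unsigned int msec)
{
	return msec * 1000 / 1024;
}

int main(void)
{
	printf("200 ms ~= %u TU\n", msec_to_tu(200));	/* prints 195 */
	return 0;
}
#endif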
8ca151b5
JB
3152static int iwl_mvm_roc(struct ieee80211_hw *hw,
3153 struct ieee80211_vif *vif,
3154 struct ieee80211_channel *channel,
d339d5ca
IP
3155 int duration,
3156 enum ieee80211_roc_type type)
8ca151b5
JB
3157{
3158 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
fe0f2de3 3159 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
8ca151b5 3160 struct cfg80211_chan_def chandef;
31d385ae
IP
3161 struct iwl_mvm_phy_ctxt *phy_ctxt;
3162 int ret, i;
3163
3164 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3165 duration, type);
8ca151b5 3166
6ed13164
MG
3167 flush_work(&mvm->roc_done_wk);
3168
a6cc5163
MG
3169 mutex_lock(&mvm->mutex);
3170
b112889c
AM
3171 switch (vif->type) {
3172 case NL80211_IFTYPE_STATION:
859d914c
JB
3173 if (fw_has_capa(&mvm->fw->ucode_capa,
3174 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
5ac6c72e
LC
3175 /* Use aux roc framework (HS20) */
3176 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3177 vif, duration);
3178 goto out_unlock;
3179 }
3180 IWL_ERR(mvm, "hotspot not supported\n");
3181 ret = -EINVAL;
a6cc5163 3182 goto out_unlock;
b112889c
AM
3183 case NL80211_IFTYPE_P2P_DEVICE:
3184 /* handle below */
3185 break;
3186 default:
3187 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
a6cc5163
MG
3188 ret = -EINVAL;
3189 goto out_unlock;
8ca151b5
JB
3190 }
3191
31d385ae
IP
3192 for (i = 0; i < NUM_PHY_CTX; i++) {
3193 phy_ctxt = &mvm->phy_ctxts[i];
3194 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3195 continue;
3196
3197 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3198 /*
3199 * Unbind the P2P_DEVICE from the current PHY context,
3200 * and if the PHY context is not used remove it.
3201 */
3202 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3203 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3204 goto out_unlock;
3205
3206 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3207
3208 /* Bind the P2P_DEVICE to the current PHY Context */
3209 mvmvif->phy_ctxt = phy_ctxt;
3210
3211 ret = iwl_mvm_binding_add_vif(mvm, vif);
3212 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3213 goto out_unlock;
3214
3215 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3216 goto schedule_time_event;
3217 }
3218 }
3219
3220 /* Need to update the PHY context only if the ROC channel changed */
3221 if (channel == mvmvif->phy_ctxt->channel)
3222 goto schedule_time_event;
3223
8ca151b5 3224 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
8ca151b5 3225
31d385ae
IP
3226 /*
3227 * Change the PHY context configuration as it is currently referenced
3228 * only by the P2P Device MAC
3229 */
3230 if (mvmvif->phy_ctxt->ref == 1) {
3231 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3232 &chandef, 1, 1);
3233 if (ret)
3234 goto out_unlock;
3235 } else {
3236 /*
3237 * The PHY context is shared with other MACs. Need to remove the
 3238 * P2P Device from the binding, allocate a new PHY context and
3239 * create a new binding
3240 */
3241 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3242 if (!phy_ctxt) {
3243 ret = -ENOSPC;
3244 goto out_unlock;
3245 }
3246
3247 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3248 1, 1);
3249 if (ret) {
3250 IWL_ERR(mvm, "Failed to change PHY context\n");
3251 goto out_unlock;
3252 }
3253
3254 /* Unbind the P2P_DEVICE from the current PHY context */
3255 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3256 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3257 goto out_unlock;
3258
3259 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3260
3261 /* Bind the P2P_DEVICE to the new allocated PHY context */
3262 mvmvif->phy_ctxt = phy_ctxt;
3263
3264 ret = iwl_mvm_binding_add_vif(mvm, vif);
3265 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3266 goto out_unlock;
3267
3268 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3269 }
3270
3271schedule_time_event:
8ca151b5 3272 /* Schedule the time events */
e635c797 3273 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
8ca151b5 3274
31d385ae 3275out_unlock:
8ca151b5
JB
3276 mutex_unlock(&mvm->mutex);
3277 IWL_DEBUG_MAC80211(mvm, "leave\n");
8ca151b5
JB
3278 return ret;
3279}
3280
3281static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3282{
3283 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3284
3285 IWL_DEBUG_MAC80211(mvm, "enter\n");
3286
3287 mutex_lock(&mvm->mutex);
bf5da87f 3288 iwl_mvm_stop_roc(mvm);
8ca151b5
JB
3289 mutex_unlock(&mvm->mutex);
3290
3291 IWL_DEBUG_MAC80211(mvm, "leave\n");
3292 return 0;
3293}
3294
b08c1d97
LC
3295static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3296 struct ieee80211_chanctx_conf *ctx)
8ca151b5 3297{
fe0f2de3
IP
3298 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3299 struct iwl_mvm_phy_ctxt *phy_ctxt;
8ca151b5
JB
3300 int ret;
3301
b08c1d97
LC
3302 lockdep_assert_held(&mvm->mutex);
3303
53a9d61e 3304 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
fe0f2de3 3305
fe0f2de3
IP
3306 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3307 if (!phy_ctxt) {
3308 ret = -ENOSPC;
3309 goto out;
3310 }
8ca151b5 3311
dcbc3e1a 3312 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
53a9d61e
IP
3313 ctx->rx_chains_static,
3314 ctx->rx_chains_dynamic);
fe0f2de3
IP
3315 if (ret) {
3316 IWL_ERR(mvm, "Failed to add PHY context\n");
3317 goto out;
3318 }
3319
53a9d61e 3320 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
fe0f2de3
IP
3321 *phy_ctxt_id = phy_ctxt->id;
3322out:
b08c1d97
LC
3323 return ret;
3324}
3325
3326static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3327 struct ieee80211_chanctx_conf *ctx)
3328{
3329 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3330 int ret;
3331
3332 mutex_lock(&mvm->mutex);
3333 ret = __iwl_mvm_add_chanctx(mvm, ctx);
8ca151b5 3334 mutex_unlock(&mvm->mutex);
b08c1d97 3335
8ca151b5
JB
3336 return ret;
3337}
3338
b08c1d97
LC
3339static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3340 struct ieee80211_chanctx_conf *ctx)
3341{
3342 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3343 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3344
3345 lockdep_assert_held(&mvm->mutex);
3346
3347 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3348}
3349
3350static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3351 struct ieee80211_chanctx_conf *ctx)
3352{
3353 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3354
3355 mutex_lock(&mvm->mutex);
b08c1d97 3356 __iwl_mvm_remove_chanctx(mvm, ctx);
3357 mutex_unlock(&mvm->mutex);
3358}
3359
3360static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3361 struct ieee80211_chanctx_conf *ctx,
3362 u32 changed)
3363{
3364 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3365 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3366 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
8ca151b5 3367
3368 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3369 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3370 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3371 IEEE80211_CHANCTX_CHANGE_RADAR |
3372 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3373 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3374 phy_ctxt->ref, changed))
3375 return;
3376
8ca151b5 3377 mutex_lock(&mvm->mutex);
4d66449a 3378 iwl_mvm_bt_coex_vif_change(mvm);
dcbc3e1a 3379 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3380 ctx->rx_chains_static,
3381 ctx->rx_chains_dynamic);
3382 mutex_unlock(&mvm->mutex);
3383}
3384
3385static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3386 struct ieee80211_vif *vif,
3387 struct ieee80211_chanctx_conf *ctx,
3388 bool switching_chanctx)
8ca151b5 3389{
3390 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3391 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3392 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3393 int ret;
3394
b08c1d97 3395 lockdep_assert_held(&mvm->mutex);
8ca151b5 3396
fe0f2de3 3397 mvmvif->phy_ctxt = phy_ctxt;
3398
3399 switch (vif->type) {
3400 case NL80211_IFTYPE_AP:
3401 /* only needed if we're switching chanctx (i.e. during CSA) */
3402 if (switching_chanctx) {
3403 mvmvif->ap_ibss_active = true;
3404 break;
3405 }
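		/* not switching: fall through, handled like IBSS below */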
5023d966 3406 case NL80211_IFTYPE_ADHOC:
3407 /*
3408 * The AP binding flow is handled as part of the start_ap flow
5023d966 3409 * (in bss_info_changed), similarly for IBSS.
3410 */
3411 ret = 0;
b08c1d97 3412 goto out;
8ca151b5 3413 case NL80211_IFTYPE_STATION:
2533edce 3414 break;
8ca151b5 3415 case NL80211_IFTYPE_MONITOR:
3416 /* always disable PS when a monitor interface is active */
3417 mvmvif->ps_disabled = true;
3418 break;
3419 default:
3420 ret = -EINVAL;
b08c1d97 3421 goto out;
3422 }
3423
3424 ret = iwl_mvm_binding_add_vif(mvm, vif);
3425 if (ret)
b08c1d97 3426 goto out;
3427
3428 /*
3429 * Power state must be updated before quotas,
3430 * otherwise fw will complain.
3431 */
999609f1 3432 iwl_mvm_power_update_mac(mvm);
3433
3434 /* Setting the quota at this stage is only required for monitor
3435 * interfaces. For the other types, the bss_info changed flow
3436 * will handle quota settings.
3437 */
3438 if (vif->type == NL80211_IFTYPE_MONITOR) {
1e1391ca 3439 mvmvif->monitor_active = true;
7754ae79 3440 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3441 if (ret)
3442 goto out_remove_binding;
3443 }
3444
bd3398e2 3445 /* Handle binding during CSA */
a57c688d 3446 if (vif->type == NL80211_IFTYPE_AP) {
7754ae79 3447 iwl_mvm_update_quotas(mvm, false, NULL);
3dfd3a97 3448 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3449 }
3450
4741dd04 3451 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3452 u32 duration = 2 * vif->bss_conf.beacon_int;
3453
3454 /* iwl_mvm_protect_session() reads directly from the
3455 * device (the system time), so make sure it is
3456 * available.
3457 */
3458 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3459 if (ret)
3460 goto out_remove_binding;
3461
3462 /* Protect the session to make sure we hear the first
3463 * beacon on the new channel.
3464 */
3465 iwl_mvm_protect_session(mvm, vif, duration, duration,
3466 vif->bss_conf.beacon_int / 2,
3467 true);
3468
3469 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3470
7754ae79 3471 iwl_mvm_update_quotas(mvm, false, NULL);
3472 }
3473
b08c1d97 3474 goto out;
8ca151b5 3475
b08c1d97 3476out_remove_binding:
8ca151b5 3477 iwl_mvm_binding_remove_vif(mvm, vif);
999609f1 3478 iwl_mvm_power_update_mac(mvm);
b08c1d97 3479out:
3480 if (ret)
3481 mvmvif->phy_ctxt = NULL;
3482 return ret;
3483}
3484static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3485 struct ieee80211_vif *vif,
3486 struct ieee80211_chanctx_conf *ctx)
3487{
3488 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
b08c1d97 3489 int ret;
3490
3491 mutex_lock(&mvm->mutex);
f0c97783 3492 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3493 mutex_unlock(&mvm->mutex);
3494
3495 return ret;
3496}
3497
3498static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3499 struct ieee80211_vif *vif,
3500 struct ieee80211_chanctx_conf *ctx,
3501 bool switching_chanctx)
3502{
3503 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
f0c97783 3504 struct ieee80211_vif *disabled_vif = NULL;
3505
3506 lockdep_assert_held(&mvm->mutex);
3507
3508 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3509
8ca151b5 3510 switch (vif->type) {
5023d966 3511 case NL80211_IFTYPE_ADHOC:
b08c1d97 3512 goto out;
8ca151b5 3513 case NL80211_IFTYPE_MONITOR:
1e1391ca 3514 mvmvif->monitor_active = false;
2533edce 3515 mvmvif->ps_disabled = false;
8ca151b5 3516 break;
3517 case NL80211_IFTYPE_AP:
3518 /* This part is triggered only during CSA */
4741dd04 3519 if (!switching_chanctx || !mvmvif->ap_ibss_active)
b08c1d97 3520 goto out;
bd3398e2 3521
3522 mvmvif->csa_countdown = false;
3523
3524 /* Set CS bit on all the stations */
3525 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3526
3527 /* Save blocked iface, the timeout is set on the next beacon */
3528 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3529
bd3398e2 3530 mvmvif->ap_ibss_active = false;
3531 break;
3532 case NL80211_IFTYPE_STATION:
3533 if (!switching_chanctx)
3534 break;
3535
3536 disabled_vif = vif;
3537
3dfd3a97 3538 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
f0c97783 3539 break;
3540 default:
3541 break;
3542 }
3543
7754ae79 3544 iwl_mvm_update_quotas(mvm, false, disabled_vif);
1e1391ca 3545 iwl_mvm_binding_remove_vif(mvm, vif);
1c2abf72 3546
b08c1d97 3547out:
a11e144e 3548 mvmvif->phy_ctxt = NULL;
999609f1 3549 iwl_mvm_power_update_mac(mvm);
3550}
3551
3552static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3553 struct ieee80211_vif *vif,
3554 struct ieee80211_chanctx_conf *ctx)
3555{
3556 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3557
3558 mutex_lock(&mvm->mutex);
f0c97783 3559 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3560 mutex_unlock(&mvm->mutex);
3561}
3562
3563static int
3564iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3565 struct ieee80211_vif_chanctx_switch *vifs)
b08c1d97 3566{
3567 int ret;
3568
b08c1d97 3569 mutex_lock(&mvm->mutex);
f0c97783 3570 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3571 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3572
3573 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3574 if (ret) {
3575 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3576 goto out_reassign;
3577 }
3578
3579 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3580 true);
3581 if (ret) {
3582 IWL_ERR(mvm,
3583 "failed to assign new_ctx during channel switch\n");
3584 goto out_remove;
3585 }
3586
3587 /* we don't support TDLS during DCM - can be caused by channel switch */
3588 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3589 iwl_mvm_teardown_tdls_peers(mvm);
3590
3591 goto out;
3592
3593out_remove:
3594 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3595
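	/*
	 * Unwind: try to put the vif back on the old channel context;
	 * if that fails as well, the only recovery left is a firmware
	 * restart.
	 */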
3596out_reassign:
6fd1fb63 3597 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3598 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3599 goto out_restart;
3600 }
3601
3602 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3603 true)) {
3604 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3605 goto out_restart;
3606 }
3607
3608 goto out;
3609
3610out_restart:
3611 /* things keep failing, better restart the hw */
3612 iwl_mvm_nic_restart(mvm, false);
3613
3614out:
3615 mutex_unlock(&mvm->mutex);
3616
3617 return ret;
3618}
3619
3620static int
3621iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3622 struct ieee80211_vif_chanctx_switch *vifs)
3623{
3624 int ret;
3625
3626 mutex_lock(&mvm->mutex);
3627 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3628
3629 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3630 true);
3631 if (ret) {
3632 IWL_ERR(mvm,
3633 "failed to assign new_ctx during channel switch\n");
3634 goto out_reassign;
3635 }
3636
3637 goto out;
3638
3639out_reassign:
3640 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3641 true)) {
3642 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3643 goto out_restart;
3644 }
3645
3646 goto out;
3647
3648out_restart:
3649 /* things keep failing, better restart the hw */
3650 iwl_mvm_nic_restart(mvm, false);
3651
3652out:
3653 mutex_unlock(&mvm->mutex);
3654
3655 return ret;
3656}
3657
3658static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3659 struct ieee80211_vif_chanctx_switch *vifs,
3660 int n_vifs,
3661 enum ieee80211_chanctx_switch_mode mode)
3662{
3663 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3664 int ret;
3665
3666	/* we only support a single vif right now */
3667 if (n_vifs > 1)
3668 return -EOPNOTSUPP;
3669
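	/*
	 * SWAP_CONTEXTS replaces old_ctx with new_ctx in the firmware,
	 * while REASSIGN_VIF keeps both contexts and only moves the vif
	 * binding between them.
	 */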
3670 switch (mode) {
3671 case CHANCTX_SWMODE_SWAP_CONTEXTS:
3672 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3673 break;
3674 case CHANCTX_SWMODE_REASSIGN_VIF:
48a256e8 3675 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3676 break;
3677 default:
3678 ret = -EOPNOTSUPP;
3679 break;
3680 }
3681
3682 return ret;
3683}
3684
3685static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3686 struct ieee80211_sta *sta,
3687 bool set)
3688{
3689 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
9d8ce6af 3690 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3691
3692 if (!mvm_sta || !mvm_sta->vif) {
3693 IWL_ERR(mvm, "Station is not associated to a vif\n");
3694 return -EINVAL;
3695 }
3696
3697 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3698}
3699
3700#ifdef CONFIG_NL80211_TESTMODE
3701static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3702 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3703 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
f6c6ad42 3704 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3705};
3706
3707static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3708 struct ieee80211_vif *vif,
3709 void *data, int len)
3710{
3711 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3712 int err;
3713 u32 noa_duration;
3714
3715 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3716 if (err)
3717 return err;
3718
3719 if (!tb[IWL_MVM_TM_ATTR_CMD])
3720 return -EINVAL;
3721
3722 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3723 case IWL_MVM_TM_CMD_SET_NOA:
3724 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3725 !vif->bss_conf.enable_beacon ||
3726 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3727 return -EINVAL;
3728
3729 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3730 if (noa_duration >= vif->bss_conf.beacon_int)
3731 return -EINVAL;
3732
3733 mvm->noa_duration = noa_duration;
3734 mvm->noa_vif = vif;
3735
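		/*
		 * The stored NoA vif/duration are presumably picked up the
		 * next time the quotas are recomputed, i.e. right below.
		 */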
7754ae79 3736 return iwl_mvm_update_quotas(mvm, false, NULL);
3737 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3738 /* must be associated client vif - ignore authorized */
3739 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3740 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3741 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3742 return -EINVAL;
3743
3744 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3745 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3746 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3747 }
3748
3749 return -EOPNOTSUPP;
3750}
3751
3752static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3753 struct ieee80211_vif *vif,
3754 void *data, int len)
3755{
3756 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3757 int err;
3758
3759 mutex_lock(&mvm->mutex);
3760 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3761 mutex_unlock(&mvm->mutex);
3762
3763 return err;
3764}
3765#endif
3766
3767static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3768 struct ieee80211_vif *vif,
3769 struct ieee80211_channel_switch *chsw)
3770{
3771 /* By implementing this operation, we prevent mac80211 from
3772 * starting its own channel switch timer, so that we can call
3773 * ieee80211_chswitch_done() ourselves at the right time
3774 * (which is when the absence time event starts).
3775 */
3776
3777 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3778 "dummy channel switch op\n");
3779}
3780
3781static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3782 struct ieee80211_vif *vif,
3783 struct ieee80211_channel_switch *chsw)
3784{
3785 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
664322fa 3786 struct ieee80211_vif *csa_vif;
f6c34820 3787 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
dc88b4ba 3788 u32 apply_time;
f028905c 3789 int ret;
3790
3791 mutex_lock(&mvm->mutex);
664322fa 3792
3793 mvmvif->csa_failed = false;
3794
6b20d774 3795 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
f028905c 3796 chsw->chandef.center_freq1);
6b20d774 3797
21023b1e 3798 iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
f35d9c55 3799
3800 switch (vif->type) {
3801 case NL80211_IFTYPE_AP:
3802 csa_vif =
3803 rcu_dereference_protected(mvm->csa_vif,
3804 lockdep_is_held(&mvm->mutex));
3805 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3806 "Another CSA is already in progress")) {
3807 ret = -EBUSY;
3808 goto out_unlock;
3809 }
3810
3811 rcu_assign_pointer(mvm->csa_vif, vif);
7ef0aab6 3812
3813 if (WARN_ONCE(mvmvif->csa_countdown,
3814 "Previous CSA countdown didn't complete")) {
3815 ret = -EBUSY;
3816 goto out_unlock;
3817 }
3818
6b20d774 3819 break;
dc88b4ba 3820 case NL80211_IFTYPE_STATION:
3821 /* Schedule the time event to a bit before beacon 1,
3822		 * to make sure we're on the new channel when the
3823 * GO/AP arrives.
3824 */
3825 apply_time = chsw->device_timestamp +
3826 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
3827 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
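		/*
		 * beacon_int and IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT are in TU
		 * while the device timestamp counts usec, hence the * 1024
		 * (1 TU = 1024 usec).
		 */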
3828
3829 if (chsw->block_tx)
3830 iwl_mvm_csa_client_absent(mvm, vif);
3831
4500e133 3832 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
dc88b4ba 3833 apply_time);
3834 if (mvmvif->bf_data.bf_enabled) {
3835 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3836 if (ret)
3837 goto out_unlock;
3838 }
3839
dc88b4ba 3840 break;
3841 default:
3842 break;
3843 }
bd3398e2 3844
3845 mvmvif->ps_disabled = true;
3846
3847 ret = iwl_mvm_power_update_ps(mvm);
3848 if (ret)
3849 goto out_unlock;
f028905c 3850
3851 /* we won't be on this channel any longer */
3852 iwl_mvm_teardown_tdls_peers(mvm);
3853
3854out_unlock:
3855 mutex_unlock(&mvm->mutex);
3856
3857 return ret;
3858}
3859
3860static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3861 struct ieee80211_vif *vif)
3862{
3863 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3864 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3865 int ret;
3866
3867 mutex_lock(&mvm->mutex);
3868
3869 if (mvmvif->csa_failed) {
3870 mvmvif->csa_failed = false;
3871 ret = -EIO;
3872 goto out_unlock;
3873 }
3874
3875 if (vif->type == NL80211_IFTYPE_STATION) {
3876 struct iwl_mvm_sta *mvmsta;
3877
3878 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3879 mvmvif->ap_sta_id);
3880
3881 if (WARN_ON(!mvmsta)) {
3882 ret = -EIO;
3883 goto out_unlock;
3884 }
3885
3886 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3887
3888 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3889
3890 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3891 if (ret)
3892 goto out_unlock;
3893
3894 iwl_mvm_stop_session_protection(mvm, vif);
3895 }
3896
3897 mvmvif->ps_disabled = false;
3898
3899 ret = iwl_mvm_power_update_ps(mvm);
3900
a57c688d 3901out_unlock:
3902 mutex_unlock(&mvm->mutex);
3903
3904 return ret;
3905}
3906
3907static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3908 struct ieee80211_vif *vif, u32 queues, bool drop)
3909{
3910 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3911 struct iwl_mvm_vif *mvmvif;
3912 struct iwl_mvm_sta *mvmsta;
3913 struct ieee80211_sta *sta;
3914 int i;
3915 u32 msk = 0;
3916
3917 if (!vif || vif->type != NL80211_IFTYPE_STATION)
3918 return;
3919
3920 mutex_lock(&mvm->mutex);
3921 mvmvif = iwl_mvm_vif_from_mac80211(vif);
c5b0e7c0 3922
3923	/* flush the AP station and all TDLS peers */
3924 for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
3925 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3926 lockdep_is_held(&mvm->mutex));
3927 if (IS_ERR_OR_NULL(sta))
3928 continue;
3929
3930 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3931 if (mvmsta->vif != vif)
3932 continue;
3933
3934 /* make sure only TDLS peers or the AP are flushed */
3935 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
3936
3937 msk |= mvmsta->tfd_queue_msk;
480acbce 3938 }
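	/* msk now covers the TFD queues of the AP station and TDLS peers */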
c5b0e7c0 3939
6d440b25 3940 if (drop) {
5888a40c 3941 if (iwl_mvm_flush_tx_path(mvm, msk, 0))
3942 IWL_ERR(mvm, "flush request fail\n");
3943 mutex_unlock(&mvm->mutex);
3944 } else {
3945 mutex_unlock(&mvm->mutex);
4e6c48e0 3946
3947 /* this can take a while, and we may need/want other operations
3948 * to succeed while doing this, so do it without the mutex held
3949 */
3950 iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
3951 }
3952}
3953
3954static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
3955 struct survey_info *survey)
3956{
3957 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3958 int ret;
3959
3960 memset(survey, 0, sizeof(*survey));
3961
3962 /* only support global statistics right now */
3963 if (idx != 0)
3964 return -ENOENT;
3965
859d914c 3966 if (fw_has_capa(&mvm->fw->ucode_capa,
3967 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
3968 return -ENOENT;
3969
3970 mutex_lock(&mvm->mutex);
3971
3972 if (mvm->ucode_loaded) {
33cef925 3973 ret = iwl_mvm_request_statistics(mvm, false);
3974 if (ret)
3975 goto out;
3976 }
3977
3978 survey->filled = SURVEY_INFO_TIME |
3979 SURVEY_INFO_TIME_RX |
3980 SURVEY_INFO_TIME_TX |
3981 SURVEY_INFO_TIME_SCAN;
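	/*
	 * The radio statistics are accumulated in usec while survey times
	 * are reported in msec, hence the do_div() conversions below.
	 */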
3982 survey->time = mvm->accu_radio_stats.on_time_rf +
3983 mvm->radio_stats.on_time_rf;
3984 do_div(survey->time, USEC_PER_MSEC);
3985
3986 survey->time_rx = mvm->accu_radio_stats.rx_time +
3987 mvm->radio_stats.rx_time;
3988 do_div(survey->time_rx, USEC_PER_MSEC);
3989
3990 survey->time_tx = mvm->accu_radio_stats.tx_time +
3991 mvm->radio_stats.tx_time;
3992 do_div(survey->time_tx, USEC_PER_MSEC);
3993
3994 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
3995 mvm->radio_stats.on_time_scan;
3996 do_div(survey->time_scan, USEC_PER_MSEC);
3997
10a7c028 3998 ret = 0;
3999 out:
4000 mutex_unlock(&mvm->mutex);
4001 return ret;
4002}
4003
4004static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4005 struct ieee80211_vif *vif,
4006 struct ieee80211_sta *sta,
4007 struct station_info *sinfo)
4008{
4009 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4010 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4011 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4012
4013 if (fw_has_capa(&mvm->fw->ucode_capa,
4014 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4015 return;
4016
4017	/* if beacon filtering isn't on, mac80211 does it anyway */
4018 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4019 return;
4020
4021 if (!vif->bss_conf.assoc)
4022 return;
4023
4024 mutex_lock(&mvm->mutex);
4025
4026 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4027 goto unlock;
4028
4029 if (iwl_mvm_request_statistics(mvm, false))
4030 goto unlock;
4031
4032 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4033 mvmvif->beacon_stats.accu_num_beacons;
4034 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4035 if (mvmvif->beacon_stats.avg_signal) {
4036 /* firmware only reports a value after RXing a few beacons */
4037 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4038 sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4039 }
4040 unlock:
4041 mutex_unlock(&mvm->mutex);
4042}
4043
4044static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4045 struct ieee80211_vif *vif,
4046 const struct ieee80211_event *event)
d42f5350 4047{
5d4f929e 4048#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...) \
4049 do { \
4050 if ((_cnt) && --(_cnt)) \
4051 break; \
5d4f929e 4052 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4053 } while (0)
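	/*
	 * The trigger fires once the per-event countdown (_cnt) reaches
	 * zero; a countdown of 0 collects on every occurrence.
	 */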
4054
4055 struct iwl_fw_dbg_trigger_tlv *trig;
4056 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4057
4058 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4059 return;
4060
4061 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4062 trig_mlme = (void *)trig->data;
4063 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4064 return;
4065
4066 if (event->u.mlme.data == ASSOC_EVENT) {
4067 if (event->u.mlme.status == MLME_DENIED)
4068 CHECK_MLME_TRIGGER(mvm, trig, buf,
4069 trig_mlme->stop_assoc_denied,
4070 "DENIED ASSOC: reason %d",
4071 event->u.mlme.reason);
4072 else if (event->u.mlme.status == MLME_TIMEOUT)
4073 CHECK_MLME_TRIGGER(mvm, trig, buf,
4074 trig_mlme->stop_assoc_timeout,
4075 "ASSOC TIMEOUT");
4076 } else if (event->u.mlme.data == AUTH_EVENT) {
4077 if (event->u.mlme.status == MLME_DENIED)
4078 CHECK_MLME_TRIGGER(mvm, trig, buf,
4079 trig_mlme->stop_auth_denied,
4080 "DENIED AUTH: reason %d",
4081 event->u.mlme.reason);
4082 else if (event->u.mlme.status == MLME_TIMEOUT)
4083 CHECK_MLME_TRIGGER(mvm, trig, buf,
4084 trig_mlme->stop_auth_timeout,
4085 "AUTH TIMEOUT");
4086 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4087 CHECK_MLME_TRIGGER(mvm, trig, buf,
4088 trig_mlme->stop_rx_deauth,
4089 "DEAUTH RX %d", event->u.mlme.reason);
4090 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4091 CHECK_MLME_TRIGGER(mvm, trig, buf,
4092 trig_mlme->stop_tx_deauth,
4093 "DEAUTH TX %d", event->u.mlme.reason);
4094 }
4095#undef CHECK_MLME_TRIGGER
4096}
4097
4098static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4099 struct ieee80211_vif *vif,
4100 const struct ieee80211_event *event)
4101{
4102 struct iwl_fw_dbg_trigger_tlv *trig;
4103 struct iwl_fw_dbg_trigger_ba *ba_trig;
4104
4105 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4106 return;
4107
4108 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4109 ba_trig = (void *)trig->data;
4110 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4111 return;
4112
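	/*
	 * rx_bar is a bitmap of the TIDs for which a received BAR should
	 * trigger a collection.
	 */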
4113 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4114 return;
4115
4116 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4117 "BAR received from %pM, tid %d, ssn %d",
4118 event->u.ba.sta->addr, event->u.ba.tid,
4119 event->u.ba.ssn);
4120}
4121
4122static void
4123iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4124 struct ieee80211_vif *vif,
4125 const struct ieee80211_event *event)
4126{
4127 struct iwl_fw_dbg_trigger_tlv *trig;
4128 struct iwl_fw_dbg_trigger_ba *ba_trig;
4129
4130 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4131 return;
4132
4133 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4134 ba_trig = (void *)trig->data;
4135 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4136 return;
4137
4138 if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4139 return;
4140
4141 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4142 "Frame from %pM timed out, tid %d",
4143 event->u.ba.sta->addr, event->u.ba.tid);
4144}
4145
4146static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4147 struct ieee80211_vif *vif,
4148 const struct ieee80211_event *event)
4149{
4150 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4151
4152 switch (event->type) {
4153 case MLME_EVENT:
4154 iwl_mvm_event_mlme_callback(mvm, vif, event);
4155 break;
4156 case BAR_RX_EVENT:
4157 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4158 break;
4159 case BA_FRAME_TIMEOUT:
4160 iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4161 break;
4162 default:
4163 break;
4164 }
4165}
4166
e5209263 4167const struct ieee80211_ops iwl_mvm_hw_ops = {
4168 .tx = iwl_mvm_mac_tx,
4169 .ampdu_action = iwl_mvm_mac_ampdu_action,
4170 .start = iwl_mvm_mac_start,
cf2c92d8 4171 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4172 .stop = iwl_mvm_mac_stop,
4173 .add_interface = iwl_mvm_mac_add_interface,
4174 .remove_interface = iwl_mvm_mac_remove_interface,
4175 .config = iwl_mvm_mac_config,
e59647ea 4176 .prepare_multicast = iwl_mvm_prepare_multicast,
4177 .configure_filter = iwl_mvm_configure_filter,
4178 .bss_info_changed = iwl_mvm_bss_info_changed,
4179 .hw_scan = iwl_mvm_mac_hw_scan,
4180 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
1ddbbb0c 4181 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4182 .sta_state = iwl_mvm_mac_sta_state,
4183 .sta_notify = iwl_mvm_mac_sta_notify,
4184 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
3e56eadf 4185 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
8ca151b5 4186 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
1f3b0ff8 4187 .sta_rc_update = iwl_mvm_sta_rc_update,
4188 .conf_tx = iwl_mvm_mac_conf_tx,
4189 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
07ecd897 4190 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
c5b0e7c0 4191 .flush = iwl_mvm_mac_flush,
4192 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4193 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4194 .set_key = iwl_mvm_mac_set_key,
4195 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4196 .remain_on_channel = iwl_mvm_roc,
4197 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4198 .add_chanctx = iwl_mvm_add_chanctx,
4199 .remove_chanctx = iwl_mvm_remove_chanctx,
4200 .change_chanctx = iwl_mvm_change_chanctx,
4201 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4202 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
b08c1d97 4203 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
8ca151b5 4204
4205 .start_ap = iwl_mvm_start_ap_ibss,
4206 .stop_ap = iwl_mvm_stop_ap_ibss,
4207 .join_ibss = iwl_mvm_start_ap_ibss,
4208 .leave_ibss = iwl_mvm_stop_ap_ibss,
4209
4210 .set_tim = iwl_mvm_set_tim,
4211
622e3f9b 4212 .channel_switch = iwl_mvm_channel_switch,
f028905c 4213 .pre_channel_switch = iwl_mvm_pre_channel_switch,
f6c34820 4214 .post_channel_switch = iwl_mvm_post_channel_switch,
bd3398e2 4215
4216 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4217 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4218 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4219
4220 .event_callback = iwl_mvm_mac_event_callback,
4221
4222 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4223
4224#ifdef CONFIG_PM_SLEEP
4225 /* look at d3.c */
4226 .suspend = iwl_mvm_suspend,
4227 .resume = iwl_mvm_resume,
4228 .set_wakeup = iwl_mvm_set_wakeup,
4229 .set_rekey_data = iwl_mvm_set_rekey_data,
4230#if IS_ENABLED(CONFIG_IPV6)
4231 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4232#endif
4233 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4234#endif
91a8bcde 4235 .get_survey = iwl_mvm_mac_get_survey,
33cef925 4236 .sta_statistics = iwl_mvm_mac_sta_statistics,
8ca151b5 4237};