drivers/net/wireless/intel/iwlwifi/mvm/scan.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <net/mac80211.h>
9 #include <linux/crc32.h>
10
11 #include "mvm.h"
12 #include "fw/api/scan.h"
13 #include "iwl-io.h"
14
15 #define IWL_DENSE_EBS_SCAN_RATIO 5
16 #define IWL_SPARSE_EBS_SCAN_RATIO 1
17
18 #define IWL_SCAN_DWELL_ACTIVE 10
19 #define IWL_SCAN_DWELL_PASSIVE 110
20 #define IWL_SCAN_DWELL_FRAGMENTED 44
21 #define IWL_SCAN_DWELL_EXTENDED 90
22 #define IWL_SCAN_NUM_OF_FRAGS 3
23
24 /* adaptive dwell max budget time [TU] for full scan */
25 #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
26 /* adaptive dwell max budget time [TU] for directed scan */
27 #define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
28 /* adaptive dwell default high band APs number */
29 #define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
30 /* adaptive dwell default low band APs number */
31 #define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
32 /* adaptive dwell default APs number in social channels (1, 6, 11) */
33 #define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
34 /* number of scan channels */
35 #define IWL_SCAN_NUM_CHANNELS 112
36 /* adaptive dwell number of APs override mask for p2p friendly GO */
37 #define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
38 /* adaptive dwell number of APs override mask for social channels */
39 #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
40 /* adaptive dwell number of APs override for p2p friendly GO channels */
41 #define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
42 /* adaptive dwell number of APs override for social channels */
43 #define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
44
45 /* minimal number of 2GHz and 5GHz channels in the regular scan request */
46 #define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
47
48 /* Number of iterations on the channel for mei filtered scan */
49 #define IWL_MEI_SCAN_NUM_ITER 5U
50
51 struct iwl_mvm_scan_timing_params {
52 u32 suspend_time;
53 u32 max_out_time;
54 };
55
56 static struct iwl_mvm_scan_timing_params scan_timing[] = {
57 [IWL_SCAN_TYPE_UNASSOC] = {
58 .suspend_time = 0,
59 .max_out_time = 0,
60 },
61 [IWL_SCAN_TYPE_WILD] = {
62 .suspend_time = 30,
63 .max_out_time = 120,
64 },
65 [IWL_SCAN_TYPE_MILD] = {
66 .suspend_time = 120,
67 .max_out_time = 120,
68 },
69 [IWL_SCAN_TYPE_FRAGMENTED] = {
70 .suspend_time = 95,
71 .max_out_time = 44,
72 },
73 [IWL_SCAN_TYPE_FAST_BALANCE] = {
74 .suspend_time = 30,
75 .max_out_time = 37,
76 },
77 };
78
79 struct iwl_mvm_scan_params {
80 /* For CDB this is the low band scan type; for non-CDB it is the only scan type. */
81 enum iwl_mvm_scan_type type;
82 enum iwl_mvm_scan_type hb_type;
83 u32 n_channels;
84 u16 delay;
85 int n_ssids;
86 struct cfg80211_ssid *ssids;
87 struct ieee80211_channel **channels;
88 u32 flags;
89 u8 *mac_addr;
90 u8 *mac_addr_mask;
91 bool no_cck;
92 bool pass_all;
93 int n_match_sets;
94 struct iwl_scan_probe_req preq;
95 struct cfg80211_match_set *match_sets;
96 int n_scan_plans;
97 struct cfg80211_sched_scan_plan *scan_plans;
98 bool iter_notif;
99 struct cfg80211_scan_6ghz_params *scan_6ghz_params;
100 u32 n_6ghz_params;
101 bool scan_6ghz;
102 bool enable_6ghz_passive;
103 bool respect_p2p_go, respect_p2p_go_hb;
104 u8 bssid[ETH_ALEN] __aligned(2);
105 };
106
107 static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
108 {
109 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
110
111 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
112 return (void *)&cmd->v8.data;
113
114 if (iwl_mvm_is_adaptive_dwell_supported(mvm))
115 return (void *)&cmd->v7.data;
116
117 if (iwl_mvm_cdb_scan_api(mvm))
118 return (void *)&cmd->v6.data;
119
120 return (void *)&cmd->v1.data;
121 }
122
123 static inline struct iwl_scan_umac_chan_param *
124 iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
125 {
126 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
127
128 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
129 return &cmd->v8.channel;
130
131 if (iwl_mvm_is_adaptive_dwell_supported(mvm))
132 return &cmd->v7.channel;
133
134 if (iwl_mvm_cdb_scan_api(mvm))
135 return &cmd->v6.channel;
136
137 return &cmd->v1.channel;
138 }
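/*
 * Note (descriptive, not in the original source): these two helpers must
 * stay in sync -- both select the same command-version layout (v8 for
 * adaptive dwell v2, v7 for adaptive dwell, v6 for the CDB scan API,
 * v1 otherwise), once for the trailing data and once for the channel
 * parameters.
 */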
139
140 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
141 {
142 if (mvm->scan_rx_ant != ANT_NONE)
143 return mvm->scan_rx_ant;
144 return iwl_mvm_get_valid_rx_ant(mvm);
145 }
146
147 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
148 {
149 u16 rx_chain;
150 u8 rx_ant;
151
152 rx_ant = iwl_mvm_scan_rx_ant(mvm);
153 rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
154 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
155 rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
156 rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
157 return cpu_to_le16(rx_chain);
158 }
159
160 static inline __le32
161 iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
162 bool no_cck)
163 {
164 u32 tx_ant;
165
166 iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
167 tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
168
169 if (band == NL80211_BAND_2GHZ && !no_cck)
170 return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 |
171 tx_ant);
172 else
173 return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
174 }
175
176 static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
177 {
178 return mvm->tcm.result.global_load;
179 }
180
181 static enum iwl_mvm_traffic_load
182 iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
183 {
184 return mvm->tcm.result.band_load[band];
185 }
186
187 struct iwl_mvm_scan_iter_data {
188 u32 global_cnt;
189 struct ieee80211_vif *current_vif;
190 bool is_dcm_with_p2p_go;
191 };
192
193 static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
194 struct ieee80211_vif *vif)
195 {
196 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
197 struct iwl_mvm_scan_iter_data *data = _data;
198 struct iwl_mvm_vif *curr_mvmvif;
199
200 if (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
201 mvmvif->deflink.phy_ctxt &&
202 mvmvif->deflink.phy_ctxt->id < NUM_PHY_CTX)
203 data->global_cnt += 1;
204
205 if (!data->current_vif || vif == data->current_vif)
206 return;
207
208 curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
209
210 if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
211 mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
212 mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
213 data->is_dcm_with_p2p_go = true;
214 }
215
216 static enum
217 iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
218 struct ieee80211_vif *vif,
219 enum iwl_mvm_traffic_load load,
220 bool low_latency)
221 {
222 struct iwl_mvm_scan_iter_data data = {
223 .current_vif = vif,
224 .is_dcm_with_p2p_go = false,
225 .global_cnt = 0,
226 };
227
228 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
229 IEEE80211_IFACE_ITER_NORMAL,
230 iwl_mvm_scan_iterator,
231 &data);
232
233 if (!data.global_cnt)
234 return IWL_SCAN_TYPE_UNASSOC;
235
236 if (fw_has_api(&mvm->fw->ucode_capa,
237 IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
238 if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
239 (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
240 return IWL_SCAN_TYPE_FRAGMENTED;
241
242 /*
243 * In case of DCM with a P2P GO where the BSS DTIM interval is
244 * shorter than 220 msec, set all scan requests to fast-balance scan
245 */
246 if (vif && vif->type == NL80211_IFTYPE_STATION &&
247 data.is_dcm_with_p2p_go &&
248 ((vif->bss_conf.beacon_int *
249 vif->bss_conf.dtim_period) < 220))
250 return IWL_SCAN_TYPE_FAST_BALANCE;
251 }
252
253 if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
254 return IWL_SCAN_TYPE_MILD;
255
256 return IWL_SCAN_TYPE_WILD;
257 }
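/*
 * Rough summary of the selection above (descriptive only): no active
 * interfaces -> UNASSOC. With fragmented-scan support, high load or low
 * latency (except on a P2P device vif) -> FRAGMENTED, and a station vif
 * running concurrently with a P2P GO on a different PHY context whose
 * beacon_int * dtim_period is below 220 -> FAST_BALANCE. Otherwise,
 * medium-or-higher load or low latency -> MILD, else WILD.
 */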
258
259 static enum
260 iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
261 struct ieee80211_vif *vif)
262 {
263 enum iwl_mvm_traffic_load load;
264 bool low_latency;
265
266 load = iwl_mvm_get_traffic_load(mvm);
267 low_latency = iwl_mvm_low_latency(mvm);
268
269 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
270 }
271
272 static enum
273 iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
274 struct ieee80211_vif *vif,
275 enum nl80211_band band)
276 {
277 enum iwl_mvm_traffic_load load;
278 bool low_latency;
279
280 load = iwl_mvm_get_traffic_load_band(mvm, band);
281 low_latency = iwl_mvm_low_latency_band(mvm, band);
282
283 return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
284 }
285
286 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
287 {
288 /* require rrm scan whenever the fw supports it */
289 return fw_has_capa(&mvm->fw->ucode_capa,
290 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
291 }
292
293 static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
294 {
295 int max_probe_len;
296
297 max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
298
299 /* we create the 802.11 header and SSID element */
300 max_probe_len -= 24 + 2;
301
302 /* DS parameter set element is added on 2.4GHZ band if required */
303 if (iwl_mvm_rrm_scan_needed(mvm))
304 max_probe_len -= 3;
305
306 return max_probe_len;
307 }
308
309 int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
310 {
311 int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
312
313 /* TODO: [BUG] This function should return the maximum allowed size of
314 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
315 * in the same command. So the correct implementation of this function
316 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
317 * command has only 512 bytes and it would leave us with about 240
318 * bytes for scan IEs, which is clearly not enough. So meanwhile
319 * we will report an incorrect value. This may result in a failure to
320 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
321 * functions with -ENOBUFS, if a large enough probe will be provided.
322 */
323 return max_ie_len;
324 }
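/*
 * Illustrative arithmetic behind the comment above: with a 512-byte
 * probe request buffer, subtracting the 26 bytes of 802.11 header plus
 * SSID element and 3 bytes for the DS Parameter Set leaves ~483 bytes;
 * splitting that between the 2.4 GHz and 5 GHz IE sets would leave
 * roughly 240 bytes per band, hence the concern above.
 */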
325
326 void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
327 struct iwl_rx_cmd_buffer *rxb)
328 {
329 struct iwl_rx_packet *pkt = rxb_addr(rxb);
330 struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
331
332 IWL_DEBUG_SCAN(mvm,
333 "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
334 notif->status, notif->scanned_channels);
335
336 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
337 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
338 ieee80211_sched_scan_results(mvm->hw);
339 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
340 }
341 }
342
343 void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
344 struct iwl_rx_cmd_buffer *rxb)
345 {
346 IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
347 ieee80211_sched_scan_results(mvm->hw);
348 }
349
350 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
351 {
352 switch (status) {
353 case IWL_SCAN_EBS_SUCCESS:
354 return "successful";
355 case IWL_SCAN_EBS_INACTIVE:
356 return "inactive";
357 case IWL_SCAN_EBS_FAILED:
358 case IWL_SCAN_EBS_CHAN_NOT_FOUND:
359 default:
360 return "failed";
361 }
362 }
363
364 void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
365 struct iwl_rx_cmd_buffer *rxb)
366 {
367 struct iwl_rx_packet *pkt = rxb_addr(rxb);
368 struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
369 bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
370
371 /* If this happens, the firmware has mistakenly sent an LMAC
372 * notification during UMAC scans -- warn and ignore it.
373 */
374 if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
375 IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
376 return;
377
378 /* scan status must be locked for proper checking */
379 lockdep_assert_held(&mvm->mutex);
380
381 /* We first check if we were stopping a scan, in which case we
382 * just clear the stopping flag. Then we check if it was a
383 * firmware initiated stop, in which case we need to inform
384 * mac80211.
385 * Note that we can have a stopping and a running scan
386 * simultaneously, but we can't have two different types of
387 * scans stopping or running at the same time (since LMAC
388 * doesn't support it).
389 */
390
391 if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
392 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
393
394 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
395 aborted ? "aborted" : "completed",
396 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
397 IWL_DEBUG_SCAN(mvm,
398 "Last line %d, Last iteration %d, Time after last iteration %d\n",
399 scan_notif->last_schedule_line,
400 scan_notif->last_schedule_iteration,
401 __le32_to_cpu(scan_notif->time_after_last_iter));
402
403 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
404 } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
405 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
406 aborted ? "aborted" : "completed",
407 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
408
409 mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
410 } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
411 WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
412
413 IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
414 aborted ? "aborted" : "completed",
415 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
416 IWL_DEBUG_SCAN(mvm,
417 "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
418 scan_notif->last_schedule_line,
419 scan_notif->last_schedule_iteration,
420 __le32_to_cpu(scan_notif->time_after_last_iter));
421
422 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
423 ieee80211_sched_scan_stopped(mvm->hw);
424 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
425 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
426 struct cfg80211_scan_info info = {
427 .aborted = aborted,
428 };
429
430 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
431 aborted ? "aborted" : "completed",
432 iwl_mvm_ebs_status_str(scan_notif->ebs_status));
433
434 mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
435 ieee80211_scan_completed(mvm->hw, &info);
436 cancel_delayed_work(&mvm->scan_timeout_dwork);
437 iwl_mvm_resume_tcm(mvm);
438 } else {
439 IWL_ERR(mvm,
440 "got scan complete notification but no scan is running\n");
441 }
442
443 mvm->last_ebs_successful =
444 scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
445 scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
446 }
447
448 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
449 {
450 int i;
451
452 for (i = 0; i < PROBE_OPTION_MAX; i++) {
453 if (!ssid_list[i].len)
454 break;
455 if (ssid_list[i].len == ssid_len &&
456 !memcmp(ssid_list[i].ssid, ssid, ssid_len))
457 return i;
458 }
459 return -1;
460 }
461
462 /* We insert the SSIDs in an inverted order, because the FW will
463 * invert it back.
464 */
465 static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
466 struct iwl_ssid_ie *ssids,
467 u32 *ssid_bitmap)
468 {
469 int i, j;
470 int index;
471 u32 tmp_bitmap = 0;
472
473 /*
474 * copy SSIDs from match list.
475 * iwl_config_sched_scan_profiles() uses the order of these ssids to
476 * config match list.
477 */
478 for (i = 0, j = params->n_match_sets - 1;
479 j >= 0 && i < PROBE_OPTION_MAX;
480 i++, j--) {
481 /* skip empty SSID matchsets */
482 if (!params->match_sets[j].ssid.ssid_len)
483 continue;
484 ssids[i].id = WLAN_EID_SSID;
485 ssids[i].len = params->match_sets[j].ssid.ssid_len;
486 memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
487 ssids[i].len);
488 }
489
490 /* add SSIDs from scan SSID list */
491 for (j = params->n_ssids - 1;
492 j >= 0 && i < PROBE_OPTION_MAX;
493 i++, j--) {
494 index = iwl_ssid_exist(params->ssids[j].ssid,
495 params->ssids[j].ssid_len,
496 ssids);
497 if (index < 0) {
498 ssids[i].id = WLAN_EID_SSID;
499 ssids[i].len = params->ssids[j].ssid_len;
500 memcpy(ssids[i].ssid, params->ssids[j].ssid,
501 ssids[i].len);
502 tmp_bitmap |= BIT(i);
503 } else {
504 tmp_bitmap |= BIT(index);
505 }
506 }
507 if (ssid_bitmap)
508 *ssid_bitmap = tmp_bitmap;
509 }
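/*
 * Example of the resulting layout: with two non-empty match sets and one
 * scan SSID, match_sets[1] and match_sets[0] land in ssids[0] and
 * ssids[1] (reversed, as noted above), the scan SSID goes to ssids[2]
 * and BIT(2) is set in *ssid_bitmap -- unless it duplicates a match-set
 * entry, in which case the bit of that existing entry is set instead.
 */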
510
511 static int
512 iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
513 struct cfg80211_sched_scan_request *req)
514 {
515 struct iwl_scan_offload_profile *profile;
516 struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
517 struct iwl_scan_offload_blocklist *blocklist;
518 struct iwl_scan_offload_profile_cfg_data *data;
519 int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
520 int profile_cfg_size = sizeof(*data) +
521 sizeof(*profile) * max_profiles;
522 struct iwl_host_cmd cmd = {
523 .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
524 .len[1] = profile_cfg_size,
525 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
526 .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
527 };
528 int blocklist_len;
529 int i;
530 int ret;
531
532 if (WARN_ON(req->n_match_sets > max_profiles))
533 return -EIO;
534
535 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
536 blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
537 else
538 blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
539
540 blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL);
541 if (!blocklist)
542 return -ENOMEM;
543
544 profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
545 if (!profile_cfg_v1) {
546 ret = -ENOMEM;
547 goto free_blocklist;
548 }
549
550 cmd.data[0] = blocklist;
551 cmd.len[0] = sizeof(*blocklist) * blocklist_len;
552 cmd.data[1] = profile_cfg_v1;
553
554 /* if max_profiles is MAX_PROFILES_V2, we have the new API */
555 if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
556 struct iwl_scan_offload_profile_cfg *profile_cfg =
557 (struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;
558
559 data = &profile_cfg->data;
560 } else {
561 data = &profile_cfg_v1->data;
562 }
563
564 /* No blocklist configuration */
565 data->num_profiles = req->n_match_sets;
566 data->active_clients = SCAN_CLIENT_SCHED_SCAN;
567 data->pass_match = SCAN_CLIENT_SCHED_SCAN;
568 data->match_notify = SCAN_CLIENT_SCHED_SCAN;
569
570 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
571 data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
572
573 for (i = 0; i < req->n_match_sets; i++) {
574 profile = &profile_cfg_v1->profiles[i];
575 profile->ssid_index = i;
576 /* Support any cipher and auth algorithm */
577 profile->unicast_cipher = 0xff;
578 profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
579 IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
580 IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
581 profile->network_type = IWL_NETWORK_TYPE_ANY;
582 profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
583 profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
584 }
585
586 IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
587
588 ret = iwl_mvm_send_cmd(mvm, &cmd);
589 kfree(profile_cfg_v1);
590 free_blocklist:
591 kfree(blocklist);
592
593 return ret;
594 }
595
596 static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
597 struct cfg80211_sched_scan_request *req)
598 {
599 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
600 IWL_DEBUG_SCAN(mvm,
601 "Sending scheduled scan with filtering, n_match_sets %d\n",
602 req->n_match_sets);
603 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
604 return false;
605 }
606
607 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
608
609 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
610 return true;
611 }
612
613 static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
614 {
615 int ret;
616 struct iwl_host_cmd cmd = {
617 .id = SCAN_OFFLOAD_ABORT_CMD,
618 };
619 u32 status = CAN_ABORT_STATUS;
620
621 ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
622 if (ret)
623 return ret;
624
625 if (status != CAN_ABORT_STATUS) {
626 /*
627 * The scan abort will return 1 for success or
628 * 2 for "failure". A failure condition can be
629 * due to simply not being in an active scan which
630 * can occur if we send the scan abort before the
631 * microcode has notified us that a scan is completed.
632 */
633 IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
634 ret = -ENOENT;
635 }
636
637 return ret;
638 }
639
640 static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
641 struct iwl_scan_req_tx_cmd *tx_cmd,
642 bool no_cck)
643 {
644 tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
645 TX_CMD_FLG_BT_DIS);
646 tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
647 NL80211_BAND_2GHZ,
648 no_cck);
649
650 if (!iwl_mvm_has_new_station_api(mvm->fw)) {
651 tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
652 tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
653
654 /*
655 * Fw doesn't use this sta anymore, pending deprecation via HOST API
656 * change
657 */
658 } else {
659 tx_cmd[0].sta_id = 0xff;
660 tx_cmd[1].sta_id = 0xff;
661 }
662
663 tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
664 TX_CMD_FLG_BT_DIS);
665
666 tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
667 NL80211_BAND_5GHZ,
668 no_cck);
669 }
670
671 static void
672 iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
673 struct ieee80211_channel **channels,
674 int n_channels, u32 ssid_bitmap,
675 struct iwl_scan_req_lmac *cmd)
676 {
677 struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
678 int i;
679
680 for (i = 0; i < n_channels; i++) {
681 channel_cfg[i].channel_num =
682 cpu_to_le16(channels[i]->hw_value);
683 channel_cfg[i].iter_count = cpu_to_le16(1);
684 channel_cfg[i].iter_interval = 0;
685 channel_cfg[i].flags =
686 cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
687 ssid_bitmap);
688 }
689 }
690
691 static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
692 size_t len, u8 *const pos)
693 {
694 static const u8 before_ds_params[] = {
695 WLAN_EID_SSID,
696 WLAN_EID_SUPP_RATES,
697 WLAN_EID_REQUEST,
698 WLAN_EID_EXT_SUPP_RATES,
699 };
700 size_t offs;
701 u8 *newpos = pos;
702
703 if (!iwl_mvm_rrm_scan_needed(mvm)) {
704 memcpy(newpos, ies, len);
705 return newpos + len;
706 }
707
708 offs = ieee80211_ie_split(ies, len,
709 before_ds_params,
710 ARRAY_SIZE(before_ds_params),
711 0);
712
713 memcpy(newpos, ies, offs);
714 newpos += offs;
715
716 /* Add a placeholder for DS Parameter Set element */
717 *newpos++ = WLAN_EID_DS_PARAMS;
718 *newpos++ = 1;
719 *newpos++ = 0;
720
721 memcpy(newpos, ies + offs, len - offs);
722 newpos += len - offs;
723
724 return newpos;
725 }
726
727 #define WFA_TPC_IE_LEN 9
728
729 static void iwl_mvm_add_tpc_report_ie(u8 *pos)
730 {
731 pos[0] = WLAN_EID_VENDOR_SPECIFIC;
732 pos[1] = WFA_TPC_IE_LEN - 2;
733 pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
734 pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
735 pos[4] = WLAN_OUI_MICROSOFT & 0xff;
736 pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
737 pos[6] = 0;
738 /* pos[7] - tx power will be inserted by the FW */
739 pos[7] = 0;
740 pos[8] = 0;
741 }
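/*
 * The element built above is the 9-byte WFA TPC report vendor IE, i.e.
 * (assuming the usual Microsoft OUI 00:50:f2 and TPC OUI type 8):
 * dd 07 00 50 f2 08 00 <tx power, filled in by the FW> 00
 */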
742
743 static void
744 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
745 struct ieee80211_scan_ies *ies,
746 struct iwl_mvm_scan_params *params)
747 {
748 struct ieee80211_mgmt *frame = (void *)params->preq.buf;
749 u8 *pos, *newpos;
750 const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
751 params->mac_addr : NULL;
752
753 /*
754 * Unfortunately, right now the offload scan doesn't support randomising
755 * within the firmware, so until the firmware API is ready we implement
756 * it in the driver. This means that the scan iterations won't really be
757 * random, only when it's restarted, but at least that helps a bit.
758 */
759 if (mac_addr)
760 get_random_mask_addr(frame->sa, mac_addr,
761 params->mac_addr_mask);
762 else
763 memcpy(frame->sa, vif->addr, ETH_ALEN);
764
765 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
766 eth_broadcast_addr(frame->da);
767 ether_addr_copy(frame->bssid, params->bssid);
768 frame->seq_ctrl = 0;
769
770 pos = frame->u.probe_req.variable;
771 *pos++ = WLAN_EID_SSID;
772 *pos++ = 0;
773
774 params->preq.mac_header.offset = 0;
775 params->preq.mac_header.len = cpu_to_le16(24 + 2);
776
777 /* Insert ds parameter set element on 2.4 GHz band */
778 newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
779 ies->ies[NL80211_BAND_2GHZ],
780 ies->len[NL80211_BAND_2GHZ],
781 pos);
782 params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
783 params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
784 pos = newpos;
785
786 memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
787 ies->len[NL80211_BAND_5GHZ]);
788 params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
789 params->preq.band_data[1].len =
790 cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
791 pos += ies->len[NL80211_BAND_5GHZ];
792
793 memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
794 ies->len[NL80211_BAND_6GHZ]);
795 params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
796 params->preq.band_data[2].len =
797 cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
798 pos += ies->len[NL80211_BAND_6GHZ];
799 memcpy(pos, ies->common_ies, ies->common_ie_len);
800 params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
801
802 if (iwl_mvm_rrm_scan_needed(mvm) &&
803 !fw_has_capa(&mvm->fw->ucode_capa,
804 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
805 iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
806 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
807 WFA_TPC_IE_LEN);
808 } else {
809 params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
810 }
811 }
812
813 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
814 struct iwl_scan_req_lmac *cmd,
815 struct iwl_mvm_scan_params *params)
816 {
817 cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
818 cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
819 cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
820 cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
821 cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
822 cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
823 cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
824 }
825
826 static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
827 struct ieee80211_scan_ies *ies,
828 int n_channels)
829 {
830 return ((n_ssids <= PROBE_OPTION_MAX) &&
831 (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
832 (ies->common_ie_len +
833 ies->len[NL80211_BAND_2GHZ] +
834 ies->len[NL80211_BAND_5GHZ] <=
835 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
836 }
837
838 static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
839 struct ieee80211_vif *vif)
840 {
841 const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
842 bool low_latency;
843
844 if (iwl_mvm_is_cdb_supported(mvm))
845 low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
846 else
847 low_latency = iwl_mvm_low_latency(mvm);
848
849 /* We can only use EBS if:
850 * 1. the feature is supported;
851 * 2. the last EBS was successful;
852 * 3. if only single scan, the single scan EBS API is supported;
853 * 4. it's not a P2P find operation;
854 * 5. we are not in low latency mode,
855 * or fragmented EBS is supported by the FW.
856 */
857 return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
858 mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
859 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
860 (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
861 }
862
863 static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
864 {
865 return params->n_scan_plans == 1 &&
866 params->scan_plans[0].iterations == 1;
867 }
868
869 static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
870 {
871 return (type == IWL_SCAN_TYPE_FRAGMENTED ||
872 type == IWL_SCAN_TYPE_FAST_BALANCE);
873 }
874
875 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
876 struct iwl_mvm_scan_params *params,
877 struct ieee80211_vif *vif)
878 {
879 int flags = 0;
880
881 if (params->n_ssids == 0)
882 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
883
884 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
885 flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
886
887 if (iwl_mvm_is_scan_fragmented(params->type))
888 flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
889
890 if (iwl_mvm_rrm_scan_needed(mvm) &&
891 fw_has_capa(&mvm->fw->ucode_capa,
892 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
893 flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
894
895 if (params->pass_all)
896 flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
897 else
898 flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
899
900 #ifdef CONFIG_IWLWIFI_DEBUGFS
901 if (mvm->scan_iter_notif_enabled)
902 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
903 #endif
904
905 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
906 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
907
908 if (iwl_mvm_is_regular_scan(params) &&
909 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
910 !iwl_mvm_is_scan_fragmented(params->type))
911 flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
912
913 return flags;
914 }
915
916 static void
917 iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req,
918 struct iwl_scan_probe_req *src_p_req)
919 {
920 int i;
921
922 p_req->mac_header = src_p_req->mac_header;
923 for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++)
924 p_req->band_data[i] = src_p_req->band_data[i];
925 p_req->common_data = src_p_req->common_data;
926 memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf));
927 }
928
929 static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
930 struct iwl_mvm_scan_params *params)
931 {
932 struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
933 struct iwl_scan_probe_req_v1 *preq =
934 (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
935 mvm->fw->ucode_capa.n_scan_channels);
936 u32 ssid_bitmap = 0;
937 int i;
938 u8 band;
939
940 if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
941 return -EINVAL;
942
943 iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
944
945 cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
946 cmd->iter_num = cpu_to_le32(1);
947 cmd->n_channels = (u8)params->n_channels;
948
949 cmd->delay = cpu_to_le32(params->delay);
950
951 cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
952 vif));
953
954 band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
955 cmd->flags = cpu_to_le32(band);
956 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
957 MAC_FILTER_IN_BEACON);
958 iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
959 iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
960
961 /* this API uses bits 1-20 instead of 0-19 */
962 ssid_bitmap <<= 1;
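/* e.g. direct_scan entries 0 and 2 set -> 0b101 becomes 0b1010 */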
963
964 for (i = 0; i < params->n_scan_plans; i++) {
965 struct cfg80211_sched_scan_plan *scan_plan =
966 &params->scan_plans[i];
967
968 cmd->schedule[i].delay =
969 cpu_to_le16(scan_plan->interval);
970 cmd->schedule[i].iterations = scan_plan->iterations;
971 cmd->schedule[i].full_scan_mul = 1;
972 }
973
974 /*
975 * If the number of iterations of the last scan plan is set to
976 * zero, it should run infinitely. However, this is not always the case.
977 * For example, when regular scan is requested the driver sets one scan
978 * plan with one iteration.
979 */
980 if (!cmd->schedule[i - 1].iterations)
981 cmd->schedule[i - 1].iterations = 0xff;
982
983 if (iwl_mvm_scan_use_ebs(mvm, vif)) {
984 cmd->channel_opt[0].flags =
985 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
986 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
987 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
988 cmd->channel_opt[0].non_ebs_ratio =
989 cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
990 cmd->channel_opt[1].flags =
991 cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
992 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
993 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
994 cmd->channel_opt[1].non_ebs_ratio =
995 cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
996 }
997
998 iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
999 params->n_channels, ssid_bitmap, cmd);
1000
1001 iwl_mvm_scan_set_legacy_probe_req(preq, &params->preq);
1002
1003 return 0;
1004 }
1005
1006 static int rate_to_scan_rate_flag(unsigned int rate)
1007 {
1008 static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
1009 [IWL_RATE_1M_INDEX] = SCAN_CONFIG_RATE_1M,
1010 [IWL_RATE_2M_INDEX] = SCAN_CONFIG_RATE_2M,
1011 [IWL_RATE_5M_INDEX] = SCAN_CONFIG_RATE_5M,
1012 [IWL_RATE_11M_INDEX] = SCAN_CONFIG_RATE_11M,
1013 [IWL_RATE_6M_INDEX] = SCAN_CONFIG_RATE_6M,
1014 [IWL_RATE_9M_INDEX] = SCAN_CONFIG_RATE_9M,
1015 [IWL_RATE_12M_INDEX] = SCAN_CONFIG_RATE_12M,
1016 [IWL_RATE_18M_INDEX] = SCAN_CONFIG_RATE_18M,
1017 [IWL_RATE_24M_INDEX] = SCAN_CONFIG_RATE_24M,
1018 [IWL_RATE_36M_INDEX] = SCAN_CONFIG_RATE_36M,
1019 [IWL_RATE_48M_INDEX] = SCAN_CONFIG_RATE_48M,
1020 [IWL_RATE_54M_INDEX] = SCAN_CONFIG_RATE_54M,
1021 };
1022
1023 return rate_to_scan_rate[rate];
1024 }
1025
1026 static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
1027 {
1028 struct ieee80211_supported_band *band;
1029 unsigned int rates = 0;
1030 int i;
1031
1032 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1033 for (i = 0; i < band->n_bitrates; i++)
1034 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1035 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1036 for (i = 0; i < band->n_bitrates; i++)
1037 rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1038
1039 /* Set both basic rates and supported rates */
1040 rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
1041
1042 return cpu_to_le32(rates);
1043 }
1044
1045 static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
1046 struct iwl_scan_dwell *dwell)
1047 {
1048 dwell->active = IWL_SCAN_DWELL_ACTIVE;
1049 dwell->passive = IWL_SCAN_DWELL_PASSIVE;
1050 dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
1051 dwell->extended = IWL_SCAN_DWELL_EXTENDED;
1052 }
1053
1054 static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
1055 u32 max_channels)
1056 {
1057 struct ieee80211_supported_band *band;
1058 int i, j = 0;
1059
1060 band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1061 for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1062 channels[j] = band->channels[i].hw_value;
1063 band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1064 for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1065 channels[j] = band->channels[i].hw_value;
1066 }
1067
1068 static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
1069 u32 flags, u8 channel_flags,
1070 u32 max_channels)
1071 {
1072 enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
1073 struct iwl_scan_config_v1 *cfg = config;
1074
1075 cfg->flags = cpu_to_le32(flags);
1076 cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1077 cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1078 cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1079 cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
1080 cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
1081
1082 iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1083
1084 memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1085
1086 /* This function should not be called when using ADD_STA ver >=12 */
1087 WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
1088
1089 cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1090 cfg->channel_flags = channel_flags;
1091
1092 iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1093 }
1094
1095 static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
1096 u32 flags, u8 channel_flags,
1097 u32 max_channels)
1098 {
1099 struct iwl_scan_config_v2 *cfg = config;
1100
1101 cfg->flags = cpu_to_le32(flags);
1102 cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1103 cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1104 cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1105
1106 if (iwl_mvm_is_cdb_supported(mvm)) {
1107 enum iwl_mvm_scan_type lb_type, hb_type;
1108
1109 lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1110 NL80211_BAND_2GHZ);
1111 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1112 NL80211_BAND_5GHZ);
1113
1114 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1115 cpu_to_le32(scan_timing[lb_type].max_out_time);
1116 cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1117 cpu_to_le32(scan_timing[lb_type].suspend_time);
1118
1119 cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
1120 cpu_to_le32(scan_timing[hb_type].max_out_time);
1121 cfg->suspend_time[SCAN_HB_LMAC_IDX] =
1122 cpu_to_le32(scan_timing[hb_type].suspend_time);
1123 } else {
1124 enum iwl_mvm_scan_type type =
1125 iwl_mvm_get_scan_type(mvm, NULL);
1126
1127 cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1128 cpu_to_le32(scan_timing[type].max_out_time);
1129 cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1130 cpu_to_le32(scan_timing[type].suspend_time);
1131 }
1132
1133 iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1134
1135 memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1136
1137 /* This function should not be called when using ADD_STA ver >=12 */
1138 WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
1139
1140 cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1141 cfg->channel_flags = channel_flags;
1142
1143 iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1144 }
1145
1146 static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
1147 {
1148 void *cfg;
1149 int ret, cmd_size;
1150 struct iwl_host_cmd cmd = {
1151 .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
1152 };
1153 enum iwl_mvm_scan_type type;
1154 enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
1155 int num_channels =
1156 mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
1157 mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
1158 u32 flags;
1159 u8 channel_flags;
1160
1161 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
1162 num_channels = mvm->fw->ucode_capa.n_scan_channels;
1163
1164 if (iwl_mvm_is_cdb_supported(mvm)) {
1165 type = iwl_mvm_get_scan_type_band(mvm, NULL,
1166 NL80211_BAND_2GHZ);
1167 hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1168 NL80211_BAND_5GHZ);
1169 if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
1170 return 0;
1171 } else {
1172 type = iwl_mvm_get_scan_type(mvm, NULL);
1173 if (type == mvm->scan_type)
1174 return 0;
1175 }
1176
1177 if (iwl_mvm_cdb_scan_api(mvm))
1178 cmd_size = sizeof(struct iwl_scan_config_v2);
1179 else
1180 cmd_size = sizeof(struct iwl_scan_config_v1);
1181 cmd_size += mvm->fw->ucode_capa.n_scan_channels;
1182
1183 cfg = kzalloc(cmd_size, GFP_KERNEL);
1184 if (!cfg)
1185 return -ENOMEM;
1186
1187 flags = SCAN_CONFIG_FLAG_ACTIVATE |
1188 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
1189 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
1190 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
1191 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
1192 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
1193 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
1194 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1195 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
1196 SCAN_CONFIG_N_CHANNELS(num_channels) |
1197 (iwl_mvm_is_scan_fragmented(type) ?
1198 SCAN_CONFIG_FLAG_SET_FRAGMENTED :
1199 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
1200
1201 channel_flags = IWL_CHANNEL_FLAG_EBS |
1202 IWL_CHANNEL_FLAG_ACCURATE_EBS |
1203 IWL_CHANNEL_FLAG_EBS_ADD |
1204 IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
1205
1206 /*
1207 * Check for fragmented scan on LMAC2 - high band.
1208 * LMAC1 - low band is checked above.
1209 */
1210 if (iwl_mvm_cdb_scan_api(mvm)) {
1211 if (iwl_mvm_is_cdb_supported(mvm))
1212 flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
1213 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
1214 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
1215 iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
1216 num_channels);
1217 } else {
1218 iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
1219 num_channels);
1220 }
1221
1222 cmd.data[0] = cfg;
1223 cmd.len[0] = cmd_size;
1224 cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1225
1226 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1227
1228 ret = iwl_mvm_send_cmd(mvm, &cmd);
1229 if (!ret) {
1230 mvm->scan_type = type;
1231 mvm->hb_scan_type = hb_type;
1232 }
1233
1234 kfree(cfg);
1235 return ret;
1236 }
1237
1238 int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1239 {
1240 struct iwl_scan_config cfg;
1241 struct iwl_host_cmd cmd = {
1242 .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
1243 .len[0] = sizeof(cfg),
1244 .data[0] = &cfg,
1245 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1246 };
1247
1248 if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
1249 return iwl_mvm_legacy_config_scan(mvm);
1250
1251 memset(&cfg, 0, sizeof(cfg));
1252
1253 if (!iwl_mvm_has_new_station_api(mvm->fw)) {
1254 cfg.bcast_sta_id = mvm->aux_sta.sta_id;
1255 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
1256 /*
1257 * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
1258 * version 5.
1259 */
1260 cfg.bcast_sta_id = 0xff;
1261 }
1262
1263 cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1264 cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1265
1266 IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1267
1268 return iwl_mvm_send_cmd(mvm, &cmd);
1269 }
1270
1271 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
1272 {
1273 int i;
1274
1275 for (i = 0; i < mvm->max_scans; i++)
1276 if (mvm->scan_uid_status[i] == status)
1277 return i;
1278
1279 return -ENOENT;
1280 }
1281
1282 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1283 struct iwl_scan_req_umac *cmd,
1284 struct iwl_mvm_scan_params *params)
1285 {
1286 struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1287 u8 active_dwell, passive_dwell;
1288
1289 timing = &scan_timing[params->type];
1290 active_dwell = IWL_SCAN_DWELL_ACTIVE;
1291 passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1292
1293 if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
1294 cmd->v7.adwell_default_n_aps_social =
1295 IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1296 cmd->v7.adwell_default_n_aps =
1297 IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1298
1299 if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm))
1300 cmd->v9.adwell_default_hb_n_aps =
1301 IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1302
1303 /* if custom max budget was configured with debugfs */
1304 if (IWL_MVM_ADWELL_MAX_BUDGET)
1305 cmd->v7.adwell_max_budget =
1306 cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1307 else if (params->ssids && params->ssids[0].ssid_len)
1308 cmd->v7.adwell_max_budget =
1309 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1310 else
1311 cmd->v7.adwell_max_budget =
1312 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1313
1314 cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1315 cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
1316 cpu_to_le32(timing->max_out_time);
1317 cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
1318 cpu_to_le32(timing->suspend_time);
1319
1320 if (iwl_mvm_is_cdb_supported(mvm)) {
1321 hb_timing = &scan_timing[params->hb_type];
1322
1323 cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
1324 cpu_to_le32(hb_timing->max_out_time);
1325 cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
1326 cpu_to_le32(hb_timing->suspend_time);
1327 }
1328
1329 if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
1330 cmd->v7.active_dwell = active_dwell;
1331 cmd->v7.passive_dwell = passive_dwell;
1332 cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1333 } else {
1334 cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1335 cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1336 if (iwl_mvm_is_cdb_supported(mvm)) {
1337 cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
1338 active_dwell;
1339 cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
1340 passive_dwell;
1341 }
1342 }
1343 } else {
1344 cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
1345 cmd->v1.active_dwell = active_dwell;
1346 cmd->v1.passive_dwell = passive_dwell;
1347 cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1348
1349 if (iwl_mvm_is_cdb_supported(mvm)) {
1350 hb_timing = &scan_timing[params->hb_type];
1351
1352 cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
1353 cpu_to_le32(hb_timing->max_out_time);
1354 cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
1355 cpu_to_le32(hb_timing->suspend_time);
1356 }
1357
1358 if (iwl_mvm_cdb_scan_api(mvm)) {
1359 cmd->v6.scan_priority =
1360 cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1361 cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
1362 cpu_to_le32(timing->max_out_time);
1363 cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
1364 cpu_to_le32(timing->suspend_time);
1365 } else {
1366 cmd->v1.scan_priority =
1367 cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1368 cmd->v1.max_out_time =
1369 cpu_to_le32(timing->max_out_time);
1370 cmd->v1.suspend_time =
1371 cpu_to_le32(timing->suspend_time);
1372 }
1373 }
1374
1375 if (iwl_mvm_is_regular_scan(params))
1376 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1377 else
1378 cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
1379 }
1380
1381 static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
1382 {
1383 return iwl_mvm_is_regular_scan(params) ?
1384 IWL_SCAN_PRIORITY_EXT_6 :
1385 IWL_SCAN_PRIORITY_EXT_2;
1386 }
1387
1388 static void
1389 iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
1390 struct iwl_scan_general_params_v11 *general_params,
1391 struct iwl_mvm_scan_params *params)
1392 {
1393 struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1394 u8 active_dwell, passive_dwell;
1395
1396 timing = &scan_timing[params->type];
1397 active_dwell = IWL_SCAN_DWELL_ACTIVE;
1398 passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1399
1400 general_params->adwell_default_social_chn =
1401 IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1402 general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1403 general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1404
1405 /* if custom max budget was configured with debugfs */
1406 if (IWL_MVM_ADWELL_MAX_BUDGET)
1407 general_params->adwell_max_budget =
1408 cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1409 else if (params->ssids && params->ssids[0].ssid_len)
1410 general_params->adwell_max_budget =
1411 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1412 else
1413 general_params->adwell_max_budget =
1414 cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1415
1416 general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1417 general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
1418 cpu_to_le32(timing->max_out_time);
1419 general_params->suspend_time[SCAN_LB_LMAC_IDX] =
1420 cpu_to_le32(timing->suspend_time);
1421
1422 hb_timing = &scan_timing[params->hb_type];
1423
1424 general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
1425 cpu_to_le32(hb_timing->max_out_time);
1426 general_params->suspend_time[SCAN_HB_LMAC_IDX] =
1427 cpu_to_le32(hb_timing->suspend_time);
1428
1429 general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1430 general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1431 general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
1432 general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
1433 }
1434
1435 struct iwl_mvm_scan_channel_segment {
1436 u8 start_idx;
1437 u8 end_idx;
1438 u8 first_channel_id;
1439 u8 last_channel_id;
1440 u8 channel_spacing_shift;
1441 u8 band;
1442 };
1443
1444 static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
1445 {
1446 .start_idx = 0,
1447 .end_idx = 13,
1448 .first_channel_id = 1,
1449 .last_channel_id = 14,
1450 .channel_spacing_shift = 0,
1451 .band = PHY_BAND_24
1452 },
1453 {
1454 .start_idx = 14,
1455 .end_idx = 41,
1456 .first_channel_id = 36,
1457 .last_channel_id = 144,
1458 .channel_spacing_shift = 2,
1459 .band = PHY_BAND_5
1460 },
1461 {
1462 .start_idx = 42,
1463 .end_idx = 50,
1464 .first_channel_id = 149,
1465 .last_channel_id = 181,
1466 .channel_spacing_shift = 2,
1467 .band = PHY_BAND_5
1468 },
1469 {
1470 .start_idx = 51,
1471 .end_idx = 111,
1472 .first_channel_id = 1,
1473 .last_channel_id = 241,
1474 .channel_spacing_shift = 2,
1475 .band = PHY_BAND_6
1476 },
1477 };
1478
1479 static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
1480 {
1481 int i, index;
1482
1483 if (!channel_id)
1484 return -EINVAL;
1485
1486 for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
1487 const struct iwl_mvm_scan_channel_segment *ch_segment =
1488 &scan_channel_segments[i];
1489 u32 ch_offset;
1490
1491 if (ch_segment->band != band ||
1492 ch_segment->first_channel_id > channel_id ||
1493 ch_segment->last_channel_id < channel_id)
1494 continue;
1495
1496 ch_offset = (channel_id - ch_segment->first_channel_id) >>
1497 ch_segment->channel_spacing_shift;
1498
1499 index = scan_channel_segments[i].start_idx + ch_offset;
1500 if (index < IWL_SCAN_NUM_CHANNELS)
1501 return index;
1502
1503 break;
1504 }
1505
1506 return -EINVAL;
1507 }
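/*
 * Worked example for the mapping above: channel 44 on PHY_BAND_5 falls
 * in the 36..144 segment, so the index is 14 + ((44 - 36) >> 2) = 16;
 * channel 1 on PHY_BAND_6 maps to 51 + ((1 - 1) >> 2) = 51.
 */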
1508
1509 static const u8 p2p_go_friendly_chs[] = {
1510 36, 40, 44, 48, 149, 153, 157, 161, 165,
1511 };
1512
1513 static const u8 social_chs[] = {
1514 1, 6, 11
1515 };
1516
1517 static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
1518 u8 ch_id, u8 band, u8 *ch_bitmap,
1519 size_t bitmap_n_entries)
1520 {
1521 int i;
1522
1523 if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1524 return;
1525
1526 for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1527 if (p2p_go_friendly_chs[i] == ch_id) {
1528 int ch_idx, bitmap_idx;
1529
1530 ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
1531 if (ch_idx < 0)
1532 return;
1533
1534 bitmap_idx = ch_idx / 8;
1535 if (bitmap_idx >= bitmap_n_entries)
1536 return;
1537
1538 ch_idx = ch_idx % 8;
1539 ch_bitmap[bitmap_idx] |= BIT(ch_idx);
1540
1541 return;
1542 }
1543 }
1544 }
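/*
 * For instance, a P2P device scan that includes channel 149 (a GO
 * friendly channel, index 42 per the table above) ends up setting
 * BIT(42 % 8) = BIT(2) in ch_bitmap[42 / 8] = ch_bitmap[5].
 */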
1545
1546 static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
1547 {
1548 int i;
1549 u32 flags = 0;
1550
1551 if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1552 goto out;
1553
1554 for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1555 if (p2p_go_friendly_chs[i] == ch_id) {
1556 flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
1557 break;
1558 }
1559 }
1560
1561 if (flags)
1562 goto out;
1563
1564 for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
1565 if (social_chs[i] == ch_id) {
1566 flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
1567 break;
1568 }
1569 }
1570
1571 out:
1572 return flags;
1573 }
1574
1575 static void
1576 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1577 struct ieee80211_channel **channels,
1578 int n_channels, u32 flags,
1579 struct iwl_scan_channel_cfg_umac *channel_cfg)
1580 {
1581 int i;
1582
1583 for (i = 0; i < n_channels; i++) {
1584 channel_cfg[i].flags = cpu_to_le32(flags);
1585 channel_cfg[i].v1.channel_num = channels[i]->hw_value;
1586 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
1587 enum nl80211_band band = channels[i]->band;
1588
1589 channel_cfg[i].v2.band =
1590 iwl_mvm_phy_band_from_nl80211(band);
1591 channel_cfg[i].v2.iter_count = 1;
1592 channel_cfg[i].v2.iter_interval = 0;
1593 } else {
1594 channel_cfg[i].v1.iter_count = 1;
1595 channel_cfg[i].v1.iter_interval = 0;
1596 }
1597 }
1598 }
1599
1600 static void
1601 iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
1602 struct ieee80211_channel **channels,
1603 struct iwl_scan_channel_params_v4 *cp,
1604 int n_channels, u32 flags,
1605 enum nl80211_iftype vif_type)
1606 {
1607 u8 *bitmap = cp->adwell_ch_override_bitmap;
1608 size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
1609 int i;
1610
1611 for (i = 0; i < n_channels; i++) {
1612 enum nl80211_band band = channels[i]->band;
1613 struct iwl_scan_channel_cfg_umac *cfg =
1614 &cp->channel_config[i];
1615
1616 cfg->flags = cpu_to_le32(flags);
1617 cfg->v2.channel_num = channels[i]->hw_value;
1618 cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
1619 cfg->v2.iter_count = 1;
1620 cfg->v2.iter_interval = 0;
1621
1622 iwl_mvm_scan_ch_add_n_aps_override(vif_type,
1623 cfg->v2.channel_num,
1624 cfg->v2.band, bitmap,
1625 bitmap_n_entries);
1626 }
1627 }
1628
1629 static void
1630 iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
1631 struct ieee80211_channel **channels,
1632 struct iwl_scan_channel_params_v7 *cp,
1633 int n_channels, u32 flags,
1634 enum nl80211_iftype vif_type, u32 version)
1635 {
1636 int i;
1637
1638 for (i = 0; i < n_channels; i++) {
1639 enum nl80211_band band = channels[i]->band;
1640 struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
1641 u32 n_aps_flag =
1642 iwl_mvm_scan_ch_n_aps_flag(vif_type,
1643 channels[i]->hw_value);
1644 u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band);
1645
1646 cfg->flags = cpu_to_le32(flags | n_aps_flag);
1647 cfg->v2.channel_num = channels[i]->hw_value;
1648 if (cfg80211_channel_is_psc(channels[i]))
1649 cfg->flags = 0;
1650 cfg->v2.iter_count = 1;
1651 cfg->v2.iter_interval = 0;
1652 if (version < 17)
1653 cfg->v2.band = iwl_band;
1654 else
1655 cfg->flags |= cpu_to_le32((iwl_band <<
1656 IWL_CHAN_CFG_FLAGS_BAND_POS));
1657 }
1658 }
1659
1660 static void
1661 iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
1662 struct iwl_mvm_scan_params *params,
1663 struct iwl_scan_probe_params_v4 *pp)
1664 {
1665 int j, idex_s = 0, idex_b = 0;
1666 struct cfg80211_scan_6ghz_params *scan_6ghz_params =
1667 params->scan_6ghz_params;
1668 bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa,
1669 IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN);
1670
1671 for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
1672 j++) {
1673 if (!params->ssids[j].ssid_len)
1674 continue;
1675
1676 pp->short_ssid[idex_s] =
1677 cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
1678 params->ssids[j].ssid_len));
1679
1680 if (hidden_supported) {
1681 pp->direct_scan[idex_s].id = WLAN_EID_SSID;
1682 pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
1683 memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
1684 params->ssids[j].ssid_len);
1685 }
1686 idex_s++;
1687 }
1688
1689 /*
1690 * Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
1691 * collocated parameters. This might not be optimal, as this processing
1692 * does not (yet) correspond to the actual channels, so it is possible
1693 * that some entries would be left out.
1694 *
1695 * TODO: improve this logic.
1696 */
1697 for (j = 0; j < params->n_6ghz_params; j++) {
1698 int k;
1699
1700 /* First, try to place the short SSID */
1701 if (scan_6ghz_params[j].short_ssid_valid) {
1702 for (k = 0; k < idex_s; k++) {
1703 if (pp->short_ssid[k] ==
1704 cpu_to_le32(scan_6ghz_params[j].short_ssid))
1705 break;
1706 }
1707
1708 if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
1709 pp->short_ssid[idex_s++] =
1710 cpu_to_le32(scan_6ghz_params[j].short_ssid);
1711 }
1712 }
1713
1714 /* try to place BSSID for the same entry */
1715 for (k = 0; k < idex_b; k++) {
1716 if (!memcmp(&pp->bssid_array[k],
1717 scan_6ghz_params[j].bssid, ETH_ALEN))
1718 break;
1719 }
1720
1721 if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
1722 memcpy(&pp->bssid_array[idex_b++],
1723 scan_6ghz_params[j].bssid, ETH_ALEN);
1724 }
1725 }
1726
1727 pp->short_ssid_num = idex_s;
1728 pp->bssid_num = idex_b;
1729 }
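/*
 * Note: the short SSIDs filled above, ~crc32_le(~0, ssid, len), are the
 * standard CRC-32 of the SSID, which is how the spec defines the "short
 * SSID" carried in reduced neighbor report entries for collocated
 * 6 GHz APs.
 */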
1730
1731 /* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v7 */
1732 static u32
1733 iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
1734 struct iwl_mvm_scan_params *params,
1735 u32 n_channels,
1736 struct iwl_scan_probe_params_v4 *pp,
1737 struct iwl_scan_channel_params_v7 *cp,
1738 enum nl80211_iftype vif_type,
1739 u32 version)
1740 {
1741 int i;
1742 struct cfg80211_scan_6ghz_params *scan_6ghz_params =
1743 params->scan_6ghz_params;
1744 u32 ch_cnt;
1745
1746 for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
1747 struct iwl_scan_channel_cfg_umac *cfg =
1748 &cp->channel_config[ch_cnt];
1749
1750 u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
1751 u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
1752 bool force_passive, found = false, allow_passive = true,
1753 unsolicited_probe_on_chan = false, psc_no_listen = false;
1754 s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
1755
1756 /*
1757 * Avoid performing passive scan on non PSC channels unless the
1758 * scan is specifically a passive scan, i.e., no SSIDs
1759 * configured in the scan command.
1760 */
1761 if (!cfg80211_channel_is_psc(params->channels[i]) &&
1762 !params->n_6ghz_params && params->n_ssids)
1763 continue;
1764
1765 cfg->v1.channel_num = params->channels[i]->hw_value;
1766 if (version < 17)
1767 cfg->v2.band = PHY_BAND_6;
1768 else
1769 cfg->flags |= cpu_to_le32(PHY_BAND_6 <<
1770 IWL_CHAN_CFG_FLAGS_BAND_POS);
1771
1772 cfg->v5.iter_count = 1;
1773 cfg->v5.iter_interval = 0;
1774
1775 /*
1776 * To optimize the scan time, i.e., reduce the scan dwell time
1777 * on each channel, the below logic tries to set 3 direct BSSID
1778 * probe requests for each broadcast probe request with a short
1779 * SSID.
1780 * TODO: improve this logic
1781 */
1782 n_used_bssid_entries = 3;
1783 for (j = 0; j < params->n_6ghz_params; j++) {
1784 s8 tmp_psd_20;
1785
1786 if (scan_6ghz_params[j].channel_idx != i)
1787 continue;
1788
1789 /* Use the highest PSD value allowed as advertised by
1790 * APs for this channel
1791 */
1792 tmp_psd_20 = scan_6ghz_params[j].psd_20;
1793 if (tmp_psd_20 !=
1794 IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
1795 (psd_20 ==
1796 IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
1797 psd_20 < tmp_psd_20))
1798 psd_20 = tmp_psd_20;
1799
1800 found = false;
1801 unsolicited_probe_on_chan |=
1802 scan_6ghz_params[j].unsolicited_probe;
1803 psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
1804
1805 for (k = 0; k < pp->short_ssid_num; k++) {
1806 if (!scan_6ghz_params[j].unsolicited_probe &&
1807 le32_to_cpu(pp->short_ssid[k]) ==
1808 scan_6ghz_params[j].short_ssid) {
1809 /* Relevant short SSID bit already set */
1810 if (s_ssid_bitmap & BIT(k)) {
1811 found = true;
1812 break;
1813 }
1814
1815 /*
1816 * Use short SSID only to create a new
1817 * iteration during channel dwell or in
1818 * case that the short SSID has a
1819 * matching SSID, i.e., scan for hidden
1820 * APs.
1821 */
1822 if (n_used_bssid_entries >= 3) {
1823 s_ssid_bitmap |= BIT(k);
1824 s_max++;
1825 n_used_bssid_entries -= 3;
1826 found = true;
1827 break;
1828 } else if (pp->direct_scan[k].len) {
1829 s_ssid_bitmap |= BIT(k);
1830 s_max++;
1831 found = true;
1832 allow_passive = false;
1833 break;
1834 }
1835 }
1836 }
1837
1838 if (found)
1839 continue;
1840
1841 for (k = 0; k < pp->bssid_num; k++) {
1842 if (!memcmp(&pp->bssid_array[k],
1843 scan_6ghz_params[j].bssid,
1844 ETH_ALEN)) {
1845 if (!(bssid_bitmap & BIT(k))) {
1846 bssid_bitmap |= BIT(k);
1847 b_max++;
1848 n_used_bssid_entries++;
1849 }
1850 break;
1851 }
1852 }
1853 }
1854
1855 if (cfg80211_channel_is_psc(params->channels[i]) &&
1856 psc_no_listen)
1857 flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
1858
1859 if (unsolicited_probe_on_chan)
1860 flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
1861
1862 /*
1863 * In the following cases apply passive scan:
1864 * 1. Non fragmented scan:
1865 * - PSC channel with NO_LISTEN_FLAG on should be treated
1866 * like non PSC channel
1867 * - Non PSC channel with more than 3 short SSIDs or more
1868 * than 9 BSSIDs.
1869 * - Non PSC Channel with unsolicited probe response and
1870 * more than 2 short SSIDs or more than 6 BSSIDs.
1871 * - PSC channel with more than 2 short SSIDs or more than
1872 * 6 BSSIDs.
1873 * 2. Fragmented scan:
1874 * - PSC channel with more than 1 SSID or 3 BSSIDs.
1875 * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
1876 * - Non PSC channel with unsolicited probe response and
1877 * more than 1 SSID or more than 3 BSSIDs.
1878 */
1879 if (!iwl_mvm_is_scan_fragmented(params->type)) {
1880 if (!cfg80211_channel_is_psc(params->channels[i]) ||
1881 flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
1882 force_passive = (s_max > 3 || b_max > 9);
1883 force_passive |= (unsolicited_probe_on_chan &&
1884 (s_max > 2 || b_max > 6));
1885 } else {
1886 force_passive = (s_max > 2 || b_max > 6);
1887 }
1888 } else if (cfg80211_channel_is_psc(params->channels[i])) {
1889 force_passive = (s_max > 1 || b_max > 3);
1890 } else {
1891 force_passive = (s_max > 2 || b_max > 6);
1892 force_passive |= (unsolicited_probe_on_chan &&
1893 (s_max > 1 || b_max > 3));
1894 }
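/*
 * For example, in a non-fragmented scan a non-PSC channel that collected
 * 4 short SSIDs (s_max > 3) or 10 BSSIDs (b_max > 9) is forced to
 * passive, while a PSC channel tolerates at most 2 short SSIDs or
 * 6 BSSIDs before being forced to passive.
 */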
1895 if ((allow_passive && force_passive) ||
1896 (!(bssid_bitmap | s_ssid_bitmap) &&
1897 !cfg80211_channel_is_psc(params->channels[i])))
1898 flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
1899 else
1900 flags |= bssid_bitmap | (s_ssid_bitmap << 16);
1901
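/*
 * At this point 'flags' either forces a passive scan or packs the two
 * selection bitmaps: bits 0-15 select entries of bssid_array and
 * bits 16-31 select entries of short_ssid (note the '<< 16' above).
 */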
1902 cfg->flags |= cpu_to_le32(flags);
1903 if (version >= 17)
1904 cfg->v5.psd_20 = psd_20;
1905
1906 ch_cnt++;
1907 }
1908
1909 if (params->n_channels > ch_cnt)
1910 IWL_DEBUG_SCAN(mvm,
1911 "6GHz: reducing number channels: (%u->%u)\n",
1912 params->n_channels, ch_cnt);
1913
1914 return ch_cnt;
1915 }
1916
1917 static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
1918 struct iwl_mvm_scan_params *params,
1919 struct ieee80211_vif *vif)
1920 {
1921 u8 flags = 0;
1922
1923 flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
1924
1925 if (iwl_mvm_scan_use_ebs(mvm, vif))
1926 flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
1927 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1928 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1929
1930 /* set fragmented ebs for fragmented scan on HB channels */
1931 if ((!iwl_mvm_is_cdb_supported(mvm) &&
1932 iwl_mvm_is_scan_fragmented(params->type)) ||
1933 (iwl_mvm_is_cdb_supported(mvm) &&
1934 iwl_mvm_is_scan_fragmented(params->hb_type)))
1935 flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
1936
1937 /*
1938 * Force EBS in case the scan is fragmented and there is a need to take P2P
1939 * GO operation into consideration during the scan.
1940 */
1941 if ((!iwl_mvm_is_cdb_supported(mvm) &&
1942 iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
1943 (iwl_mvm_is_cdb_supported(mvm) &&
1944 iwl_mvm_is_scan_fragmented(params->hb_type) &&
1945 params->respect_p2p_go_hb)) {
1946 IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
1947 flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
1948 }
1949
1950 return flags;
1951 }
1952
1953 static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
1954 struct iwl_mvm_scan_params *params,
1955 struct ieee80211_vif *vif)
1956 {
1957 struct ieee80211_supported_band *sband =
1958 &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
1959 u32 n_disabled, i;
1960
1961 params->enable_6ghz_passive = false;
1962
1963 if (params->scan_6ghz)
1964 return;
1965
1966 if (!fw_has_capa(&mvm->fw->ucode_capa,
1967 IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) {
1968 IWL_DEBUG_SCAN(mvm,
1969 "6GHz passive scan: Not supported by FW\n");
1970 return;
1971 }
1972
1973 /* 6GHz passive scan allowed only on station interface */
1974 if (vif->type != NL80211_IFTYPE_STATION) {
1975 IWL_DEBUG_SCAN(mvm,
1976 "6GHz passive scan: not station interface\n");
1977 return;
1978 }
1979
1980 /*
1981 * 6GHz passive scan is allowed in a defined time interval following HW
1982 * reset or resume flow, or while not associated and a large interval
1983 * has passed since the last 6GHz passive scan.
1984 */
1985 if ((vif->cfg.assoc ||
1986 time_after(mvm->last_6ghz_passive_scan_jiffies +
1987 (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
1988 (time_before(mvm->last_reset_or_resume_time_jiffies +
1989 (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
1990 jiffies))) {
1991 IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
1992 vif->cfg.assoc ? "associated" :
1993 "timeout did not expire");
1994 return;
1995 }
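/*
 * In other words, subject to the remaining checks below, the passive
 * part is only added while in the grace period that follows an HW
 * reset/resume, or when not associated and the previous 6GHz passive
 * scan is old enough.
 */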
1996
1997 /* not enough channels in the regular scan request */
1998 if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
1999 IWL_DEBUG_SCAN(mvm,
2000 "6GHz passive scan: not enough channels\n");
2001 return;
2002 }
2003
2004 for (i = 0; i < params->n_ssids; i++) {
2005 if (!params->ssids[i].ssid_len)
2006 break;
2007 }
2008
2009 /* not a wildcard scan, so cannot enable passive 6GHz scan */
2010 if (i == params->n_ssids) {
2011 IWL_DEBUG_SCAN(mvm,
2012 "6GHz passive scan: no wildcard SSID\n");
2013 return;
2014 }
2015
2016 if (!sband || !sband->n_channels) {
2017 IWL_DEBUG_SCAN(mvm,
2018 "6GHz passive scan: no 6GHz channels\n");
2019 return;
2020 }
2021
2022 for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
2023 if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
2024 n_disabled++;
2025 }
2026
2027 /*
2028 * Not all the 6GHz channels are disabled, so no need for 6GHz passive
2029 * scan
2030 */
2031 if (n_disabled != sband->n_channels) {
2032 IWL_DEBUG_SCAN(mvm,
2033 "6GHz passive scan: 6GHz channels enabled\n");
2034 return;
2035 }
2036
2037 /* all conditions to enable 6ghz passive scan are satisfied */
2038 IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n");
2039 params->enable_6ghz_passive = true;
2040 }
2041
2042 static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
2043 struct iwl_mvm_scan_params *params,
2044 struct ieee80211_vif *vif,
2045 int type)
2046 {
2047 u16 flags = 0;
2048
2049 /*
2050 * If no direct SSIDs are provided, perform a passive scan. Otherwise,
2051 * if there is a single SSID which is not the broadcast SSID, assume
2052 * that the scan is intended for roaming purposes and thus enable Rx on
2053 * all chains to improve chances of hearing the beacons/probe responses.
2054 */
2055 if (params->n_ssids == 0)
2056 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
2057 else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
2058 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
2059
2060 if (iwl_mvm_is_scan_fragmented(params->type))
2061 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
2062
2063 if (iwl_mvm_is_scan_fragmented(params->hb_type))
2064 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
2065
2066 if (params->pass_all)
2067 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
2068 else
2069 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
2070
2071 if (!iwl_mvm_is_regular_scan(params))
2072 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
2073
2074 if (params->iter_notif ||
2075 mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
2076 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
2077
2078 if (IWL_MVM_ADWELL_ENABLE)
2079 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
2080
2081 if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
2082 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
2083
2084 if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2085 params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
2086 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
2087
2088 if (params->enable_6ghz_passive)
2089 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
2090
2091 if (iwl_mvm_is_oce_supported(mvm) &&
2092 (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
2093 NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
2094 NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
2095 flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
2096
2097 return flags;
2098 }
2099
2100 static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
2101 struct iwl_mvm_scan_params *params,
2102 struct ieee80211_vif *vif, int type)
2103 {
2104 u8 flags = 0;
2105
2106 if (iwl_mvm_is_cdb_supported(mvm)) {
2107 if (params->respect_p2p_go)
2108 flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
2109 if (params->respect_p2p_go_hb)
2110 flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
2111 } else {
2112 if (params->respect_p2p_go)
2113 flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
2114 IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
2115 }
2116
2117 if (params->scan_6ghz &&
2118 fw_has_capa(&mvm->fw->ucode_capa,
2119 IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
2120 flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
2121
2122 return flags;
2123 }
2124
2125 static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
2126 struct iwl_mvm_scan_params *params,
2127 struct ieee80211_vif *vif)
2128 {
2129 u16 flags = 0;
2130
2131 if (params->n_ssids == 0)
2132 flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
2133
2134 if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
2135 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
2136
2137 if (iwl_mvm_is_scan_fragmented(params->type))
2138 flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
2139
2140 if (iwl_mvm_is_cdb_supported(mvm) &&
2141 iwl_mvm_is_scan_fragmented(params->hb_type))
2142 flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
2143
2144 if (iwl_mvm_rrm_scan_needed(mvm) &&
2145 fw_has_capa(&mvm->fw->ucode_capa,
2146 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
2147 flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
2148
2149 if (params->pass_all)
2150 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
2151 else
2152 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
2153
2154 if (!iwl_mvm_is_regular_scan(params))
2155 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
2156
2157 if (params->iter_notif)
2158 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2159
2160 #ifdef CONFIG_IWLWIFI_DEBUGFS
2161 if (mvm->scan_iter_notif_enabled)
2162 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2163 #endif
2164
2165 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
2166 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2167
2168 if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
2169 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
2170
2171 /*
2172 * Extended dwell is relevant only for the low band to start with, as it is
2173 * used for social channels only (1, 6, 11), so even for CDB it is
2174 * enough to check the scan type of the low band.
2175 */
2176 if (iwl_mvm_is_regular_scan(params) &&
2177 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
2178 !iwl_mvm_is_scan_fragmented(params->type) &&
2179 !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
2180 !iwl_mvm_is_oce_supported(mvm))
2181 flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
2182
2183 if (iwl_mvm_is_oce_supported(mvm)) {
2184 if ((params->flags &
2185 NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
2186 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
2187 /* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
2188 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
2189 * the same bit, we need to make sure that we use this bit here
2190 * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be
2191 * used. */
2192 if ((params->flags &
2193 NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
2194 !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
2195 flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
2196 if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
2197 flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
2198 }
2199
2200 return flags;
2201 }
2202
2203 static int
2204 iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
2205 struct iwl_scan_umac_schedule *schedule,
2206 __le16 *delay)
2207 {
2208 int i;
2209 if (WARN_ON(!params->n_scan_plans ||
2210 params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
2211 return -EINVAL;
2212
2213 for (i = 0; i < params->n_scan_plans; i++) {
2214 struct cfg80211_sched_scan_plan *scan_plan =
2215 &params->scan_plans[i];
2216
2217 schedule[i].iter_count = scan_plan->iterations;
2218 schedule[i].interval =
2219 cpu_to_le16(scan_plan->interval);
2220 }
2221
2222 /*
2223 * If the number of iterations of the last scan plan is set to
2224 * zero, it should run infinitely. However, this is not always the case:
2225 * for example, when a regular scan is requested the driver sets a single
2226 * scan plan with a single iteration, so there is nothing to substitute.
2227 */
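/*
 * cfg80211 uses 0 on the last plan to mean "run infinitely"; substitute
 * 0xff, presumably the largest iteration count the firmware accepts.
 */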
2228 if (!schedule[params->n_scan_plans - 1].iter_count)
2229 schedule[params->n_scan_plans - 1].iter_count = 0xff;
2230
2231 *delay = cpu_to_le16(params->delay);
2232
2233 return 0;
2234 }
2235
2236 static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2237 struct iwl_mvm_scan_params *params,
2238 int type, int uid)
2239 {
2240 struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
2241 struct iwl_scan_umac_chan_param *chan_param;
2242 void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
2243 void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
2244 mvm->fw->ucode_capa.n_scan_channels;
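/*
 * The UMAC scan command is laid out as a fixed header, followed by
 * n_scan_channels channel config entries, followed by the tail
 * (schedule, delay, probe request and direct SSIDs); sec_part points
 * at that tail.
 */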
2245 struct iwl_scan_req_umac_tail_v2 *tail_v2 =
2246 (struct iwl_scan_req_umac_tail_v2 *)sec_part;
2247 struct iwl_scan_req_umac_tail_v1 *tail_v1;
2248 struct iwl_ssid_ie *direct_scan;
2249 int ret = 0;
2250 u32 ssid_bitmap = 0;
2251 u8 channel_flags = 0;
2252 u16 gen_flags;
2253 struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
2254
2255 chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
2256
2257 iwl_mvm_scan_umac_dwell(mvm, cmd, params);
2258
2259 mvm->scan_uid_status[uid] = type;
2260
2261 cmd->uid = cpu_to_le32(uid);
2262 gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
2263 cmd->general_flags = cpu_to_le16(gen_flags);
2264 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
2265 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
2266 cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
2267 IWL_SCAN_NUM_OF_FRAGS;
2268 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
2269 cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
2270 IWL_SCAN_NUM_OF_FRAGS;
2271
2272 cmd->v8.general_flags2 =
2273 IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
2274 }
2275
2276 cmd->scan_start_mac_id = scan_vif->id;
2277
2278 if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
2279 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
2280
2281 if (iwl_mvm_scan_use_ebs(mvm, vif)) {
2282 channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
2283 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
2284 IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
2285
2286 /* set fragmented ebs for fragmented scan on HB channels */
2287 if (iwl_mvm_is_frag_ebs_supported(mvm)) {
2288 if (gen_flags &
2289 IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
2290 (!iwl_mvm_is_cdb_supported(mvm) &&
2291 gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
2292 channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
2293 }
2294 }
2295
2296 chan_param->flags = channel_flags;
2297 chan_param->count = params->n_channels;
2298
2299 ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
2300 &tail_v2->delay);
2301 if (ret) {
2302 mvm->scan_uid_status[uid] = 0;
2303 return ret;
2304 }
2305
2306 if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
2307 tail_v2->preq = params->preq;
2308 direct_scan = tail_v2->direct_scan;
2309 } else {
2310 tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part;
2311 iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq,
2312 &params->preq);
2313 direct_scan = tail_v1->direct_scan;
2314 }
2315 iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap);
2316 iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
2317 params->n_channels, ssid_bitmap,
2318 cmd_data);
2319 return 0;
2320 }
2321
2322 static void
2323 iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
2324 struct iwl_mvm_scan_params *params,
2325 struct ieee80211_vif *vif,
2326 struct iwl_scan_general_params_v11 *gp,
2327 u16 gen_flags, u8 gen_flags2,
2328 u32 version)
2329 {
2330 struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
2331
2332 iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
2333
2334 IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n",
2335 gen_flags, gen_flags2);
2336
2337 gp->flags = cpu_to_le16(gen_flags);
2338 gp->flags2 = gen_flags2;
2339
2340 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
2341 gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
2342 if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
2343 gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
2344
2345 if (version < 16) {
2346 gp->scan_start_mac_or_link_id = scan_vif->id;
2347 } else {
2348 struct iwl_mvm_vif_link_info *link_info;
2349 u8 link_id = 0;
2350
2351 /* Use one of the active links (if any). In the future the link ID
2352 * may be part of the scan request coming from the upper layers, in
2353 * which case we would need to use it.
2354 */
2355 if (vif->active_links)
2356 link_id = ffs(vif->active_links) - 1;
2357
2358 link_info = scan_vif->link[link_id];
2359 if (!WARN_ON(!link_info))
2360 gp->scan_start_mac_or_link_id = link_info->fw_link_id;
2361 }
2362 }
2363
2364 static void
2365 iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
2366 struct iwl_scan_probe_params_v3 *pp)
2367 {
2368 pp->preq = params->preq;
2369 pp->ssid_num = params->n_ssids;
2370 iwl_scan_build_ssids(params, pp->direct_scan, NULL);
2371 }
2372
2373 static void
2374 iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
2375 struct iwl_scan_probe_params_v4 *pp,
2376 u32 *bitmap_ssid)
2377 {
2378 pp->preq = params->preq;
2379 iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
2380 }
2381
2382 static void
2383 iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
2384 struct iwl_mvm_scan_params *params,
2385 struct ieee80211_vif *vif,
2386 struct iwl_scan_channel_params_v4 *cp,
2387 u32 channel_cfg_flags)
2388 {
2389 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2390 cp->count = params->n_channels;
2391 cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2392
2393 iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
2394 params->n_channels,
2395 channel_cfg_flags,
2396 vif->type);
2397 }
2398
2399 static void
2400 iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm,
2401 struct iwl_mvm_scan_params *params,
2402 struct ieee80211_vif *vif,
2403 struct iwl_scan_channel_params_v7 *cp,
2404 u32 channel_cfg_flags,
2405 u32 version)
2406 {
2407 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2408 cp->count = params->n_channels;
2409 cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2410 cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2411
2412 iwl_mvm_umac_scan_cfg_channels_v7(mvm, params->channels, cp,
2413 params->n_channels,
2414 channel_cfg_flags,
2415 vif->type, version);
2416
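/*
 * When the 6GHz passive scan was enabled for this request, append every
 * PSC channel of the 6GHz band as an additional entry on top of the
 * channels that were explicitly requested.
 */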
2417 if (params->enable_6ghz_passive) {
2418 struct ieee80211_supported_band *sband =
2419 &mvm->nvm_data->bands[NL80211_BAND_6GHZ];
2420 u32 i;
2421
2422 for (i = 0; i < sband->n_channels; i++) {
2423 struct ieee80211_channel *channel =
2424 &sband->channels[i];
2425
2426 struct iwl_scan_channel_cfg_umac *cfg =
2427 &cp->channel_config[cp->count];
2428
2429 if (!cfg80211_channel_is_psc(channel))
2430 continue;
2431
2432 cfg->v5.channel_num = channel->hw_value;
2433 cfg->v5.iter_count = 1;
2434 cfg->v5.iter_interval = 0;
2435
2436 if (version < 17) {
2437 cfg->flags = 0;
2438 cfg->v2.band = PHY_BAND_6;
2439 } else {
2440 cfg->flags = cpu_to_le32(PHY_BAND_6 <<
2441 IWL_CHAN_CFG_FLAGS_BAND_POS);
2442 cfg->v5.psd_20 =
2443 IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
2444 }
2445 cp->count++;
2446 }
2447 }
2448 }
2449
2450 static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2451 struct iwl_mvm_scan_params *params, int type,
2452 int uid)
2453 {
2454 struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
2455 struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
2456 int ret;
2457 u16 gen_flags;
2458
2459 mvm->scan_uid_status[uid] = type;
2460
2461 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2462 cmd->uid = cpu_to_le32(uid);
2463
2464 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2465 iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
2466 &scan_p->general_params,
2467 gen_flags, 0, 12);
2468
2469 ret = iwl_mvm_fill_scan_sched_params(params,
2470 scan_p->periodic_params.schedule,
2471 &scan_p->periodic_params.delay);
2472 if (ret)
2473 return ret;
2474
2475 iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
2476 iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
2477 &scan_p->channel_params, 0);
2478
2479 return 0;
2480 }
2481
2482 static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
2483 struct ieee80211_vif *vif,
2484 struct iwl_mvm_scan_params *params,
2485 int type, int uid, u32 version)
2486 {
2487 struct iwl_scan_req_umac_v17 *cmd = mvm->scan_cmd;
2488 struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
2489 struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
2490 struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
2491 int ret;
2492 u16 gen_flags;
2493 u8 gen_flags2;
2494 u32 bitmap_ssid = 0;
2495
2496 mvm->scan_uid_status[uid] = type;
2497
2498 cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2499 cmd->uid = cpu_to_le32(uid);
2500
2501 gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2502
2503 if (version >= 15)
2504 gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
2505 else
2506 gen_flags2 = 0;
2507
2508 iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
2509 &scan_p->general_params,
2510 gen_flags, gen_flags2, version);
2511
2512 ret = iwl_mvm_fill_scan_sched_params(params,
2513 scan_p->periodic_params.schedule,
2514 &scan_p->periodic_params.delay);
2515 if (ret)
2516 return ret;
2517
2518 if (!params->scan_6ghz) {
2519 iwl_mvm_scan_umac_fill_probe_p_v4(params,
2520 &scan_p->probe_params,
2521 &bitmap_ssid);
2522 iwl_mvm_scan_umac_fill_ch_p_v7(mvm, params, vif,
2523 &scan_p->channel_params,
2524 bitmap_ssid,
2525 version);
2526 return 0;
2527 } else {
2528 pb->preq = params->preq;
2529 }
2530
2531 cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2532 cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2533 cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2534
2535 iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
2536
2537 cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params,
2538 params->n_channels,
2539 pb, cp, vif->type,
2540 version);
2541 if (!cp->count) {
2542 mvm->scan_uid_status[uid] = 0;
2543 return -EINVAL;
2544 }
2545
2546 if (!params->n_ssids ||
2547 (params->n_ssids == 1 && !params->ssids[0].ssid_len))
2548 cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
2549
2550 return 0;
2551 }
2552
2553 static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2554 struct iwl_mvm_scan_params *params, int type,
2555 int uid)
2556 {
2557 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14);
2558 }
2559
2560 static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2561 struct iwl_mvm_scan_params *params, int type,
2562 int uid)
2563 {
2564 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15);
2565 }
2566
2567 static int iwl_mvm_scan_umac_v16(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2568 struct iwl_mvm_scan_params *params, int type,
2569 int uid)
2570 {
2571 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 16);
2572 }
2573
2574 static int iwl_mvm_scan_umac_v17(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2575 struct iwl_mvm_scan_params *params, int type,
2576 int uid)
2577 {
2578 return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 17);
2579 }
2580
2581 static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
2582 {
2583 return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
2584 }
2585
2586 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
2587 {
2588 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2589 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2590
2591 /* This looks a bit arbitrary, but the idea is that if we run
2592 * out of possible simultaneous scans and the userspace is
2593 * trying to run a scan type that is already running, we
2594 * return -EBUSY. But if the userspace wants to start a
2595 * different type of scan, we stop the opposite type to make
2596 * space for the new request. The reason is backwards
2597 * compatibility with old wpa_supplicant that wouldn't stop a
2598 * scheduled scan before starting a normal scan.
2599 */
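/*
 * For example, when all scan slots are busy: a second regular scan
 * request returns -EBUSY, while a regular scan requested during a
 * scheduled scan stops the scheduled scan to free a slot (and the other
 * way around for a new scheduled scan).
 */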
2600
2601 /* FW supports only a single periodic scan */
2602 if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2603 mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT))
2604 return -EBUSY;
2605
2606 if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
2607 return 0;
2608
2609 /* Use a switch, even though this is a bitmask, so that more
2610 * than one bit set will fall into the default case and we will warn.
2611 */
2612 switch (type) {
2613 case IWL_MVM_SCAN_REGULAR:
2614 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2615 return -EBUSY;
2616 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2617 case IWL_MVM_SCAN_SCHED:
2618 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2619 return -EBUSY;
2620 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2621 case IWL_MVM_SCAN_NETDETECT:
2622 /* For non-unified images, there's no need to stop
2623 * anything for net-detect since the firmware is
2624 * restarted anyway. This way, any sched scans that
2625 * were running will be restarted when we resume.
2626 */
2627 if (!unified_image)
2628 return 0;
2629
2630 /* If this is a unified image and we ran out of scans,
2631 * we need to stop something. Prefer stopping regular
2632 * scans, because the results are useless at this
2633 * point, and we should be able to keep running
2634 * another scheduled scan while suspended.
2635 */
2636 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2637 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
2638 true);
2639 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2640 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
2641 true);
2642 /* Something is wrong if no scan was running but we
2643 * ran out of scans.
2644 */
2645 fallthrough;
2646 default:
2647 WARN_ON(1);
2648 break;
2649 }
2650
2651 return -EIO;
2652 }
2653
2654 #define SCAN_TIMEOUT 30000
2655
2656 void iwl_mvm_scan_timeout_wk(struct work_struct *work)
2657 {
2658 struct delayed_work *delayed_work = to_delayed_work(work);
2659 struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
2660 scan_timeout_dwork);
2661
2662 IWL_ERR(mvm, "regular scan timed out\n");
2663
2664 iwl_force_nmi(mvm->trans);
2665 }
2666
2667 static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
2668 struct iwl_mvm_scan_params *params,
2669 struct ieee80211_vif *vif)
2670 {
2671 if (iwl_mvm_is_cdb_supported(mvm)) {
2672 params->type =
2673 iwl_mvm_get_scan_type_band(mvm, vif,
2674 NL80211_BAND_2GHZ);
2675 params->hb_type =
2676 iwl_mvm_get_scan_type_band(mvm, vif,
2677 NL80211_BAND_5GHZ);
2678 } else {
2679 params->type = iwl_mvm_get_scan_type(mvm, vif);
2680 }
2681 }
2682
2683 struct iwl_scan_umac_handler {
2684 u8 version;
2685 int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2686 struct iwl_mvm_scan_params *params, int type, int uid);
2687 };
2688
2689 #define IWL_SCAN_UMAC_HANDLER(_ver) { \
2690 .version = _ver, \
2691 .handler = iwl_mvm_scan_umac_v##_ver, \
2692 }
2693
2694 static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
2695 /* set the newest version first to shorten the list traversal time */
2696 IWL_SCAN_UMAC_HANDLER(17),
2697 IWL_SCAN_UMAC_HANDLER(16),
2698 IWL_SCAN_UMAC_HANDLER(15),
2699 IWL_SCAN_UMAC_HANDLER(14),
2700 IWL_SCAN_UMAC_HANDLER(12),
2701 };
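/*
 * A hypothetical new command version would follow the same pattern: a
 * thin wrapper such as (purely illustrative, no such version exists
 * here)
 *
 *	static int iwl_mvm_scan_umac_v18(struct iwl_mvm *mvm,
 *					 struct ieee80211_vif *vif,
 *					 struct iwl_mvm_scan_params *params,
 *					 int type, int uid)
 *	{
 *		return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params,
 *						       type, uid, 18);
 *	}
 *
 * plus an IWL_SCAN_UMAC_HANDLER(18) entry at the top of the array above.
 */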
2702
2703 static void iwl_mvm_mei_scan_work(struct work_struct *wk)
2704 {
2705 struct iwl_mei_scan_filter *scan_filter =
2706 container_of(wk, struct iwl_mei_scan_filter, scan_work);
2707 struct iwl_mvm *mvm =
2708 container_of(scan_filter, struct iwl_mvm, mei_scan_filter);
2709 struct iwl_mvm_csme_conn_info *info;
2710 struct sk_buff *skb;
2711 u8 bssid[ETH_ALEN];
2712
2713 mutex_lock(&mvm->mutex);
2714 info = iwl_mvm_get_csme_conn_info(mvm);
2715 memcpy(bssid, info->conn_info.bssid, ETH_ALEN);
2716 mutex_unlock(&mvm->mutex);
2717
2718 while ((skb = skb_dequeue(&scan_filter->scan_res))) {
2719 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2720
2721 if (!memcmp(mgmt->bssid, bssid, ETH_ALEN))
2722 ieee80211_rx_irqsafe(mvm->hw, skb);
2723 else
2724 kfree_skb(skb);
2725 }
2726 }
2727
2728 void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter)
2729 {
2730 skb_queue_head_init(&mei_scan_filter->scan_res);
2731 INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work);
2732 }
2733
2734 /* In case CSME is connected and has link protection set, this function will
2735 * override the scan request to scan only the associated channel and only for
2736 * the associated SSID.
2737 */
2738 static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm,
2739 struct iwl_mvm_scan_params *params)
2740 {
2741 struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm);
2742 struct iwl_mei_conn_info *conn_info;
2743 struct ieee80211_channel *chan;
2744 int scan_iters, i;
2745
2746 if (!info) {
2747 IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n");
2748 return;
2749 }
2750
2751 conn_info = &info->conn_info;
2752 if (!info->conn_info.lp_state || !info->conn_info.ssid_len)
2753 return;
2754
2755 if (!params->n_channels || !params->n_ssids)
2756 return;
2757
2758 mvm->mei_scan_filter.is_mei_limited_scan = true;
2759
2760 chan = ieee80211_get_channel(mvm->hw->wiphy,
2761 ieee80211_channel_to_frequency(conn_info->channel,
2762 conn_info->band));
2763 if (!chan) {
2764 IWL_DEBUG_SCAN(mvm,
2765 "Failed to get CSME channel (chan=%u band=%u)\n",
2766 conn_info->channel, conn_info->band);
2767 return;
2768 }
2769
2770 /* The mei filtered scan must find the AP, otherwise CSME will
2771 * take ownership of the NIC. Add several iterations on the channel to
2772 * make the scan more robust.
2773 */
2774 scan_iters = min(IWL_MEI_SCAN_NUM_ITER, params->n_channels);
2775 params->n_channels = scan_iters;
2776 for (i = 0; i < scan_iters; i++)
2777 params->channels[i] = chan;
2778
2779 IWL_DEBUG_SCAN(mvm, "Mei scan: num iterations=%u\n", scan_iters);
2780
2781 params->n_ssids = 1;
2782 params->ssids[0].ssid_len = conn_info->ssid_len;
2783 memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len);
2784 }
2785
2786 static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
2787 struct ieee80211_vif *vif,
2788 struct iwl_host_cmd *hcmd,
2789 struct iwl_mvm_scan_params *params,
2790 int type)
2791 {
2792 int uid, i, err;
2793 u8 scan_ver;
2794
2795 lockdep_assert_held(&mvm->mutex);
2796 memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
2797
2798 iwl_mvm_mei_limited_scan(mvm, params);
2799
2800 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2801 hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
2802
2803 return iwl_mvm_scan_lmac(mvm, vif, params);
2804 }
2805
2806 uid = iwl_mvm_scan_uid_by_status(mvm, 0);
2807 if (uid < 0)
2808 return uid;
2809
2810 hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
2811
2812 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
2813 IWL_FW_CMD_VER_UNKNOWN);
2814
2815 for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
2816 const struct iwl_scan_umac_handler *ver_handler =
2817 &iwl_scan_umac_handlers[i];
2818
2819 if (ver_handler->version != scan_ver)
2820 continue;
2821
2822 return ver_handler->handler(mvm, vif, params, type, uid);
2823 }
2824
2825 err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
2826 if (err)
2827 return err;
2828
2829 return uid;
2830 }
2831
2832 struct iwl_mvm_scan_respect_p2p_go_iter_data {
2833 struct ieee80211_vif *current_vif;
2834 bool p2p_go;
2835 enum nl80211_band band;
2836 };
2837
2838 static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
2839 struct ieee80211_vif *vif)
2840 {
2841 struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
2842 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2843
2844 /* exclude the given vif */
2845 if (vif == data->current_vif)
2846 return;
2847
2848 if (vif->type == NL80211_IFTYPE_AP && vif->p2p) {
2849 u32 link_id;
2850
2851 for (link_id = 0;
2852 link_id < ARRAY_SIZE(mvmvif->link);
2853 link_id++) {
2854 struct iwl_mvm_vif_link_info *link =
2855 mvmvif->link[link_id];
2856
2857 if (link && link->phy_ctxt->id < NUM_PHY_CTX &&
2858 (data->band == NUM_NL80211_BANDS ||
2859 link->phy_ctxt->channel->band == data->band)) {
2860 data->p2p_go = true;
2861 break;
2862 }
2863 }
2864 }
2865 }
2866
2867 static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
2868 struct ieee80211_vif *vif,
2869 bool low_latency,
2870 enum nl80211_band band)
2871 {
2872 struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
2873 .current_vif = vif,
2874 .p2p_go = false,
2875 .band = band,
2876 };
2877
2878 if (!low_latency)
2879 return false;
2880
2881 ieee80211_iterate_active_interfaces_atomic(mvm->hw,
2882 IEEE80211_IFACE_ITER_NORMAL,
2883 iwl_mvm_scan_respect_p2p_go_iter,
2884 &data);
2885
2886 return data.p2p_go;
2887 }
2888
2889 static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
2890 struct ieee80211_vif *vif,
2891 enum nl80211_band band)
2892 {
2893 bool low_latency = iwl_mvm_low_latency_band(mvm, band);
2894
2895 return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
2896 }
2897
2898 static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
2899 struct ieee80211_vif *vif)
2900 {
2901 bool low_latency = iwl_mvm_low_latency(mvm);
2902
2903 return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
2904 NUM_NL80211_BANDS);
2905 }
2906
2907 static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
2908 struct iwl_mvm_scan_params *params,
2909 struct ieee80211_vif *vif)
2910 {
2911 if (iwl_mvm_is_cdb_supported(mvm)) {
2912 params->respect_p2p_go =
2913 iwl_mvm_get_respect_p2p_go_band(mvm, vif,
2914 NL80211_BAND_2GHZ);
2915 params->respect_p2p_go_hb =
2916 iwl_mvm_get_respect_p2p_go_band(mvm, vif,
2917 NL80211_BAND_5GHZ);
2918 } else {
2919 params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
2920 }
2921 }
2922
2923 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2924 struct cfg80211_scan_request *req,
2925 struct ieee80211_scan_ies *ies)
2926 {
2927 struct iwl_host_cmd hcmd = {
2928 .len = { iwl_mvm_scan_size(mvm), },
2929 .data = { mvm->scan_cmd, },
2930 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
2931 };
2932 struct iwl_mvm_scan_params params = {};
2933 int ret, uid;
2934 struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
2935
2936 lockdep_assert_held(&mvm->mutex);
2937
2938 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2939 IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
2940 return -EBUSY;
2941 }
2942
2943 ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
2944 if (ret)
2945 return ret;
2946
2947 /* we should have failed registration if scan_cmd was NULL */
2948 if (WARN_ON(!mvm->scan_cmd))
2949 return -ENOMEM;
2950
2951 if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
2952 return -ENOBUFS;
2953
2954 params.n_ssids = req->n_ssids;
2955 params.flags = req->flags;
2956 params.n_channels = req->n_channels;
2957 params.delay = 0;
2958 params.ssids = req->ssids;
2959 params.channels = req->channels;
2960 params.mac_addr = req->mac_addr;
2961 params.mac_addr_mask = req->mac_addr_mask;
2962 params.no_cck = req->no_cck;
2963 params.pass_all = true;
2964 params.n_match_sets = 0;
2965 params.match_sets = NULL;
2966 ether_addr_copy(params.bssid, req->bssid);
2967
2968 params.scan_plans = &scan_plan;
2969 params.n_scan_plans = 1;
2970
2971 params.n_6ghz_params = req->n_6ghz_params;
2972 params.scan_6ghz_params = req->scan_6ghz_params;
2973 params.scan_6ghz = req->scan_6ghz;
2974 iwl_mvm_fill_scan_type(mvm, &params, vif);
2975 iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
2976
2977 if (req->duration)
2978 params.iter_notif = true;
2979
2980 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
2981
2982 iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
2983
2984 uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
2985 IWL_MVM_SCAN_REGULAR);
2986
2987 if (uid < 0)
2988 return uid;
2989
2990 iwl_mvm_pause_tcm(mvm, false);
2991
2992 ret = iwl_mvm_send_cmd(mvm, &hcmd);
2993 if (ret) {
2994 /* If the scan failed, it usually means that the FW was unable
2995 * to allocate the time events. Warn on it, but maybe we
2996 * should try to send the command again with different params.
2997 */
2998 IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
2999 iwl_mvm_resume_tcm(mvm);
3000 mvm->scan_uid_status[uid] = 0;
3001 return ret;
3002 }
3003
3004 IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
3005 mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
3006 mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
3007
3008 if (params.enable_6ghz_passive)
3009 mvm->last_6ghz_passive_scan_jiffies = jiffies;
3010
3011 schedule_delayed_work(&mvm->scan_timeout_dwork,
3012 msecs_to_jiffies(SCAN_TIMEOUT));
3013
3014 return 0;
3015 }
3016
3017 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
3018 struct ieee80211_vif *vif,
3019 struct cfg80211_sched_scan_request *req,
3020 struct ieee80211_scan_ies *ies,
3021 int type)
3022 {
3023 struct iwl_host_cmd hcmd = {
3024 .len = { iwl_mvm_scan_size(mvm), },
3025 .data = { mvm->scan_cmd, },
3026 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
3027 };
3028 struct iwl_mvm_scan_params params = {};
3029 int ret, uid;
3030 int i, j;
3031 bool non_psc_included = false;
3032
3033 lockdep_assert_held(&mvm->mutex);
3034
3035 if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
3036 IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
3037 return -EBUSY;
3038 }
3039
3040 ret = iwl_mvm_check_running_scans(mvm, type);
3041 if (ret)
3042 return ret;
3043
3044 /* we should have failed registration if scan_cmd was NULL */
3045 if (WARN_ON(!mvm->scan_cmd))
3046 return -ENOMEM;
3047
3048
3049 params.n_ssids = req->n_ssids;
3050 params.flags = req->flags;
3051 params.n_channels = req->n_channels;
3052 params.ssids = req->ssids;
3053 params.channels = req->channels;
3054 params.mac_addr = req->mac_addr;
3055 params.mac_addr_mask = req->mac_addr_mask;
3056 params.no_cck = false;
3057 params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
3058 params.n_match_sets = req->n_match_sets;
3059 params.match_sets = req->match_sets;
3060 eth_broadcast_addr(params.bssid);
3061 if (!req->n_scan_plans)
3062 return -EINVAL;
3063
3064 params.n_scan_plans = req->n_scan_plans;
3065 params.scan_plans = req->scan_plans;
3066
3067 iwl_mvm_fill_scan_type(mvm, &params, vif);
3068 iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
3069
3070 /* In theory, LMAC scans can handle a 32-bit delay, but since
3071 * waiting for over 18 hours to start the scan is a bit silly
3072 * and to keep it aligned with UMAC scans (which only support
3073 * 16-bit delays), trim it down to 16-bits.
3074 */
3075 if (req->delay > U16_MAX) {
3076 IWL_DEBUG_SCAN(mvm,
3077 "delay value is > 16-bits, set to max possible\n");
3078 params.delay = U16_MAX;
3079 } else {
3080 params.delay = req->delay;
3081 }
3082
3083 ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
3084 if (ret)
3085 return ret;
3086
3087 iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
3088
3089 /* for the 6 GHz band only PSC channels need to be added */
3090 for (i = 0; i < params.n_channels; i++) {
3091 struct ieee80211_channel *channel = params.channels[i];
3092
3093 if (channel->band == NL80211_BAND_6GHZ &&
3094 !cfg80211_channel_is_psc(channel)) {
3095 non_psc_included = true;
3096 break;
3097 }
3098 }
3099
3100 if (non_psc_included) {
3101 params.channels = kmemdup(params.channels,
3102 sizeof(params.channels[0]) *
3103 params.n_channels,
3104 GFP_KERNEL);
3105 if (!params.channels)
3106 return -ENOMEM;
3107
3108 for (i = j = 0; i < params.n_channels; i++) {
3109 if (params.channels[i]->band == NL80211_BAND_6GHZ &&
3110 !cfg80211_channel_is_psc(params.channels[i]))
3111 continue;
3112 params.channels[j++] = params.channels[i];
3113 }
3114 params.n_channels = j;
3115 }
3116
3117 if (non_psc_included &&
3118 !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
3119 kfree(params.channels);
3120 return -ENOBUFS;
3121 }
3122
3123 uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
3124
3125 if (non_psc_included)
3126 kfree(params.channels);
3127 if (uid < 0)
3128 return uid;
3129
3130 ret = iwl_mvm_send_cmd(mvm, &hcmd);
3131 if (!ret) {
3132 IWL_DEBUG_SCAN(mvm,
3133 "Sched scan request was sent successfully\n");
3134 mvm->scan_status |= type;
3135 } else {
3136 /* If the scan failed, it usually means that the FW was unable
3137 * to allocate the time events. Warn on it, but maybe we
3138 * should try to send the command again with different params.
3139 */
3140 IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
3141 mvm->scan_uid_status[uid] = 0;
3142 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3143 }
3144
3145 return ret;
3146 }
3147
3148 void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
3149 struct iwl_rx_cmd_buffer *rxb)
3150 {
3151 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3152 struct iwl_umac_scan_complete *notif = (void *)pkt->data;
3153 u32 uid = __le32_to_cpu(notif->uid);
3154 bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
3155
3156 mvm->mei_scan_filter.is_mei_limited_scan = false;
3157
3158 if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
3159 return;
3160
3161 /* if the scan is already stopping, we don't need to notify mac80211 */
3162 if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
3163 struct cfg80211_scan_info info = {
3164 .aborted = aborted,
3165 .scan_start_tsf = mvm->scan_start,
3166 };
3167
3168 memcpy(info.tsf_bssid, mvm->scan_vif->deflink.bssid, ETH_ALEN);
3169 ieee80211_scan_completed(mvm->hw, &info);
3170 mvm->scan_vif = NULL;
3171 cancel_delayed_work(&mvm->scan_timeout_dwork);
3172 iwl_mvm_resume_tcm(mvm);
3173 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
3174 ieee80211_sched_scan_stopped(mvm->hw);
3175 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3176 }
3177
3178 mvm->scan_status &= ~mvm->scan_uid_status[uid];
3179 IWL_DEBUG_SCAN(mvm,
3180 "Scan completed, uid %u type %u, status %s, EBS status %s\n",
3181 uid, mvm->scan_uid_status[uid],
3182 notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
3183 "completed" : "aborted",
3184 iwl_mvm_ebs_status_str(notif->ebs_status));
3185 IWL_DEBUG_SCAN(mvm,
3186 "Last line %d, Last iteration %d, Time from last iteration %d\n",
3187 notif->last_schedule, notif->last_iter,
3188 __le32_to_cpu(notif->time_from_last_iter));
3189
3190 if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
3191 notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
3192 mvm->last_ebs_successful = false;
3193
3194 mvm->scan_uid_status[uid] = 0;
3195 }
3196
3197 void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
3198 struct iwl_rx_cmd_buffer *rxb)
3199 {
3200 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3201 struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
3202
3203 mvm->scan_start = le64_to_cpu(notif->start_tsf);
3204
3205 IWL_DEBUG_SCAN(mvm,
3206 "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
3207 notif->status, notif->scanned_channels);
3208
3209 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
3210 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
3211 ieee80211_sched_scan_results(mvm->hw);
3212 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
3213 }
3214
3215 IWL_DEBUG_SCAN(mvm,
3216 "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
3217 mvm->scan_start);
3218 }
3219
3220 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
3221 {
3222 struct iwl_umac_scan_abort cmd = {};
3223 int uid, ret;
3224
3225 lockdep_assert_held(&mvm->mutex);
3226
3227 /* We should always get a valid index here, because we already
3228 * checked that this type of scan was running in the generic
3229 * code.
3230 */
3231 uid = iwl_mvm_scan_uid_by_status(mvm, type);
3232 if (WARN_ON_ONCE(uid < 0))
3233 return uid;
3234
3235 cmd.uid = cpu_to_le32(uid);
3236
3237 IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
3238
3239 ret = iwl_mvm_send_cmd_pdu(mvm,
3240 WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
3241 0, sizeof(cmd), &cmd);
3242 if (!ret)
3243 mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
3244
3245 return ret;
3246 }
3247
3248 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
3249 {
3250 struct iwl_notification_wait wait_scan_done;
3251 static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
3252 SCAN_OFFLOAD_COMPLETE, };
3253 int ret;
3254
3255 lockdep_assert_held(&mvm->mutex);
3256
3257 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
3258 scan_done_notif,
3259 ARRAY_SIZE(scan_done_notif),
3260 NULL, NULL);
3261
3262 IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
3263
3264 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
3265 ret = iwl_mvm_umac_scan_abort(mvm, type);
3266 else
3267 ret = iwl_mvm_lmac_scan_abort(mvm);
3268
3269 if (ret) {
3270 IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
3271 iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
3272 return ret;
3273 }
3274
3275 return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
3276 1 * HZ);
3277 }
3278
3279 static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
3280 {
3281 switch (scan_ver) {
3282 case 12:
3283 return sizeof(struct iwl_scan_req_umac_v12);
3284 case 14:
3285 case 15:
3286 case 16:
3287 case 17:
3288 return sizeof(struct iwl_scan_req_umac_v17);
3289 }
3290
3291 return 0;
3292 }
3293
3294 size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
3295 {
3296 int base_size, tail_size;
3297 u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
3298 IWL_FW_CMD_VER_UNKNOWN);
3299
3300 base_size = iwl_scan_req_umac_get_size(scan_ver);
3301 if (base_size)
3302 return base_size;
3303
3304
3305 if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
3306 base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
3307 else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
3308 base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
3309 else if (iwl_mvm_cdb_scan_api(mvm))
3310 base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
3311 else
3312 base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
3313
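/*
 * Legacy sizing for command versions without a dedicated request struct:
 * a capability-derived base size, plus one channel config entry per
 * supported scan channel, plus the tail; non-UMAC firmware falls back to
 * the LMAC request layout.
 */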
3314 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
3315 if (iwl_mvm_is_scan_ext_chan_supported(mvm))
3316 tail_size = sizeof(struct iwl_scan_req_umac_tail_v2);
3317 else
3318 tail_size = sizeof(struct iwl_scan_req_umac_tail_v1);
3319
3320 return base_size +
3321 sizeof(struct iwl_scan_channel_cfg_umac) *
3322 mvm->fw->ucode_capa.n_scan_channels +
3323 tail_size;
3324 }
3325 return sizeof(struct iwl_scan_req_lmac) +
3326 sizeof(struct iwl_scan_channel_cfg_lmac) *
3327 mvm->fw->ucode_capa.n_scan_channels +
3328 sizeof(struct iwl_scan_probe_req_v1);
3329 }
3330
3331 /*
3332 * This function is used in the NIC restart flow to inform mac80211 about scans
3333 * that were aborted by the restart flow or by an assert.
3334 */
3335 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
3336 {
3337 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
3338 int uid, i;
3339
3340 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
3341 if (uid >= 0) {
3342 struct cfg80211_scan_info info = {
3343 .aborted = true,
3344 };
3345
3346 cancel_delayed_work(&mvm->scan_timeout_dwork);
3347
3348 ieee80211_scan_completed(mvm->hw, &info);
3349 mvm->scan_uid_status[uid] = 0;
3350 }
3351 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
3352 if (uid >= 0) {
3353 /* Sched scan will be restarted by mac80211 in
3354 * restart_hw, so do not report if FW is about to be
3355 * restarted.
3356 */
3357 if (!mvm->fw_restart)
3358 ieee80211_sched_scan_stopped(mvm->hw);
3359 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3360 mvm->scan_uid_status[uid] = 0;
3361 }
3362 uid = iwl_mvm_scan_uid_by_status(mvm,
3363 IWL_MVM_SCAN_STOPPING_REGULAR);
3364 if (uid >= 0)
3365 mvm->scan_uid_status[uid] = 0;
3366
3367 uid = iwl_mvm_scan_uid_by_status(mvm,
3368 IWL_MVM_SCAN_STOPPING_SCHED);
3369 if (uid >= 0)
3370 mvm->scan_uid_status[uid] = 0;
3371
3372 /* We shouldn't have any UIDs still set. Loop over all the
3373 * UIDs to make sure there's nothing left there and warn if
3374 * any is found.
3375 */
3376 for (i = 0; i < mvm->max_scans; i++) {
3377 if (WARN_ONCE(mvm->scan_uid_status[i],
3378 "UMAC scan UID %d status was not cleaned\n",
3379 i))
3380 mvm->scan_uid_status[i] = 0;
3381 }
3382 } else {
3383 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
3384 struct cfg80211_scan_info info = {
3385 .aborted = true,
3386 };
3387
3388 cancel_delayed_work(&mvm->scan_timeout_dwork);
3389 ieee80211_scan_completed(mvm->hw, &info);
3390 }
3391
3392 /* Sched scan will be restarted by mac80211 in
3393 * restart_hw, so do not report if FW is about to be
3394 * restarted.
3395 */
3396 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
3397 !mvm->fw_restart) {
3398 ieee80211_sched_scan_stopped(mvm->hw);
3399 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3400 }
3401 }
3402 }
3403
3404 int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
3405 {
3406 int ret;
3407
3408 if (!(mvm->scan_status & type))
3409 return 0;
3410
3411 if (iwl_mvm_is_radio_killed(mvm)) {
3412 ret = 0;
3413 goto out;
3414 }
3415
3416 ret = iwl_mvm_scan_stop_wait(mvm, type);
3417 if (!ret)
3418 mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
3419 out:
3420 /* Clear the scan status so the next scan requests will
3421 * succeed, and mark the scan as stopping so that the Rx
3422 * handler doesn't do anything, as the scan was stopped from
3423 * above.
3424 */
3425 mvm->scan_status &= ~type;
3426
3427 if (type == IWL_MVM_SCAN_REGULAR) {
3428 cancel_delayed_work(&mvm->scan_timeout_dwork);
3429 if (notify) {
3430 struct cfg80211_scan_info info = {
3431 .aborted = true,
3432 };
3433
3434 ieee80211_scan_completed(mvm->hw, &info);
3435 }
3436 } else if (notify) {
3437 ieee80211_sched_scan_stopped(mvm->hw);
3438 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3439 }
3440
3441 return ret;
3442 }