ath12k_core_halt(ar);
}
+ ath12k_mac_dp_peer_cleanup(ah);
break;
case ATH12K_HW_STATE_OFF:
ath12k_warn(ab,
DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
- /* protected by wiphy_lock() */
- struct list_head ml_peers;
+ struct ath12k_dp_hw dp_hw;
/* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
struct ath12k_hw_group;
+/*
+ * ML Peer IDs start from 8192, assuming max SLO clients count 1536,
+ * then max peer id shall be 9728, therefore rounding the peer table size
+ * to the nearest next power of 2 i.e 16384.
+ */
+#define MAX_DP_PEER_LIST_SIZE 16384
+
+struct ath12k_dp_hw {
+ struct ath12k_dp_peer __rcu *dp_peers[MAX_DP_PEER_LIST_SIZE];
+
+ /* Lock for protection of dp_peer_list and peers */
+ spinlock_t peer_lock;
+ struct list_head dp_peers_list;
+};
+
struct ath12k_dp_hw_group {
struct ath12k_dp *dp[ATH12K_MAX_DEVICES];
};
bool is_ampdu;
};
+struct ath12k_dp_peer_create_params {
+ struct ieee80211_sta *sta;
+ bool is_mlo;
+ u16 peer_id;
+ bool ucast_ra_only;
+};
+
static inline struct ath12k_dp_link_vif *
ath12k_dp_vif_to_dp_link_vif(struct ath12k_dp_vif *dp_vif, u8 link_id)
{
ath12k_warn(dp, "failed to remove peer %pM with id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
}
+
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr(struct ath12k_dp_hw *dp_hw, u8 *addr)
+{
+ struct ath12k_dp_peer *peer;
+
+ lockdep_assert_held(&dp_hw->peer_lock);
+
+ list_for_each_entry(peer, &dp_hw->dp_peers_list, list) {
+ if (ether_addr_equal(peer->addr, addr))
+ return peer;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath12k_dp_peer_find_by_addr);
+
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr_and_sta(struct ath12k_dp_hw *dp_hw,
+ u8 *addr,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ lockdep_assert_held(&dp_hw->peer_lock);
+
+ list_for_each_entry(dp_peer, &dp_hw->dp_peers_list, list) {
+ if (ether_addr_equal(dp_peer->addr, addr) && (dp_peer->sta == sta))
+ return dp_peer;
+ }
+
+ return NULL;
+}
+
+static struct ath12k_dp_peer *ath12k_dp_peer_create_find(struct ath12k_dp_hw *dp_hw,
+ u8 *addr,
+ struct ieee80211_sta *sta,
+ bool mlo_peer)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ lockdep_assert_held(&dp_hw->peer_lock);
+
+ list_for_each_entry(dp_peer, &dp_hw->dp_peers_list, list) {
+ if (ether_addr_equal(dp_peer->addr, addr)) {
+ if (!sta || mlo_peer || dp_peer->is_mlo ||
+ dp_peer->sta == sta)
+ return dp_peer;
+ }
+ }
+
+ return NULL;
+}
+
+int ath12k_dp_peer_create(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ath12k_dp_peer_create_params *params)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ spin_lock_bh(&dp_hw->peer_lock);
+ dp_peer = ath12k_dp_peer_create_find(dp_hw, addr, params->sta, params->is_mlo);
+ if (dp_peer) {
+ spin_unlock_bh(&dp_hw->peer_lock);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ dp_peer = kzalloc(sizeof(*dp_peer), GFP_ATOMIC);
+ if (!dp_peer)
+ return -ENOMEM;
+
+ ether_addr_copy(dp_peer->addr, addr);
+ dp_peer->sta = params->sta;
+ dp_peer->is_mlo = params->is_mlo;
+
+ /*
+ * For MLO client, the host assigns the ML peer ID, so set peer_id in dp_peer
+ * For non-MLO client, host gets link peer ID from firmware and will be
+ * assigned at the time of link peer creation
+ */
+ dp_peer->peer_id = params->is_mlo ? params->peer_id : ATH12K_DP_PEER_ID_INVALID;
+ dp_peer->ucast_ra_only = params->ucast_ra_only;
+
+ dp_peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ dp_peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ spin_lock_bh(&dp_hw->peer_lock);
+
+ list_add(&dp_peer->list, &dp_hw->dp_peers_list);
+
+ /*
+ * For MLO client, the peer_id for ath12k_dp_peer is allocated by host
+ * and that peer_id is known at this point, and hence this ath12k_dp_peer
+ * can be added to the RCU table using the peer_id.
+ * For non-MLO client, this addition to RCU table shall be done at the
+ * time of assignment of ath12k_dp_link_peer to ath12k_dp_peer.
+ */
+ if (dp_peer->is_mlo)
+ rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], dp_peer);
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ return 0;
+}
+
+void ath12k_dp_peer_delete(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_dp_peer *dp_peer;
+
+ spin_lock_bh(&dp_hw->peer_lock);
+
+ dp_peer = ath12k_dp_peer_find_by_addr_and_sta(dp_hw, addr, sta);
+ if (!dp_peer) {
+ spin_unlock_bh(&dp_hw->peer_lock);
+ return;
+ }
+
+ if (dp_peer->is_mlo)
+ rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], NULL);
+
+ list_del(&dp_peer->list);
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ synchronize_rcu();
+ kfree(dp_peer);
+}
#include "dp_rx.h"
+#define ATH12K_DP_PEER_ID_INVALID 0x3FFF
+
struct ppdu_user_delayba {
u16 sw_peer_id;
u32 info0;
void ath12k_dp_link_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
void ath12k_dp_link_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
+
+struct ath12k_dp_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int peer_id;
+ u8 addr[ETH_ALEN];
+ bool is_mlo;
+
+ u16 sec_type;
+ u16 sec_type_grp;
+
+ bool ucast_ra_only;
+};
+
struct ath12k_dp_link_peer *
ath12k_dp_link_peer_find_by_vdev_and_addr(struct ath12k_dp *dp,
int vdev_id, const u8 *addr);
struct ath12k_dp_link_peer *peer);
void ath12k_dp_link_peer_rhash_delete(struct ath12k_dp *dp,
struct ath12k_dp_link_peer *peer);
+int ath12k_dp_peer_create(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ath12k_dp_peer_create_params *params);
+void ath12k_dp_peer_delete(struct ath12k_dp_hw *dp_hw, u8 *addr,
+ struct ieee80211_sta *sta);
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr(struct ath12k_dp_hw *dp_hw, u8 *addr);
+struct ath12k_dp_peer *ath12k_dp_peer_find_by_addr_and_sta(struct ath12k_dp_hw *dp_hw,
+ u8 *addr,
+ struct ieee80211_sta *sta);
#endif
struct ath12k_dp_link_peer *peer, *tmp;
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_link_vif *arvif, *tmp_vif;
+ struct ath12k_dp_hw *dp_hw = &ar->ah->dp_hw;
lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
/* Cleanup rhash table maintained for arsta by iterating over sta */
ieee80211_iterate_stations_mtx(ar->ah->hw, ath12k_mac_link_sta_rhash_cleanup,
ar);
+
+ /* Delete all the self dp_peers on asserted radio */
+ list_for_each_entry_safe_reverse(arvif, tmp_vif, &ar->arvifs, list) {
+ if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ath12k_dp_peer_delete(dp_hw, arvif->bssid, NULL);
+ arvif->num_stations = 0;
+ }
+ }
+}
+
+void ath12k_mac_dp_peer_cleanup(struct ath12k_hw *ah)
+{
+ struct list_head peers;
+ struct ath12k_dp_peer *dp_peer, *tmp;
+ struct ath12k_dp_hw *dp_hw = &ah->dp_hw;
+
+ INIT_LIST_HEAD(&peers);
+
+ spin_lock_bh(&dp_hw->peer_lock);
+ list_for_each_entry_safe(dp_peer, tmp, &dp_hw->dp_peers_list, list) {
+ if (dp_peer->is_mlo) {
+ rcu_assign_pointer(dp_hw->dp_peers[dp_peer->peer_id], NULL);
+ clear_bit(dp_peer->peer_id, ah->free_ml_peer_id_map);
+ }
+
+ list_move(&dp_peer->list, &peers);
+ }
+
+ spin_unlock_bh(&dp_hw->peer_lock);
+
+ synchronize_rcu();
+
+ list_for_each_entry_safe(dp_peer, tmp, &peers, list) {
+ list_del(&dp_peer->list);
+ kfree(dp_peer);
+ }
}
static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
if (ret)
ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
arvif->vdev_id, arvif->link_id, ret);
+
+ ath12k_dp_peer_delete(&ah->dp_hw, arvif->bssid, NULL);
}
ath12k_mac_vdev_delete(ar, arvif);
}
ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
}
- ath12k_peer_ml_delete(ah, sta);
+ if (sta->mlo) {
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+ }
}
static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
u16 selected_links = 0;
u8 link_id = 0, i;
struct ath12k *ar;
- int ret;
+ int ret = -EINVAL;
+ struct ath12k_dp_peer_create_params dp_params = {};
lockdep_assert_wiphy(hw->wiphy);
/* ML sta */
if (sta->mlo && !ahsta->links_map &&
(hweight16(sta->valid_links) == 1)) {
- ret = ath12k_peer_ml_create(ah, sta);
- if (ret) {
- ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
+ ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+ if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+ ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
sta->addr);
goto exit;
}
+
+ dp_params.is_mlo = true;
+ dp_params.peer_id = ahsta->ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+ }
+
+ dp_params.sta = sta;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ dp_params.ucast_ra_only = true;
+
+ ret = ath12k_dp_peer_create(&ah->dp_hw, sta->addr, &dp_params);
+ if (ret) {
+ ath12k_hw_warn(ah, "unable to create ath12k_dp_peer for sta %pM, ret: %d",
+ sta->addr, ret);
+
+ goto ml_peer_id_clear;
}
ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
if (ret) {
ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
link_id, sta->addr);
- goto exit;
+ goto peer_delete;
}
/* above arsta will get memset, hence do this after assign
if (ret) {
ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
link_id, arsta->addr, old_state, new_state);
- goto exit;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ goto peer_delete;
+ else
+ goto exit;
}
}
* handler below
*/
if (old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
- ath12k_mac_ml_station_remove(ahvif, ahsta);
+ new_state == IEEE80211_STA_NOTEXIST) {
+ if (sta->mlo)
+ ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+ ath12k_dp_peer_delete(&ah->dp_hw, sta->addr, sta);
+ }
ret = 0;
+ goto exit;
+peer_delete:
+ ath12k_dp_peer_delete(&ah->dp_hw, sta->addr, sta);
+ml_peer_id_clear:
+ if (sta->mlo) {
+ clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+ ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+ }
exit:
/* update the state if everything went well */
if (!ret)
int ret, vdev_id;
u8 link_id;
struct ath12k_dp_link_vif *dp_link_vif = NULL;
+ struct ath12k_dp_peer_create_params params = {};
lockdep_assert_wiphy(hw->wiphy);
switch (ahvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
+ params.ucast_ra_only = true;
+
+ ret = ath12k_dp_peer_create(&ah->dp_hw, arvif->bssid, ¶ms);
+ if (ret) {
+ ath12k_warn(ab, "failed to vdev %d create dp_peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
peer_param.vdev_id = arvif->vdev_id;
peer_param.peer_addr = arvif->bssid;
peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
if (ret) {
ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
arvif->vdev_id, ret);
- goto err_vdev_del;
+ goto err_dp_peer_del;
}
ret = ath12k_mac_set_kickout(arvif);
ar->num_peers--;
}
+err_dp_peer_del:
+ if (ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+ ath12k_dp_peer_delete(&ah->dp_hw, arvif->bssid, NULL);
+
err_vdev_del:
if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ah->num_radio = num_pdev_map;
mutex_init(&ah->hw_mutex);
- INIT_LIST_HEAD(&ah->ml_peers);
+
+ spin_lock_init(&ah->dp_hw.peer_lock);
+ INIT_LIST_HEAD(&ah->dp_hw.dp_peers_list);
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
void ath12k_mac_drain_tx(struct ath12k *ar);
void ath12k_mac_peer_cleanup_all(struct ath12k *ar);
+void ath12k_mac_dp_peer_cleanup(struct ath12k_hw *ah);
int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw);
enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw);
#include "peer.h"
#include "debug.h"
-struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
-{
- struct ath12k_ml_peer *ml_peer;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- list_for_each_entry(ml_peer, &ah->ml_peers, list) {
- if (!ether_addr_equal(ml_peer->addr, addr))
- continue;
-
- return ml_peer;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(ath12k_peer_ml_find);
-
static int ath12k_wait_for_dp_link_peer_common(struct ath12k_base *ab, int vdev_id,
const u8 *addr, bool expect_mapped)
{
return 0;
}
-static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
{
u16 ml_peer_id;
return ml_peer_id;
}
-int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
-{
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_ml_peer *ml_peer;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- if (!sta->mlo)
- return -EINVAL;
-
- ml_peer = ath12k_peer_ml_find(ah, sta->addr);
- if (ml_peer) {
- ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
- ml_peer->id, sta->addr);
- return -EEXIST;
- }
-
- ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
- if (!ml_peer)
- return -ENOMEM;
-
- ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
-
- if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
- ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
- sta->addr);
- kfree(ml_peer);
- return -ENOMEM;
- }
-
- ether_addr_copy(ml_peer->addr, sta->addr);
- ml_peer->id = ahsta->ml_peer_id;
- list_add(&ml_peer->list, &ah->ml_peers);
-
- return 0;
-}
-
-int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
-{
- struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
- struct ath12k_ml_peer *ml_peer;
-
- lockdep_assert_wiphy(ah->hw->wiphy);
-
- if (!sta->mlo)
- return -EINVAL;
-
- clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
- ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
-
- ml_peer = ath12k_peer_ml_find(ah, sta->addr);
- if (!ml_peer) {
- ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
- return -EINVAL;
- }
-
- list_del(&ml_peer->list);
- kfree(ml_peer);
-
- return 0;
-}
-
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
{
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
#include "dp_peer.h"
-struct ath12k_ml_peer {
- struct list_head list;
- u8 addr[ETH_ALEN];
- u16 id;
-};
-
void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct ath12k_wmi_peer_create_arg *arg);
int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
const u8 *addr);
-int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
-int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah,
const u8 *addr);
int ath12k_link_sta_rhash_add(struct ath12k_base *ab, struct ath12k_link_sta *arsta);
struct ath12k_link_sta *ath12k_link_sta_find_by_addr(struct ath12k_base *ab,
const u8 *addr);
+u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah);
#endif /* _PEER_H_ */
struct ath12k_hw *ah = ath12k_ar_to_ah(arvif->ar);
struct ath12k_base *ab = arvif->ar->ab;
struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
+ struct ath12k_dp_peer *peer;
__le16 fc = mgmt->frame_control;
spin_lock_bh(&dp->dp_lock);
- if (!ath12k_dp_link_peer_find_by_addr(dp, mgmt->da) &&
- !ath12k_peer_ml_find(ah, mgmt->da)) {
- spin_unlock_bh(&dp->dp_lock);
- return false;
+ if (!ath12k_dp_link_peer_find_by_addr(dp, mgmt->da)) {
+ spin_lock_bh(&ah->dp_hw.peer_lock);
+ peer = ath12k_dp_peer_find_by_addr(&ah->dp_hw, mgmt->da);
+ if (!peer || (peer && !peer->is_mlo)) {
+ spin_unlock_bh(&ah->dp_hw.peer_lock);
+ spin_unlock_bh(&dp->dp_lock);
+ return false;
+ }
+ spin_unlock_bh(&ah->dp_hw.peer_lock);
}
spin_unlock_bh(&dp->dp_lock);