A fairly big set of changes all over, notably with:

- cfg80211: new APIs for NAN (Neighbor Aware Networking,
    aka Wi-Fi Aware) so less work must be in firmware
  - mt76:
    - mt7996/mt7925 MLO fixes/improvements
    - mt7996 NPU support (HW eth/wifi traffic offload)
  - iwlwifi: UNII-9 and continuing UHR work
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEpeA8sTs3M8SN2hR410qiO8sPaAAFAmnFTegACgkQ10qiO8sP
 aABpghAAmcubFELG/ivDfwujEXjeKRU4CGcFPWDnOwBo28w8bQ36SoKRh251BUSL
 4XCEwZwPR2gFI77bJ7fLn1gsRNd8Cv+t8wsi2K3TV3bOy6wCxH85A7l4GmN5vGzP
 9MLcAAT7R684YAC4gFAi3DqFmSucd/ZodAt93Cw7+ikXq2tvrbR5wgUv9AQ5mUIw
 f5cqocOOv+4IbSL+r2cQnCAKLGWxVMJpoiWuAPpIQn7odcrncrhvBIG3l9ZC4KOL
 BKiO+YpK8Yg3+uc9zrz+RwOcQx6TjzgAydFY/AnqOmGfQ2dGaWC/zy/5stCOVrfd
 mAqw4jr14eAumUoHQoNrOBsWikuDBKmYMjHVObR3cKB9jJ/54CHtSYJVueg9gdhP
 4+s5lNkX0zEt76wimYQRpCkYhalBUZMwUv3HFnab99PDDmWvNFS8uHi8i2g7U81i
 yVdxI3MbQp2SRgJMDbKQPziSad1qJyIzg/LoN9fb6GV1DoNZ3IZabgVMOA2IoB0L
 zYi3Yuyo63yhDh2Np9uzDsIRQAbTCdbou2fzPqy6CvOyG6JXxCI8PZpZAN7dqYxc
 u8rljjaxQ4IYfBWrryFdHzIrYHJLo/B4g8kSFE+vzLiFblFmTxBoziHDWpJ4u5im
 YTyOyBYAtzQf0l8cZPKzRq+AuVgIuJVNV/3zyxnoFxfqg/lUWNk=
 =zap4
 -----END PGP SIGNATURE-----

Merge tag 'wireless-next-2026-03-26' of https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next

Johannes Berg says:

====================
A fairly big set of changes all over, notably with:
 - cfg80211: new APIs for NAN (Neighbor Aware Networking,
   aka Wi-Fi Aware) so less work must be in firmware
 - mt76:
   - mt7996/mt7925 MLO fixes/improvements
   - mt7996 NPU support (HW eth/wifi traffic offload)
 - iwlwifi: UNII-9 and continuing UHR work

* tag 'wireless-next-2026-03-26' of https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next: (230 commits)
  wifi: mac80211: ignore reserved bits in reconfiguration status
  wifi: cfg80211: allow protected action frame TX for NAN
  wifi: ieee80211: Add some missing NAN definitions
  wifi: nl80211: Add a notification to notify NAN channel evacuation
  wifi: nl80211: add NL80211_CMD_NAN_ULW_UPDATE notification
  wifi: nl80211: allow reporting spurious NAN Data frames
  wifi: cfg80211: allow ToDS=0/FromDS=0 data frames on NAN data interfaces
  wifi: nl80211: define an API for configuring the NAN peer's schedule
  wifi: nl80211: add support for NAN stations
  wifi: cfg80211: separately store HT, VHT and HE capabilities for NAN
  wifi: cfg80211: add support for NAN data interface
  wifi: cfg80211: make sure NAN chandefs are valid
  wifi: cfg80211: Add an API to configure local NAN schedule
  wifi: mac80211: cleanup error path of ieee80211_do_open
  wifi: mac80211: extract channel logic from link logic
  wifi: iwlwifi: mld: set RX_FLAG_RADIOTAP_TLV_AT_END generically
  wifi: iwlwifi: reduce the number of prints upon firmware crash
  wifi: iwlwifi: fix the description of SESSION_PROTECTION_CMD
  wifi: iwlwifi: mld: introduce iwl_mld_vif_fw_id_valid
  wifi: iwlwifi: mld: block EMLSR during TDLS connections
  ...
====================

Link: https://patch.msgid.link/20260326152021.305959-3-johannes@sipsolutions.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski 2026-03-26 18:17:14 -07:00
commit dbd94b9831
166 changed files with 7131 additions and 2321 deletions

View File

@ -1016,7 +1016,6 @@ static int ath10k_usb_probe(struct usb_interface *interface,
netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
product_id = le16_to_cpu(dev->descriptor.idProduct);
@ -1055,12 +1054,10 @@ static int ath10k_usb_probe(struct usb_interface *interface,
err:
ath10k_core_destroy(ar);
usb_put_dev(dev);
return ret;
}
static void ath10k_usb_remove(struct usb_interface *interface)
static void ath10k_usb_disconnect(struct usb_interface *interface)
{
struct ath10k_usb *ar_usb;
@ -1071,7 +1068,6 @@ static void ath10k_usb_remove(struct usb_interface *interface)
ath10k_core_unregister(ar_usb->ar);
netif_napi_del(&ar_usb->ar->napi);
ath10k_usb_destroy(ar_usb->ar);
usb_put_dev(interface_to_usbdev(interface));
ath10k_core_destroy(ar_usb->ar);
}
@ -1117,7 +1113,7 @@ static struct usb_driver ath10k_usb_driver = {
.probe = ath10k_usb_probe,
.suspend = ath10k_usb_pm_suspend,
.resume = ath10k_usb_pm_resume,
.disconnect = ath10k_usb_remove,
.disconnect = ath10k_usb_disconnect,
.id_table = ath10k_usb_ids,
.supports_autosuspend = true,
.disable_hub_initiated_lpm = 1,

View File

@ -21,8 +21,8 @@
#define ATH12K_ROOTPD_READY_TIMEOUT (5 * HZ)
#define ATH12K_RPROC_AFTER_POWERUP QCOM_SSR_AFTER_POWERUP
#define ATH12K_AHB_FW_PREFIX "q6_fw"
#define ATH12K_AHB_FW_SUFFIX ".mdt"
#define ATH12K_AHB_FW2 "iu_fw.mdt"
#define ATH12K_AHB_FW_SUFFIX ".mbn"
#define ATH12K_AHB_FW2 "iu_fw.mbn"
#define ATH12K_AHB_UPD_SWID 0x12
#define ATH12K_USERPD_SPAWN_TIMEOUT (5 * HZ)
#define ATH12K_USERPD_READY_TIMEOUT (10 * HZ)

View File

@ -523,7 +523,7 @@ struct ath12k_sta {
u16 links_map;
u8 assoc_link_id;
u16 ml_peer_id;
u8 num_peer;
u16 free_logical_link_idx_map;
enum ieee80211_sta_state state;
};

View File

@ -205,16 +205,9 @@ ath12k_update_per_peer_tx_stats(struct ath12k_pdev_dp *dp_pdev,
if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
return;
if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
is_ampdu =
HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
tx_retry_failed =
__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
tx_retry_count =
HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
}
if (usr_stats->tlv_flags &
BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
@ -223,10 +216,19 @@ ath12k_update_per_peer_tx_stats(struct ath12k_pdev_dp *dp_pdev,
HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
tid = le32_get_bits(usr_stats->ack_ba.info,
HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
}
if (common->fes_duration_us)
tx_duration = le32_to_cpu(common->fes_duration_us);
if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON)) {
tx_retry_failed =
__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_tried) -
__le16_to_cpu(usr_stats->cmpltn_cmn.mpdu_success);
tx_retry_count =
HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
}
if (common->fes_duration_us)
tx_duration = le32_to_cpu(common->fes_duration_us);
}
user_rate = &usr_stats->rate;
flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);

View File

@ -268,21 +268,28 @@ enum hal_rx_reception_type {
};
enum hal_rx_legacy_rate {
HAL_RX_LEGACY_RATE_1_MBPS,
HAL_RX_LEGACY_RATE_2_MBPS,
HAL_RX_LEGACY_RATE_5_5_MBPS,
HAL_RX_LEGACY_RATE_6_MBPS,
HAL_RX_LEGACY_RATE_9_MBPS,
HAL_RX_LEGACY_RATE_11_MBPS,
HAL_RX_LEGACY_RATE_12_MBPS,
HAL_RX_LEGACY_RATE_18_MBPS,
HAL_RX_LEGACY_RATE_24_MBPS,
HAL_RX_LEGACY_RATE_36_MBPS,
HAL_RX_LEGACY_RATE_48_MBPS,
HAL_RX_LEGACY_RATE_54_MBPS,
HAL_RX_LEGACY_RATE_LP_1_MBPS,
HAL_RX_LEGACY_RATE_LP_2_MBPS,
HAL_RX_LEGACY_RATE_LP_5_5_MBPS,
HAL_RX_LEGACY_RATE_LP_11_MBPS,
HAL_RX_LEGACY_RATE_SP_2_MBPS,
HAL_RX_LEGACY_RATE_SP_5_5_MBPS,
HAL_RX_LEGACY_RATE_SP_11_MBPS,
HAL_RX_LEGACY_RATE_INVALID,
};
enum hal_rx_legacy_rates_ofdm {
HAL_RX_LEGACY_RATE_OFDM_48_MBPS,
HAL_RX_LEGACY_RATE_OFDM_24_MBPS,
HAL_RX_LEGACY_RATE_OFDM_12_MBPS,
HAL_RX_LEGACY_RATE_OFDM_6_MBPS,
HAL_RX_LEGACY_RATE_OFDM_54_MBPS,
HAL_RX_LEGACY_RATE_OFDM_36_MBPS,
HAL_RX_LEGACY_RATE_OFDM_18_MBPS,
HAL_RX_LEGACY_RATE_OFDM_9_MBPS,
HAL_RX_LEGACY_RATE_OFDM_INVALID,
};
enum hal_ring_type {
HAL_REO_DST,
HAL_REO_EXCEPTION,

View File

@ -164,30 +164,31 @@ static const struct ieee80211_channel ath12k_6ghz_channels[] = {
CHAN6G(233, 7115, 0),
};
#define ATH12K_MAC_RATE_A_M(bps, code) \
{ .bitrate = (bps), .hw_value = (code),\
.flags = IEEE80211_RATE_MANDATORY_A }
#define ATH12K_MAC_RATE_B(bps, code, code_short) \
{ .bitrate = (bps), .hw_value = (code), .hw_value_short = (code_short),\
.flags = IEEE80211_RATE_SHORT_PREAMBLE }
static struct ieee80211_rate ath12k_legacy_rates[] = {
{ .bitrate = 10,
.hw_value = ATH12K_HW_RATE_CCK_LP_1M },
{ .bitrate = 20,
.hw_value = ATH12K_HW_RATE_CCK_LP_2M,
.hw_value_short = ATH12K_HW_RATE_CCK_SP_2M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = ATH12K_HW_RATE_CCK_LP_5_5M,
.hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = ATH12K_HW_RATE_CCK_LP_11M,
.hw_value_short = ATH12K_HW_RATE_CCK_SP_11M,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M },
{ .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M },
{ .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M },
{ .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M },
{ .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M },
{ .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M },
{ .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M },
{ .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M },
ATH12K_MAC_RATE_B(20, ATH12K_HW_RATE_CCK_LP_2M,
ATH12K_HW_RATE_CCK_SP_2M),
ATH12K_MAC_RATE_B(55, ATH12K_HW_RATE_CCK_LP_5_5M,
ATH12K_HW_RATE_CCK_SP_5_5M),
ATH12K_MAC_RATE_B(110, ATH12K_HW_RATE_CCK_LP_11M,
ATH12K_HW_RATE_CCK_SP_11M),
ATH12K_MAC_RATE_A_M(60, ATH12K_HW_RATE_OFDM_6M),
ATH12K_MAC_RATE_A_M(90, ATH12K_HW_RATE_OFDM_9M),
ATH12K_MAC_RATE_A_M(120, ATH12K_HW_RATE_OFDM_12M),
ATH12K_MAC_RATE_A_M(180, ATH12K_HW_RATE_OFDM_18M),
ATH12K_MAC_RATE_A_M(240, ATH12K_HW_RATE_OFDM_24M),
ATH12K_MAC_RATE_A_M(360, ATH12K_HW_RATE_OFDM_36M),
ATH12K_MAC_RATE_A_M(480, ATH12K_HW_RATE_OFDM_48M),
ATH12K_MAC_RATE_A_M(540, ATH12K_HW_RATE_OFDM_54M),
};
static const int
@ -732,11 +733,17 @@ u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck)
continue;
if (rate->hw_value == hw_rate)
/* To handle 802.11a PPDU type */
if ((!cck) && (rate->hw_value == hw_rate) &&
(rate->flags & IEEE80211_RATE_MANDATORY_A))
return i;
/* To handle 802.11b short PPDU type */
else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
rate->hw_value_short == hw_rate)
return i;
/* To handle 802.11b long PPDU type */
else if (rate->hw_value == hw_rate)
return i;
}
return 0;
@ -6786,6 +6793,8 @@ static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
return;
ahsta->links_map &= ~BIT(link_id);
ahsta->free_logical_link_idx_map |= BIT(arsta->link_idx);
rcu_assign_pointer(ahsta->link[link_id], NULL);
synchronize_rcu();
@ -7104,6 +7113,7 @@ static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
struct ieee80211_link_sta *link_sta;
struct ath12k_link_vif *arvif;
int link_idx;
lockdep_assert_wiphy(ah->hw->wiphy);
@ -7122,8 +7132,16 @@ static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
ether_addr_copy(arsta->addr, link_sta->addr);
/* logical index of the link sta in order of creation */
arsta->link_idx = ahsta->num_peer++;
if (!ahsta->free_logical_link_idx_map)
return -ENOSPC;
/*
* Allocate a logical link index by selecting the first available bit
* from the free logical index map
*/
link_idx = __ffs(ahsta->free_logical_link_idx_map);
ahsta->free_logical_link_idx_map &= ~BIT(link_idx);
arsta->link_idx = link_idx;
arsta->link_id = link_id;
ahsta->links_map |= BIT(arsta->link_id);
@ -7632,6 +7650,7 @@ int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(ahsta, 0, sizeof(*ahsta));
ahsta->free_logical_link_idx_map = U16_MAX;
arsta = &ahsta->deflink;

View File

@ -405,6 +405,42 @@ ath12k_wifi7_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *r
}
}
static __always_inline u8
ath12k_wifi7_hal_mon_map_legacy_rate_to_hw_rate(u8 rate)
{
u8 ath12k_rate;
/* Map hal_rx_legacy_rate to ath12k_hw_rate_cck */
switch (rate) {
case HAL_RX_LEGACY_RATE_LP_1_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_LP_1M;
break;
case HAL_RX_LEGACY_RATE_LP_2_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_LP_2M;
break;
case HAL_RX_LEGACY_RATE_LP_5_5_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_LP_5_5M;
break;
case HAL_RX_LEGACY_RATE_LP_11_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_LP_11M;
break;
case HAL_RX_LEGACY_RATE_SP_2_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_SP_2M;
break;
case HAL_RX_LEGACY_RATE_SP_5_5_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_SP_5_5M;
break;
case HAL_RX_LEGACY_RATE_SP_11_MBPS:
ath12k_rate = ATH12K_HW_RATE_CCK_SP_11M;
break;
default:
ath12k_rate = rate;
break;
}
return ath12k_rate;
}
static void
ath12k_wifi7_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
struct hal_rx_mon_ppdu_info *ppdu_info)
@ -415,25 +451,32 @@ ath12k_wifi7_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE);
switch (rate) {
case 1:
rate = HAL_RX_LEGACY_RATE_1_MBPS;
rate = HAL_RX_LEGACY_RATE_LP_1_MBPS;
break;
case 2:
case 5:
rate = HAL_RX_LEGACY_RATE_2_MBPS;
rate = HAL_RX_LEGACY_RATE_LP_2_MBPS;
break;
case 3:
case 6:
rate = HAL_RX_LEGACY_RATE_5_5_MBPS;
rate = HAL_RX_LEGACY_RATE_LP_5_5_MBPS;
break;
case 4:
rate = HAL_RX_LEGACY_RATE_LP_11_MBPS;
break;
case 5:
rate = HAL_RX_LEGACY_RATE_SP_2_MBPS;
break;
case 6:
rate = HAL_RX_LEGACY_RATE_SP_5_5_MBPS;
break;
case 7:
rate = HAL_RX_LEGACY_RATE_11_MBPS;
rate = HAL_RX_LEGACY_RATE_SP_11_MBPS;
break;
default:
rate = HAL_RX_LEGACY_RATE_INVALID;
break;
}
ppdu_info->rate = rate;
ppdu_info->rate = ath12k_wifi7_hal_mon_map_legacy_rate_to_hw_rate(rate);
ppdu_info->cck_flag = 1;
}
@ -447,31 +490,32 @@ ath12k_wifi7_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE);
switch (rate) {
case 8:
rate = HAL_RX_LEGACY_RATE_48_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_48_MBPS;
break;
case 9:
rate = HAL_RX_LEGACY_RATE_24_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_24_MBPS;
break;
case 10:
rate = HAL_RX_LEGACY_RATE_12_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_12_MBPS;
break;
case 11:
rate = HAL_RX_LEGACY_RATE_6_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_6_MBPS;
break;
case 12:
rate = HAL_RX_LEGACY_RATE_54_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_54_MBPS;
break;
case 13:
rate = HAL_RX_LEGACY_RATE_36_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_36_MBPS;
break;
case 14:
rate = HAL_RX_LEGACY_RATE_18_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_18_MBPS;
break;
case 15:
rate = HAL_RX_LEGACY_RATE_9_MBPS;
rate = HAL_RX_LEGACY_RATE_OFDM_9_MBPS;
break;
default:
rate = HAL_RX_LEGACY_RATE_INVALID;
rate = HAL_RX_LEGACY_RATE_OFDM_INVALID;
break;
}
ppdu_info->rate = rate;

View File

@ -10017,50 +10017,46 @@ static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
static int
ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
struct wmi_unit_test_cmd ut_cmd,
u32 *test_args)
const struct wmi_unit_test_arg *ut)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_unit_test_cmd *cmd;
int buf_len, arg_len;
struct sk_buff *skb;
struct wmi_tlv *tlv;
__le32 *ut_cmd_args;
void *ptr;
u32 *ut_cmd_args;
int buf_len, arg_len;
int ret;
int i;
arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
arg_len = sizeof(*ut_cmd_args) * ut->num_args;
buf_len = sizeof(*cmd) + arg_len + TLV_HDR_SIZE;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
if (!skb)
return -ENOMEM;
cmd = (struct wmi_unit_test_cmd *)skb->data;
ptr = skb->data;
cmd = ptr;
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
sizeof(ut_cmd));
cmd->vdev_id = ut_cmd.vdev_id;
cmd->module_id = ut_cmd.module_id;
cmd->num_args = ut_cmd.num_args;
cmd->diag_token = ut_cmd.diag_token;
ptr = skb->data + sizeof(ut_cmd);
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(ut->vdev_id);
cmd->module_id = cpu_to_le32(ut->module_id);
cmd->num_args = cpu_to_le32(ut->num_args);
cmd->diag_token = cpu_to_le32(ut->diag_token);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
ptr += TLV_HDR_SIZE;
ut_cmd_args = ptr;
for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
ut_cmd_args[i] = test_args[i];
for (i = 0; i < ut->num_args; i++)
ut_cmd_args[i] = cpu_to_le32(ut->args[i]);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI unit test : module %d vdev %d n_args %d token %d\n",
cmd->module_id, cmd->vdev_id, cmd->num_args,
cmd->diag_token);
ut->module_id, ut->vdev_id, ut->num_args, ut->diag_token);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
@ -10076,8 +10072,7 @@ ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
int ath12k_wmi_simulate_radar(struct ath12k *ar)
{
struct ath12k_link_vif *arvif;
u32 dfs_args[DFS_MAX_TEST_ARGS];
struct wmi_unit_test_cmd wmi_ut;
struct wmi_unit_test_arg wmi_ut = {};
bool arvif_found = false;
list_for_each_entry(arvif, &ar->arvifs, list) {
@ -10090,22 +10085,23 @@ int ath12k_wmi_simulate_radar(struct ath12k *ar)
if (!arvif_found)
return -EINVAL;
dfs_args[DFS_TEST_CMDID] = 0;
dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
/* Currently we could pass segment_id(b0 - b1), chirp(b2)
wmi_ut.args[DFS_TEST_CMDID] = 0;
wmi_ut.args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
/*
* Currently we could pass segment_id(b0 - b1), chirp(b2)
* freq offset (b3 - b10) to unit test. For simulation
* purpose this can be set to 0 which is valid.
*/
dfs_args[DFS_TEST_RADAR_PARAM] = 0;
wmi_ut.args[DFS_TEST_RADAR_PARAM] = 0;
wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
wmi_ut.vdev_id = arvif->vdev_id;
wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
wmi_ut.num_args = DFS_MAX_TEST_ARGS;
wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
return ath12k_wmi_send_unit_test_cmd(ar, &wmi_ut);
}
int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,

View File

@ -4193,7 +4193,6 @@ struct wmi_addba_clear_resp_cmd {
struct ath12k_wmi_mac_addr_params peer_macaddr;
} __packed;
#define DFS_PHYERR_UNIT_TEST_CMD 0
#define DFS_UNIT_TEST_MODULE 0x2b
#define DFS_UNIT_TEST_TOKEN 0xAA
@ -4204,10 +4203,15 @@ enum dfs_test_args_idx {
DFS_MAX_TEST_ARGS,
};
struct wmi_dfs_unit_test_arg {
u32 cmd_id;
u32 pdev_id;
u32 radar_param;
/* update if another test command requires more */
#define WMI_UNIT_TEST_ARGS_MAX DFS_MAX_TEST_ARGS
struct wmi_unit_test_arg {
u32 vdev_id;
u32 module_id;
u32 diag_token;
u32 num_args;
u32 args[WMI_UNIT_TEST_ARGS_MAX];
};
struct wmi_unit_test_cmd {

View File

@ -1124,8 +1124,6 @@ static int ath6kl_usb_probe(struct usb_interface *interface,
int vendor_id, product_id;
int ret = 0;
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
product_id = le16_to_cpu(dev->descriptor.idProduct);
@ -1143,11 +1141,8 @@ static int ath6kl_usb_probe(struct usb_interface *interface,
ath6kl_dbg(ATH6KL_DBG_USB, "USB 1.1 Host\n");
ar_usb = ath6kl_usb_create(interface);
if (ar_usb == NULL) {
ret = -ENOMEM;
goto err_usb_put;
}
if (ar_usb == NULL)
return -ENOMEM;
ar = ath6kl_core_create(&ar_usb->udev->dev);
if (ar == NULL) {
@ -1176,15 +1171,12 @@ static int ath6kl_usb_probe(struct usb_interface *interface,
ath6kl_core_destroy(ar);
err_usb_destroy:
ath6kl_usb_destroy(ar_usb);
err_usb_put:
usb_put_dev(dev);
return ret;
}
static void ath6kl_usb_remove(struct usb_interface *interface)
static void ath6kl_usb_disconnect(struct usb_interface *interface)
{
usb_put_dev(interface_to_usbdev(interface));
ath6kl_usb_device_detached(interface);
}
@ -1235,7 +1227,7 @@ static struct usb_driver ath6kl_usb_driver = {
.probe = ath6kl_usb_probe,
.suspend = ath6kl_usb_pm_suspend,
.resume = ath6kl_usb_pm_resume,
.disconnect = ath6kl_usb_remove,
.disconnect = ath6kl_usb_disconnect,
.id_table = ath6kl_usb_ids,
.supports_autosuspend = true,
.disable_hub_initiated_lpm = 1,

View File

@ -1630,16 +1630,6 @@ enum wmi_roam_mode {
WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
};
struct bss_bias {
u8 bssid[ETH_ALEN];
s8 bias;
} __packed;
struct bss_bias_info {
u8 num_bss;
struct bss_bias bss_bias[];
} __packed;
struct low_rssi_scan_params {
__le16 lrssi_scan_period;
a_sle16 lrssi_scan_threshold;
@ -1652,7 +1642,6 @@ struct roam_ctrl_cmd {
union {
u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
u8 roam_mode; /* WMI_SET_ROAM_MODE */
struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
*/
} __packed info;

View File

@ -1382,8 +1382,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
goto err_alloc;
}
usb_get_dev(udev);
hif_dev->udev = udev;
hif_dev->interface = interface;
hif_dev->usb_device_id = id;
@ -1403,7 +1401,6 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
err_fw_req:
usb_set_intfdata(interface, NULL);
kfree(hif_dev);
usb_put_dev(udev);
err_alloc:
return ret;
}
@ -1451,7 +1448,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
kfree(hif_dev);
dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
usb_put_dev(udev);
}
#ifdef CONFIG_PM

View File

@ -837,18 +837,19 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
struct b43_dmaring *ring;
int i, err;
dma_addr_t dma_test;
size_t nr_slots;
ring = kzalloc_obj(*ring);
if (for_tx)
nr_slots = B43_TXRING_SLOTS;
else
nr_slots = B43_RXRING_SLOTS;
ring = kzalloc_flex(*ring, meta, nr_slots);
if (!ring)
goto out;
ring->nr_slots = B43_RXRING_SLOTS;
if (for_tx)
ring->nr_slots = B43_TXRING_SLOTS;
ring->nr_slots = nr_slots;
ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots);
if (!ring->meta)
goto err_kfree_ring;
for (i = 0; i < ring->nr_slots; i++)
ring->meta->skb = B43_DMA_PTR_POISON;
@ -943,8 +944,6 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
err_kfree_txhdr_cache:
kfree(ring->txhdr_cache);
err_kfree_meta:
kfree(ring->meta);
err_kfree_ring:
kfree(ring);
ring = NULL;
goto out;
@ -1004,7 +1003,6 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring,
free_ringmemory(ring);
kfree(ring->txhdr_cache);
kfree(ring->meta);
kfree(ring);
}

View File

@ -228,8 +228,6 @@ struct b43_dmaring {
const struct b43_dma_ops *ops;
/* Kernel virtual base address of the ring memory. */
void *descbase;
/* Meta data about all descriptors. */
struct b43_dmadesc_meta *meta;
/* Cache of TX headers for each TX frame.
* This is to avoid an allocation on each TX.
* This is NULL for an RX ring.
@ -273,6 +271,8 @@ struct b43_dmaring {
/* Statistics: Total number of TX plus all retries. */
u64 nr_total_packet_tries;
#endif /* CONFIG_B43_DEBUG */
/* Meta data about all descriptors. */
struct b43_dmadesc_meta meta[] __counted_by(nr_slots);
};
static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)

View File

@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware core release supported */
#define IWL_BZ_UCODE_CORE_MAX 101
#define IWL_BZ_UCODE_CORE_MAX 102
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 100

View File

@ -9,7 +9,7 @@
#include "fw/api/txq.h"
/* Highest firmware core release supported */
#define IWL_DR_UCODE_CORE_MAX 101
#define IWL_DR_UCODE_CORE_MAX 102
/* Lowest firmware API version supported */
#define IWL_DR_UCODE_API_MIN 100

View File

@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware core release supported */
#define IWL_SC_UCODE_CORE_MAX 101
#define IWL_SC_UCODE_CORE_MAX 102
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 100

View File

@ -504,7 +504,8 @@ iwl_acpi_parse_chains_table(union acpi_object *table,
u8 num_chains, u8 num_sub_bands)
{
for (u8 chain = 0; chain < num_chains; chain++) {
for (u8 subband = 0; subband < BIOS_SAR_MAX_SUB_BANDS_NUM;
for (u8 subband = 0;
subband < ARRAY_SIZE(chains[chain].subbands);
subband++) {
/* if we don't have the values, use the default */
if (subband >= num_sub_bands) {
@ -534,7 +535,23 @@ int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
if (IS_ERR(data))
return PTR_ERR(data);
/* start by trying to read revision 2 */
/* start by trying to read revision 3 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE_REV3,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 3) {
ret = -EINVAL;
goto out_free;
}
num_chains = ACPI_SAR_NUM_CHAINS_REV2;
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV3;
goto read_table;
}
/* then try revision 2 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WRDS_WIFI_DATA_SIZE_REV2,
&tbl_rev);
@ -591,6 +608,13 @@ int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
if (WARN_ON(num_chains * num_sub_bands >
ARRAY_SIZE(fwrt->sar_profiles[0].chains) *
ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) {
ret = -EINVAL;
goto out_free;
}
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
flags = wifi_pkg->package.elements[1].integer.value;
@ -624,7 +648,22 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
if (IS_ERR(data))
return PTR_ERR(data);
/* start by trying to read revision 2 */
/* start by trying to read revision 3 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE_REV3,
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 3) {
ret = -EINVAL;
goto out_free;
}
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV3;
goto read_table;
}
/* then try revision 2 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_EWRD_WIFI_DATA_SIZE_REV2,
&tbl_rev);
@ -679,6 +718,13 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
if (WARN_ON(ACPI_SAR_NUM_CHAINS_REV0 * num_sub_bands >
ARRAY_SIZE(fwrt->sar_profiles[0].chains) *
ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) {
ret = -EINVAL;
goto out_free;
}
enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;
@ -721,6 +767,13 @@ int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
if (tbl_rev < 2)
goto set_enabled;
if (WARN_ON(ACPI_SAR_NUM_CHAINS_REV0 * 2 * num_sub_bands >
ARRAY_SIZE(fwrt->sar_profiles[0].chains) *
ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands))) {
ret = -EINVAL;
goto out_free;
}
/* parse cdb chains for all profiles */
for (i = 0; i < n_profiles; i++) {
struct iwl_sar_profile_chain *chains;
@ -759,6 +812,12 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
u8 profiles;
u8 min_profiles;
} rev_data[] = {
{
.revisions = BIT(4),
.bands = ACPI_GEO_NUM_BANDS_REV4,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
.min_profiles = BIOS_GEO_MIN_PROFILE_NUM,
},
{
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
@ -812,6 +871,18 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
num_bands = rev_data[idx].bands;
num_profiles = rev_data[idx].profiles;
if (WARN_ON(num_profiles >
ARRAY_SIZE(fwrt->geo_profiles))) {
ret = -EINVAL;
goto out_free;
}
if (WARN_ON(num_bands >
ARRAY_SIZE(fwrt->geo_profiles[0].bands))) {
ret = -EINVAL;
goto out_free;
}
if (rev_data[idx].min_profiles) {
/* read header that says # of profiles */
union acpi_object *entry;
@ -851,18 +922,20 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) {
struct iwl_geo_profile *prof = &fwrt->geo_profiles[i];
for (j = 0; j < ARRAY_SIZE(prof->bands); j++) {
union acpi_object *entry;
/*
* num_bands is either 2 or 3, if it's only 2 then
* fill the third band (6 GHz) with the values from
* 5 GHz (second band)
* num_bands is either 2 or 3 or 4, if it's lower
* than 4, fill the third band (6 GHz) with the values
* from 5 GHz (second band)
*/
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].max =
fwrt->geo_profiles[i].bands[1].max;
prof->bands[j].max = prof->bands[1].max;
} else {
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
@ -872,15 +945,17 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
fwrt->geo_profiles[i].bands[j].max =
prof->bands[j].max =
entry->integer.value;
}
for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) {
for (k = 0;
k < ARRAY_SIZE(prof->bands[0].chains);
k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
fwrt->geo_profiles[i].bands[1].chains[k];
prof->bands[j].chains[k] =
prof->bands[1].chains[k];
} else {
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
@ -890,7 +965,7 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
goto out_free;
}
fwrt->geo_profiles[i].bands[j].chains[k] =
prof->bands[j].chains[k] =
entry->integer.value;
}
}
@ -898,6 +973,7 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
}
fwrt->geo_num_profiles = num_profiles;
fwrt->geo_bios_source = BIOS_SOURCE_ACPI;
fwrt->geo_enabled = true;
ret = 0;
out_free:
@ -915,6 +991,22 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
if (IS_ERR(data))
return PTR_ERR(data);
/* try to read ppag table rev 5 */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V3, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev == 5) {
num_sub_bands = IWL_NUM_SUB_BANDS_V3;
IWL_DEBUG_RADIO(fwrt,
"Reading PPAG table (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
ret = -EINVAL;
goto out_free;
}
}
/* try to read ppag table rev 1 to 4 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
@ -950,6 +1042,15 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto out_free;
read_table:
if (WARN_ON_ONCE(num_sub_bands >
ARRAY_SIZE(fwrt->ppag_chains[0].subbands))) {
ret = -EINVAL;
goto out_free;
}
BUILD_BUG_ON(ACPI_PPAG_NUM_CHAINS >
ARRAY_SIZE(fwrt->ppag_chains));
fwrt->ppag_bios_rev = tbl_rev;
flags = &wifi_pkg->package.elements[1];
@ -966,7 +1067,7 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
* first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
* following sub-bands to High-Band (5GHz).
*/
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
for (j = 0; j < num_sub_bands; j++) {
union acpi_object *ent;
@ -980,6 +1081,7 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
}
}
iwl_bios_print_ppag(fwrt, num_sub_bands);
fwrt->ppag_bios_source = BIOS_SOURCE_ACPI;
ret = 0;

View File

@ -8,11 +8,6 @@
#include <linux/acpi.h>
#include "fw/regulatory.h"
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/config.h"
#include "fw/img.h"
#include "iwl-trans.h"
@ -44,6 +39,7 @@
#define ACPI_SAR_NUM_SUB_BANDS_REV0 5
#define ACPI_SAR_NUM_SUB_BANDS_REV1 11
#define ACPI_SAR_NUM_SUB_BANDS_REV2 11
#define ACPI_SAR_NUM_SUB_BANDS_REV3 12
#define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \
ACPI_SAR_NUM_SUB_BANDS_REV0 + 2)
@ -51,6 +47,8 @@
ACPI_SAR_NUM_SUB_BANDS_REV1 + 2)
#define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 2)
#define ACPI_WRDS_WIFI_DATA_SIZE_REV3 (ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV3 + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV0 * \
ACPI_SAR_NUM_SUB_BANDS_REV0 + 3)
@ -60,11 +58,15 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
#define ACPI_EWRD_WIFI_DATA_SIZE_REV3 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV3 + 3)
#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
#define ACPI_GEO_NUM_BANDS_REV2 3
#define ACPI_GEO_NUM_BANDS_REV4 4
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
@ -96,10 +98,18 @@
*/
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX)
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
#define ACPI_PPAG_NUM_CHAINS 2
#define ACPI_PPAG_NUM_BANDS_V1 5
#define ACPI_PPAG_NUM_BANDS_V2 11
#define ACPI_PPAG_NUM_BANDS_V3 12
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((ACPI_PPAG_NUM_CHAINS * \
ACPI_PPAG_NUM_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((ACPI_PPAG_NUM_CHAINS * \
ACPI_PPAG_NUM_BANDS_V2) + 2)
/* used for ACPI PPAG table rev 5 */
#define ACPI_PPAG_WIFI_DATA_SIZE_V3 ((ACPI_PPAG_NUM_CHAINS * \
ACPI_PPAG_NUM_BANDS_V3) + 2)
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1

View File

@ -56,7 +56,8 @@ enum iwl_data_path_subcmd_ids {
RFH_QUEUE_CONFIG_CMD = 0xD,
/**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4 or
* &struct iwl_tlc_config_cmd_v5 or &struct iwl_tlc_config_cmd.
*/
TLC_MNG_CONFIG_CMD = 0xF,

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2012-2014, 2018-2019, 2021-2026 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -26,7 +26,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
MISSED_VAP_NOTIF = 0xFA,
/**
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
* @SESSION_PROTECTION_CMD: &struct iwl_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
/**
@ -34,7 +34,8 @@ enum iwl_mac_conf_subcmd_ids {
*/
CANCEL_CHANNEL_SWITCH_CMD = 0x6,
/**
* @MAC_CONFIG_CMD: &struct iwl_mac_config_cmd
* @MAC_CONFIG_CMD: &struct iwl_mac_config_cmd_v3 or
* &struct iwl_mac_config_cmd
*/
MAC_CONFIG_CMD = 0x8,
/**
@ -42,7 +43,8 @@ enum iwl_mac_conf_subcmd_ids {
*/
LINK_CONFIG_CMD = 0x9,
/**
* @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd
* @STA_CONFIG_CMD: &struct iwl_sta_cfg_cmd_v1,
* &struct iwl_sta_cfg_cmd_v2, or &struct iwl_sta_cfg_cmd
*/
STA_CONFIG_CMD = 0xA,
/**
@ -356,7 +358,7 @@ struct iwl_mac_wifi_gen_support {
} __packed;
/**
* struct iwl_mac_config_cmd - command structure to configure MAC contexts in
* struct iwl_mac_config_cmd_v3 - command structure to configure MAC contexts in
* MLD API for versions 2 and 3
* ( MAC_CONTEXT_CONFIG_CMD = 0x8 )
*
@ -375,7 +377,7 @@ struct iwl_mac_wifi_gen_support {
* @client: client mac data
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
struct iwl_mac_config_cmd_v3 {
__le32 id_and_color;
__le32 action;
/* MAC_CONTEXT_TYPE_API_E */
@ -393,7 +395,62 @@ struct iwl_mac_config_cmd {
struct iwl_mac_client_data client;
struct iwl_mac_p2p_dev_data p2p_dev;
};
} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2_VER_3 */
} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2, _VER_3 */
/**
* struct iwl_mac_nan_data - NAN specific MAC data
* @ndi_addrs: extra NDI addresses being used
* @ndi_addrs_count: number of extra NDI addresses
*/
struct iwl_mac_nan_data {
	struct {
		u8 addr[ETH_ALEN];
		/* 2 reserved bytes keep each entry 8 bytes (32-bit aligned) */
		__le16 reserved;
	} __packed ndi_addrs[2];
	/* number of valid entries in @ndi_addrs */
	__le32 ndi_addrs_count;
} __packed; /* MAC_CONTEXT_CONFIG_NAN_DATA_API_S_VER_1 */
/**
* struct iwl_mac_config_cmd - command structure to configure MAC contexts in
* MLD API for versions 4
* ( MAC_CONTEXT_CONFIG_CMD = 0x8 )
*
* @id_and_color: ID and color of the MAC
* @action: action to perform, see &enum iwl_ctxt_action
* @mac_type: one of &enum iwl_mac_types
* @local_mld_addr: mld address
* @reserved_for_local_mld_addr: reserved
* @filter_flags: combination of &enum iwl_mac_config_filter_flags
* @wifi_gen_v2: he/eht parameters as in cmd version 2
* @wifi_gen: he/eht/uhr parameters as in cmd version 3
* @nic_not_ack_enabled: mark that the NIC doesn't support receiving
* ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG).
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @client: client mac data
* @p2p_dev: mac data for p2p device
* @nan: NAN specific data (NAN data interface addresses)
*/
struct iwl_mac_config_cmd {
	__le32 id_and_color;
	__le32 action;
	/* MAC_CONTEXT_TYPE_API_E */
	__le32 mac_type;
	u8 local_mld_addr[6];
	__le16 reserved_for_local_mld_addr;
	__le32 filter_flags;
	/* HE/EHT(/UHR) support data; which member is used depends on the
	 * command version (v2 layout vs. v3 layout), per the kernel-doc
	 */
	union {
		struct iwl_mac_wifi_gen_support_v2 wifi_gen_v2;
		struct iwl_mac_wifi_gen_support wifi_gen;
	};
	__le32 nic_not_ack_enabled;
	/* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_3 */
	/* per-interface-type data; presumably selected by @mac_type
	 * (client/p2p_dev/nan) — confirm against FW API docs
	 */
	union {
		struct iwl_mac_client_data client;
		struct iwl_mac_p2p_dev_data p2p_dev;
		struct iwl_mac_nan_data nan;
	};
} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_4 */
/**
* enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being
@ -652,6 +709,7 @@ struct iwl_link_config_cmd {
*/
#define IWL_FW_MAX_ACTIVE_LINKS_NUM 2
#define IWL_FW_MAX_LINK_ID 3
/* parenthesized so the macro expands safely inside larger expressions
 * (e.g. "2 * IWL_FW_MAX_LINKS" would otherwise yield 2*ID + 1)
 */
#define IWL_FW_MAX_LINKS (IWL_FW_MAX_LINK_ID + 1)
/**
* enum iwl_fw_sta_type - FW station types
@ -662,6 +720,13 @@ struct iwl_link_config_cmd {
* @STATION_TYPE_MCAST: the station used for BCAST / MCAST in GO. Will be
* suspended / resumed at the right timing depending on the clients'
* power save state and the DTIM timing
* @STATION_TYPE_NAN_PEER_NMI: NAN management peer station type. A station
* of this type can have any number of links (even none) set in the
* link_mask. (Supported since version 3.)
* @STATION_TYPE_NAN_PEER_NDI: NAN data peer station type. A station
* of this type can have any number of links (even none) set in the
* link_mask. (Supported since version 3.)
* @STATION_TYPE_MAX: maximum number of FW station types
* @STATION_TYPE_AUX: aux sta. In the FW there is no need for a special type
* for the aux sta, so this type is only for driver - internal use.
*/
@ -669,8 +734,11 @@ enum iwl_fw_sta_type {
STATION_TYPE_PEER,
STATION_TYPE_BCAST_MGMT,
STATION_TYPE_MCAST,
STATION_TYPE_AUX,
}; /* STATION_TYPE_E_VER_1 */
STATION_TYPE_NAN_PEER_NMI,
STATION_TYPE_NAN_PEER_NDI,
STATION_TYPE_MAX,
STATION_TYPE_AUX = STATION_TYPE_MAX /* this doesn't exist in FW */
}; /* STATION_TYPE_E_VER_1, _VER_2 */
/**
* struct iwl_sta_cfg_cmd_v1 - cmd structure to add a peer sta to the uCode's
@ -729,7 +797,7 @@ struct iwl_sta_cfg_cmd_v1 {
} __packed; /* STA_CMD_API_S_VER_1 */
/**
* struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
* struct iwl_sta_cfg_cmd_v2 - cmd structure to add a peer sta to the uCode's
* station table
* ( STA_CONFIG_CMD = 0xA )
*
@ -769,7 +837,7 @@ struct iwl_sta_cfg_cmd_v1 {
* @mic_compute_pad_delay: MIC compute time padding
* @reserved: Reserved for alignment
*/
struct iwl_sta_cfg_cmd {
struct iwl_sta_cfg_cmd_v2 {
__le32 sta_id;
__le32 link_id;
u8 peer_mld_address[ETH_ALEN];
@ -799,6 +867,83 @@ struct iwl_sta_cfg_cmd {
u8 reserved[2];
} __packed; /* STA_CMD_API_S_VER_2 */
/**
* struct iwl_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's
* station table
* ( STA_CONFIG_CMD = 0xA )
*
* @sta_id: index of station in uCode's station table
* @link_mask: bitmap of link FW IDs used with this STA
* @peer_mld_address: the peers mld address
* @reserved_for_peer_mld_address: reserved
* @peer_link_address: the address of the link that is used to communicate
* with this sta
* @reserved_for_peer_link_address: reserved
* @station_type: type of this station. See &enum iwl_fw_sta_type
* @assoc_id: for GO only
* @beamform_flags: beam forming controls
* @mfp: indicates whether the STA uses management frame protection or not.
* @mimo: indicates whether the sta uses mimo or not
* @mimo_protection: indicates whether the sta uses mimo protection or not
* @ack_enabled: indicates that the AP supports receiving ACK-
* enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
* @trig_rnd_alloc: indicates that trigger based random allocation
* is enabled according to UORA element existence
* @tx_ampdu_spacing: minimum A-MPDU spacing:
* 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density
* @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K,
* 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K.
* @sp_length: the size of the SP in actual number of frames
* @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
* enabled ACs.
* @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY
* capa
* @htc_flags: which features are supported in HTC
* @use_ldpc_x2_cw: Indicates whether to use LDPC with double CW
* @use_icf: Indicates whether to use ICF instead of RTS
* @dps_pad_time: DPS (Dynamic Power Save) padding delay resolution to ensure
* proper timing alignment
* @dps_trans_delay: DPS minimal time that takes the peer to return to low power
* @dps_enabled: flag indicating whether or not DPS is enabled
* @mic_prep_pad_delay: MIC prep time padding
* @mic_compute_pad_delay: MIC compute time padding
* @nmi_sta_id: for an NDI peer STA, the NMI peer STA ID it relates to
* @ndi_local_addr: for an NDI peer STA, the local NDI interface MAC address
* @reserved: Reserved for alignment
*/
struct iwl_sta_cfg_cmd {
	__le32 sta_id;
	/* bitmap of FW link IDs; NAN peer STA types may have none set */
	__le32 link_mask;
	u8 peer_mld_address[ETH_ALEN];
	__le16 reserved_for_peer_mld_address;
	u8 peer_link_address[ETH_ALEN];
	__le16 reserved_for_peer_link_address;
	/* one of &enum iwl_fw_sta_type */
	__le32 station_type;
	__le32 assoc_id;
	__le32 beamform_flags;
	__le32 mfp;
	__le32 mimo;
	__le32 mimo_protection;
	__le32 ack_enabled;
	__le32 trig_rnd_alloc;
	__le32 tx_ampdu_spacing;
	__le32 tx_ampdu_max_size;
	__le32 sp_length;
	__le32 uapsd_acs;
	struct iwl_he_pkt_ext_v2 pkt_ext;
	__le32 htc_flags;
	u8 use_ldpc_x2_cw;
	u8 use_icf;
	u8 dps_pad_time;
	u8 dps_trans_delay;
	u8 dps_enabled;
	u8 mic_prep_pad_delay;
	u8 mic_compute_pad_delay;
	/* new in VER_3: NMI peer STA ID this NDI peer relates to */
	u8 nmi_sta_id;
	/* new in VER_3: local NDI interface MAC address for an NDI peer */
	u8 ndi_local_addr[ETH_ALEN];
	/* pad to a 32-bit boundary */
	u8 reserved[2];
} __packed; /* STA_CMD_API_S_VER_3 */
/**
* struct iwl_aux_sta_cmd - command for AUX STA configuration
* ( AUX_STA_CMD = 0xB )

View File

@ -57,8 +57,7 @@ enum iwl_mac_protection_flags {
* @FW_MAC_TYPE_P2P_DEVICE: P2P Device
* @FW_MAC_TYPE_P2P_STA: P2P client
* @FW_MAC_TYPE_GO: P2P GO
* @FW_MAC_TYPE_TEST: ?
* @FW_MAC_TYPE_MAX: highest support MAC type
* @FW_MAC_TYPE_NAN: NAN (since version 4)
*/
enum iwl_mac_types {
FW_MAC_TYPE_FIRST = 1,
@ -70,8 +69,7 @@ enum iwl_mac_types {
FW_MAC_TYPE_P2P_DEVICE,
FW_MAC_TYPE_P2P_STA,
FW_MAC_TYPE_GO,
FW_MAC_TYPE_TEST,
FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
FW_MAC_TYPE_NAN,
}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
/**

View File

@ -204,7 +204,7 @@ struct iwl_nvm_get_info_phy {
} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
#define IWL_NUM_CHANNELS_V1 51
#define IWL_NUM_CHANNELS 110
#define IWL_NUM_CHANNELS_V2 110
/**
* struct iwl_nvm_get_info_regulatory_v1 - regulatory information
@ -227,7 +227,7 @@ struct iwl_nvm_get_info_regulatory_v1 {
struct iwl_nvm_get_info_regulatory {
__le32 lar_enabled;
__le32 n_channels;
__le32 channel_profile[IWL_NUM_CHANNELS];
__le32 channel_profile[IWL_NUM_CHANNELS_V2];
} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
/**
@ -701,13 +701,23 @@ struct iwl_pnvm_init_complete_ntfy {
#define UATS_TABLE_COL_SIZE 13
/**
* struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD
* struct iwl_mcc_allowed_ap_type_cmd_v1 - struct for MCC_ALLOWED_AP_TYPE_CMD
* @mcc_to_ap_type_map: mapping an MCC to 6 GHz AP type support (UATS)
* @reserved: reserved
*/
struct iwl_mcc_allowed_ap_type_cmd {
struct iwl_mcc_allowed_ap_type_cmd_v1 {
u8 mcc_to_ap_type_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
__le16 reserved;
} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */
/**
* struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD
* @mcc_to_ap_type_map: mapping an MCC to 6 GHz AP type support (UATS)
* @mcc_to_ap_type_unii9_map: mapping an MCC to UNII-9 AP type support allowed
*/
struct iwl_mcc_allowed_ap_type_cmd {
u8 mcc_to_ap_type_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
u8 mcc_to_ap_type_unii9_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_2 */
#endif /* __iwl_fw_api_nvm_reg_h__ */

View File

@ -269,6 +269,7 @@ enum iwl_dev_tx_power_cmd_mode {
#define IWL_NUM_CHAIN_LIMITS 2
#define IWL_NUM_SUB_BANDS_V1 5
#define IWL_NUM_SUB_BANDS_V2 11
#define IWL_NUM_SUB_BANDS_V3 12
/**
* struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd
@ -425,24 +426,38 @@ struct iwl_dev_tx_power_cmd_v10 {
__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_10 */
/**
 * struct iwl_dev_tx_power_cmd_v11 - TX power reduction command, version 11
 * @per_chain: per-chain TX power limits, now with %IWL_NUM_SUB_BANDS_V3
 *	(12) sub-bands per chain
 * @per_chain_restriction_changed: whether @per_chain changed since the
 *	previous command (presumably same semantics as earlier versions —
 *	confirm against the v9/v10 kernel-doc)
 * @reserved: reserved for alignment
 * @timer_period: timer period; relevant to the SAR-timer set mode
 *	(see &enum iwl_dev_tx_power_cmd_mode) — confirm units with FW API
 * @flags: reduce power flags
 */
struct iwl_dev_tx_power_cmd_v11 {
	__le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V3];
	u8 per_chain_restriction_changed;
	u8 reserved;
	__le32 timer_period;
	__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_11 */
/*
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
* @v9: version 9 part of the command
* @v10: version 10 part of the command
* @v11: version 11 part of the command
*/
struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_common common;
union {
struct iwl_dev_tx_power_cmd_v9 v9;
struct iwl_dev_tx_power_cmd_v10 v10;
struct iwl_dev_tx_power_cmd_v11 v11;
};
} __packed; /* TX_REDUCED_POWER_API_S_VER_9_VER10 */
} __packed; /* TX_REDUCED_POWER_API_S_VER_9
* TX_REDUCED_POWER_API_S_VER_10
* TX_REDUCED_POWER_API_S_VER_11
*/
#define IWL_NUM_GEO_PROFILES 3
#define IWL_NUM_GEO_PROFILES_V3 8
#define IWL_NUM_BANDS_PER_CHAIN_V1 2
#define IWL_NUM_BANDS_PER_CHAIN_V2 3
#define IWL_NUM_BANDS_PER_CHAIN_V6 4
/**
* enum iwl_geo_per_chain_offset_operation - type of operation
@ -524,12 +539,25 @@ struct iwl_geo_tx_power_profiles_cmd_v5 {
__le32 table_revision;
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */
/**
* struct iwl_geo_tx_power_profiles_cmd_v6 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @bios_hdr: describes the revision and the source of the BIOS
*/
struct iwl_geo_tx_power_profiles_cmd_v6 {
__le32 ops;
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V6];
struct iwl_bios_config_hdr bios_hdr;
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_6 */
union iwl_geo_tx_power_profiles_cmd {
struct iwl_geo_tx_power_profiles_cmd_v1 v1;
struct iwl_geo_tx_power_profiles_cmd_v2 v2;
struct iwl_geo_tx_power_profiles_cmd_v3 v3;
struct iwl_geo_tx_power_profiles_cmd_v4 v4;
struct iwl_geo_tx_power_profiles_cmd_v5 v5;
struct iwl_geo_tx_power_profiles_cmd_v6 v6;
};
/**
@ -573,6 +601,7 @@ enum iwl_ppag_flags {
* @v1: command version 1 structure.
* @v5: command version 5 structure.
* @v7: command version 7 structure.
* @v8: command version 8 structure.
* @v1.flags: values from &enum iwl_ppag_flags
* @v1.gain: table of antenna gain values per chain and sub-band
* @v1.reserved: reserved
@ -581,6 +610,8 @@ enum iwl_ppag_flags {
* @v7.ppag_config_info: see @struct bios_value_u32
* @v7.gain: table of antenna gain values per chain and sub-band
* @v7.reserved: reserved
* @v8.ppag_config_info: see @struct bios_value_u32
* @v8.gain: table of antenna gain values per chain and sub-band
*/
union iwl_ppag_table_cmd {
struct {
@ -598,6 +629,10 @@ union iwl_ppag_table_cmd {
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
s8 reserved[2];
} __packed v7; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_7 */
struct {
struct bios_value_u32 ppag_config_info;
s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V3];
} __packed v8; /* PER_PLAT_ANTENNA_GAIN_CMD_API_S_VER_8 */
} __packed;
#define IWL_PPAG_CMD_V1_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)

View File

@ -985,6 +985,7 @@ struct iwl_scan_probe_params_v4 {
} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
#define SCAN_MAX_NUM_CHANS_V3 67
#define SCAN_MAX_NUM_CHANS_V4 68
/**
* struct iwl_scan_channel_params_v4 - channel params
@ -1027,6 +1028,24 @@ struct iwl_scan_channel_params_v7 {
struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V3];
} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
/**
* struct iwl_scan_channel_params_v8 - channel params
* @flags: channel flags &enum iwl_scan_channel_flags
* @count: num of channels in scan request
* @n_aps_override: override the number of APs the FW uses to calculate dwell
* time when adaptive dwell is used.
* Channel k will use n_aps_override[i] when BIT(20 + i) is set in
* channel_config[k].flags
* @channel_config: array of explicit channel configurations
* for 2.4Ghz and 5.2Ghz bands
*/
struct iwl_scan_channel_params_v8 {
u8 flags;
u8 count;
u8 n_aps_override[2];
struct iwl_scan_channel_cfg_umac channel_config[SCAN_MAX_NUM_CHANS_V4];
} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_8 */
/**
* struct iwl_scan_general_params_v11 - channel params
* @flags: &enum iwl_umac_scan_general_flags_v2
@ -1109,6 +1128,20 @@ struct iwl_scan_req_params_v17 {
struct iwl_scan_probe_params_v4 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_17 - 14 */
/**
* struct iwl_scan_req_params_v18 - scan request parameters (v18)
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v8
* @periodic_params: &struct iwl_scan_periodic_parms_v1
* @probe_params: &struct iwl_scan_probe_params_v4
*/
struct iwl_scan_req_params_v18 {
struct iwl_scan_general_params_v11 general_params;
struct iwl_scan_channel_params_v8 channel_params;
struct iwl_scan_periodic_parms_v1 periodic_params;
struct iwl_scan_probe_params_v4 probe_params;
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_18 */
/**
* struct iwl_scan_req_umac_v12 - scan request command (v12)
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
@ -1133,6 +1166,18 @@ struct iwl_scan_req_umac_v17 {
struct iwl_scan_req_params_v17 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_17 - 14 */
/**
* struct iwl_scan_req_umac_v18 - scan request command (v18)
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
*/
struct iwl_scan_req_umac_v18 {
__le32 uid;
__le32 ooc_priority;
struct iwl_scan_req_params_v18 scan_params;
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_18 */
/**
* struct iwl_umac_scan_abort - scan abort command
* @uid: scan id, &enum iwl_umac_scan_uid_offsets

View File

@ -598,7 +598,6 @@ struct iwl_stats_ntfy_per_sta {
} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
#define IWL_STATS_MAX_PHY_OPERATIONAL 3
#define IWL_STATS_MAX_FW_LINKS (IWL_FW_MAX_LINK_ID + 1)
/**
* struct iwl_system_statistics_notif_oper - statistics notification
@ -610,7 +609,7 @@ struct iwl_stats_ntfy_per_sta {
*/
struct iwl_system_statistics_notif_oper {
__le32 time_stamp;
struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS];
struct iwl_stats_ntfy_per_link per_link[IWL_FW_MAX_LINKS];
struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
struct iwl_stats_ntfy_per_sta per_sta[IWL_STATION_COUNT_MAX];
} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */
@ -624,7 +623,7 @@ struct iwl_system_statistics_notif_oper {
*/
struct iwl_system_statistics_part1_notif_oper {
__le32 time_stamp;
struct iwl_stats_ntfy_part1_per_link per_link[IWL_STATS_MAX_FW_LINKS];
struct iwl_stats_ntfy_part1_per_link per_link[IWL_FW_MAX_LINKS];
__le32 per_phy_crc_error_stats[IWL_STATS_MAX_PHY_OPERATIONAL];
} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_API_S_VER_4 */

View File

@ -2933,7 +2933,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
queue_delayed_work(system_unbound_wq, &wk_data->wk,
queue_delayed_work(system_dfl_wq, &wk_data->wk,
usecs_to_jiffies(delay));
return 0;
@ -3236,7 +3236,7 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
queue_delayed_work(system_unbound_wq,
queue_delayed_work(system_dfl_wq,
&fwrt->dump.wks[idx].wk,
usecs_to_jiffies(delay));

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2012-2014, 2018-2026 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -128,19 +128,11 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
@ -200,39 +192,10 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
}
/*
@ -264,7 +227,6 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
struct iwl_trans *trans = fwrt->trans;
struct iwl_tcm_error_event_table table = {};
u32 base = fwrt->trans->dbg.tcm_error_event_table[idx];
int i;
u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 :
IWL_ERROR_EVENT_TABLE_TCM1;
@ -275,23 +237,10 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
table.hw_status[i], i);
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
table.sw_status[i], i);
}
/*
@ -338,26 +287,10 @@ static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n",
table.mbx_lmac_to_rcm_req);
IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n",
table.mbx_rcm_to_lmac_req);
IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
}
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)

View File

@ -103,6 +103,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
IWL_UCODE_TLV_CURRENT_PC = 68,
IWL_UCODE_TLV_FSEQ_BIN_VERSION = 72,
IWL_UCODE_TLV_CMD_BIOS_TABLE = 73,
/* contains sub-sections like PNVM file does (did) */
IWL_UCODE_TLV_PNVM_DATA = 74,
@ -1040,6 +1041,20 @@ struct iwl_fw_cmd_version {
u8 notif_ver;
} __packed;
/**
* struct iwl_fw_cmd_bios_table - firmware command BIOS revision entry
* @cmd: command ID
* @group: group ID
* @max_acpi_revision: max supported ACPI revision of command.
* @max_uefi_revision: max supported UEFI revision of command.
*/
struct iwl_fw_cmd_bios_table {
u8 cmd;
u8 group;
u8 max_acpi_revision;
u8 max_uefi_revision;
} __packed;
struct iwl_fw_tcm_error_addr {
__le32 addr;
}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */

View File

@ -1,11 +1,41 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2019 - 2021 Intel Corporation
* Copyright(c) 2024 Intel Corporation
* Copyright(c) 2024 - 2025 Intel Corporation
*/
#include <fw/api/commands.h>
#include "img.h"
/**
 * iwl_fw_lookup_cmd_bios_supported_revision - max BIOS table revision for a cmd
 * @fw: firmware image holding the TLV-provided cmd/BIOS-revision table
 * @table_source: BIOS source the table was read from (only ACPI and UEFI
 *	carry per-command revisions; anything else returns @def)
 * @cmd_id: wide command ID to look up
 * @def: value returned when no matching table entry exists
 *
 * Returns the matching entry's max ACPI or UEFI revision, or @def.
 */
u8 iwl_fw_lookup_cmd_bios_supported_revision(const struct iwl_fw *fw,
					     enum bios_source table_source,
					     u32 cmd_id, u8 def)
{
	/* prior to LONG_GROUP, we never used this CMD version API */
	u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
	u8 cmd = iwl_cmd_opcode(cmd_id);
	const struct iwl_fw_cmd_bios_table *tbl;
	u32 i;

	if (table_source != BIOS_SOURCE_ACPI &&
	    table_source != BIOS_SOURCE_UEFI)
		return def;

	tbl = fw->ucode_capa.cmd_bios_tables;
	if (!tbl || !fw->ucode_capa.n_cmd_bios_tables)
		return def;

	for (i = 0; i < fw->ucode_capa.n_cmd_bios_tables; i++) {
		if (tbl[i].group != grp || tbl[i].cmd != cmd)
			continue;

		if (table_source == BIOS_SOURCE_ACPI)
			return tbl[i].max_acpi_revision;
		return tbl[i].max_uefi_revision;
	}

	return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_bios_supported_revision);
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
{
const struct iwl_fw_cmd_version *entry;

View File

@ -9,6 +9,7 @@
#include <linux/types.h>
#include "api/dbg-tlv.h"
#include "api/nvm-reg.h"
#include "file.h"
#include "error-dump.h"
@ -57,6 +58,9 @@ struct iwl_ucode_capabilities {
const struct iwl_fw_cmd_version *cmd_versions;
u32 n_cmd_versions;
const struct iwl_fw_cmd_bios_table *cmd_bios_tables;
u32 n_cmd_bios_tables;
};
static inline bool
@ -274,6 +278,10 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
u8 iwl_fw_lookup_cmd_bios_supported_revision(const struct iwl_fw *fw,
enum bios_source table_source,
u32 cmd_id, u8 def);
u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);

View File

@ -241,6 +241,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
int i, j;
if (WARN_ON_ONCE(n_subbands >
ARRAY_SIZE(fwrt->sar_profiles[0].chains[0].subbands)))
return -EINVAL;
for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
struct iwl_sar_profile *prof;
@ -300,132 +304,6 @@ int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
int subband)
{
s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
if ((subband == 0 &&
(ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
(subband != 0 &&
(ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
return false;
}
return true;
}
/* Utility function for iwlmvm and iwlxvt */
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
u8 cmd_ver;
int i, j, num_sub_bands;
s8 *gain;
bool send_ppag_always;
/* many firmware images for JF lie about this */
if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
return -EOPNOTSUPP;
if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(fwrt,
"PPAG capability not supported by FW, command not sent.\n");
return -EINVAL;
}
cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD), 1);
/*
* Starting from ver 4, driver needs to send the PPAG CMD regardless
* if PPAG is enabled/disabled or valid/invalid.
*/
send_ppag_always = cmd_ver > 3;
/* Don't send PPAG if it is disabled */
if (!send_ppag_always && !fwrt->ppag_flags) {
IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
return -EINVAL;
}
IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
if (cmd_ver == 1) {
num_sub_bands = IWL_NUM_SUB_BANDS_V1;
gain = cmd->v1.gain[0];
*cmd_size = sizeof(cmd->v1);
cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
if (fwrt->ppag_bios_rev >= 1) {
/* in this case FW supports revision 0 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_bios_rev);
}
} else if (cmd_ver == 5) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v5.gain[0];
*cmd_size = sizeof(cmd->v5);
cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
if (fwrt->ppag_bios_rev == 0) {
/* in this case FW supports revisions 1,2 or 3 */
IWL_DEBUG_RADIO(fwrt,
"PPAG table rev is 0, send padded table\n");
}
} else if (cmd_ver == 7) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v7.gain[0];
*cmd_size = sizeof(cmd->v7);
cmd->v7.ppag_config_info.hdr.table_source =
fwrt->ppag_bios_source;
cmd->v7.ppag_config_info.hdr.table_revision =
fwrt->ppag_bios_rev;
cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
} else {
IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
return -EINVAL;
}
/* ppag mode */
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits were read from bios: %d\n",
fwrt->ppag_flags);
if (cmd_ver == 1 &&
!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
} else {
IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
}
/* The 'flags' field is the same in v1 and v5 so we can just
* use v1 to access it.
*/
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
(cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
le32_to_cpu(cmd->v7.ppag_config_info.value));
for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
for (j = 0; j < num_sub_bands; j++) {
if (!send_ppag_always &&
!iwl_ppag_value_valid(fwrt, i, j))
return -EINVAL;
gain[i * num_sub_bands + j] =
fwrt->ppag_chains[i].subbands[j];
IWL_DEBUG_RADIO(fwrt,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
i, j, gain[i * num_sub_bands + j]);
}
}
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
{
if (!dmi_check_system(dmi_ppag_approved_list)) {
@ -440,6 +318,27 @@ bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
/* iwl_bios_print_ppag - dump the PPAG data held in the runtime struct
 * @fwrt: the firmware runtime
 * @n_subbands: number of subbands to print for each chain
 *
 * Emits the PPAG revision, flags and per-chain/per-subband gain values
 * previously read from BIOS, as radio debug messages.
 */
void iwl_bios_print_ppag(struct iwl_fw_runtime *fwrt, int n_subbands)
{
	int chain, subband;

	IWL_DEBUG_RADIO(fwrt, "PPAG table as read from BIOS:\n");
	IWL_DEBUG_RADIO(fwrt, "PPAG revision = %d\n", fwrt->ppag_bios_rev);
	IWL_DEBUG_RADIO(fwrt, "PPAG flags = 0x%x\n", fwrt->ppag_flags);

	/* never read past the storage actually present in fwrt */
	if (WARN_ON_ONCE(n_subbands >
			 ARRAY_SIZE(fwrt->ppag_chains[0].subbands)))
		return;

	for (chain = 0; chain < ARRAY_SIZE(fwrt->ppag_chains); chain++) {
		for (subband = 0; subband < n_subbands; subband++)
			IWL_DEBUG_RADIO(fwrt,
					"ppag_chains[%d].subbands[%d] = %d\n",
					chain, subband,
					fwrt->ppag_chains[chain].subbands[subband]);
	}
}
bool iwl_is_tas_approved(void)
{
return dmi_check_system(dmi_tas_approved_list);

View File

@ -21,10 +21,11 @@
*/
#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4
#define BIOS_SAR_NUM_CHAINS 2
#define BIOS_SAR_MAX_SUB_BANDS_NUM 11
#define BIOS_SAR_MAX_SUB_BANDS_NUM 12
#define BIOS_PPAG_MAX_SUB_BANDS_NUM 12
#define BIOS_GEO_NUM_CHAINS 2
#define BIOS_GEO_MAX_NUM_BANDS 3
#define BIOS_GEO_MAX_NUM_BANDS 4
#define BIOS_GEO_MAX_PROFILE_NUM 8
#define BIOS_GEO_MIN_PROFILE_NUM 3
@ -100,7 +101,7 @@ struct iwl_geo_profile {
/* Same thing as with SAR, all revisions fit in revision 2 */
struct iwl_ppag_chain {
s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
s8 subbands[BIOS_PPAG_MAX_SUB_BANDS_NUM];
};
struct iwl_tas_data {
@ -180,6 +181,9 @@ enum iwl_dsm_masks_reg {
struct iwl_fw_runtime;
/* Print the PPAG table as read from BIOS */
void iwl_bios_print_ppag(struct iwl_fw_runtime *fwrt, int n_subbands);
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
@ -190,10 +194,6 @@ int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
__le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b);
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
union iwl_ppag_table_cmd *cmd,
int *cmd_size);
bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
bool iwl_is_tas_approved(void);

View File

@ -106,13 +106,14 @@ struct iwl_txf_iter_data {
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
* @uats_table: AP type table
* @uats_valid: is AP type table valid
* @ap_type_cmd: AP type tables (for enablement on 6 GHz)
* @ap_type_cmd_valid: if &ap_type_cmd is valid
* @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
* 0: Unlocked, 1 and 2: Locked.
* Only read the UEFI variables if locked.
* @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
* @geo_profiles: geographic profiles as read from WGDS BIOS table
* @geo_bios_source: see &enum bios_source
* @phy_filters: specific phy filters as read from WPFC BIOS table
* @ppag_bios_rev: PPAG BIOS revision
* @ppag_bios_source: see &enum bios_source
@ -204,6 +205,7 @@ struct iwl_fw_runtime {
u8 sar_chain_b_profile;
u8 reduced_power_flags;
struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
enum bios_source geo_bios_source;
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
@ -213,8 +215,8 @@ struct iwl_fw_runtime {
u8 ppag_bios_source;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
struct iwl_mcc_allowed_ap_type_cmd uats_table;
bool uats_valid;
struct iwl_mcc_allowed_ap_type_cmd ap_type_cmd;
bool ap_type_cmd_valid;
u8 uefi_tables_lock_status;
struct iwl_phy_specific_cfg phy_filters;
enum bios_source dsm_source;

View File

@ -402,11 +402,11 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
if (uats_data->revision != 1)
return -EINVAL;
memcpy(fwrt->uats_table.mcc_to_ap_type_map,
memcpy(fwrt->ap_type_cmd.mcc_to_ap_type_map,
uats_data->mcc_to_ap_type_map,
sizeof(fwrt->uats_table.mcc_to_ap_type_map));
sizeof(fwrt->ap_type_cmd.mcc_to_ap_type_map));
fwrt->uats_valid = true;
fwrt->ap_type_cmd_valid = true;
return 0;
}
@ -429,12 +429,61 @@ void iwl_uefi_get_uats_table(struct iwl_trans *trans,
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
struct uefi_sar_profile *uefi_sar_prof,
u8 prof_index, bool enabled)
void iwl_uefi_get_uneb_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof,
sizeof(struct uefi_sar_profile));
struct uefi_cnv_wlan_uneb_data *data;
data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UNEB_NAME,
"UNEB", sizeof(*data), NULL);
if (IS_ERR(data))
return;
if (data->revision != 1) {
IWL_DEBUG_RADIO(fwrt,
"Cannot read UNEB table. rev is invalid\n");
goto out;
}
BUILD_BUG_ON(sizeof(data->mcc_to_ap_type_map) !=
sizeof(fwrt->ap_type_cmd.mcc_to_ap_type_unii9_map));
memcpy(fwrt->ap_type_cmd.mcc_to_ap_type_unii9_map,
data->mcc_to_ap_type_map,
sizeof(fwrt->ap_type_cmd.mcc_to_ap_type_unii9_map));
fwrt->ap_type_cmd_valid = true;
out:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uneb_table);
/* Copy one UEFI SAR profile into fwrt->sar_profiles[prof_index].
 *
 * @vals is a flat array laid out as vals[chain * num_subbands + subband];
 * num_subbands depends on the UEFI table revision (the caller passes it).
 * @enabled is masked with IWL_SAR_ENABLE_MSK to set the profile's
 * enabled flag.
 */
static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
const u8 *vals, u8 prof_index,
u8 num_subbands, bool enabled)
{
struct iwl_sar_profile *sar_prof = &fwrt->sar_profiles[prof_index];
/*
* Make sure fwrt has enough room to hold the data
* coming from the UEFI table
*/
if (WARN_ON(ARRAY_SIZE(sar_prof->chains) *
ARRAY_SIZE(sar_prof->chains[0].subbands) <
UEFI_SAR_MAX_CHAINS_PER_PROFILE * num_subbands))
return;
BUILD_BUG_ON(ARRAY_SIZE(sar_prof->chains) !=
UEFI_SAR_MAX_CHAINS_PER_PROFILE);
/* de-flatten: vals[chain * num_subbands + subband] -> per-chain storage */
for (int chain = 0;
chain < UEFI_SAR_MAX_CHAINS_PER_PROFILE;
chain++) {
for (int subband = 0; subband < num_subbands; subband++)
sar_prof->chains[chain].subbands[subband] =
vals[chain * num_subbands + subband];
}
fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK;
}
@ -442,24 +491,46 @@ static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_var_wrds *data;
unsigned long size;
unsigned long expected_size;
int num_subbands;
int ret = 0;
data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME,
"WRDS", sizeof(*data), NULL);
"WRDS",
UEFI_SAR_WRDS_TABLE_SIZE_REV2,
&size);
if (IS_ERR(data))
return -EINVAL;
if (data->revision != IWL_UEFI_WRDS_REVISION) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n",
switch (data->revision) {
case 2:
expected_size = UEFI_SAR_WRDS_TABLE_SIZE_REV2;
num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV2;
break;
case 3:
expected_size = UEFI_SAR_WRDS_TABLE_SIZE_REV3;
num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV3;
break;
default:
IWL_DEBUG_RADIO(fwrt,
"Unsupported UEFI WRDS revision:%d\n",
data->revision);
ret = -EINVAL;
goto out;
}
if (size != expected_size) {
ret = -EINVAL;
goto out;
}
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode);
iwl_uefi_set_sar_profile(fwrt, data->vals, 0,
num_subbands, data->mode);
out:
kfree(data);
return ret;
@ -468,21 +539,40 @@ int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_var_ewrd *data;
unsigned long expected_size;
int i, ret = 0;
unsigned long size;
int num_subbands;
int profile_size;
data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME,
"EWRD", sizeof(*data), NULL);
"EWRD",
UEFI_SAR_EWRD_TABLE_SIZE_REV2,
&size);
if (IS_ERR(data))
return -EINVAL;
if (data->revision != IWL_UEFI_EWRD_REVISION) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n",
switch (data->revision) {
case 2:
expected_size = UEFI_SAR_EWRD_TABLE_SIZE_REV2;
num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV2;
profile_size = UEFI_SAR_PROFILE_SIZE_REV2;
break;
case 3:
expected_size = UEFI_SAR_EWRD_TABLE_SIZE_REV3;
num_subbands = UEFI_SAR_SUB_BANDS_NUM_REV3;
profile_size = UEFI_SAR_PROFILE_SIZE_REV3;
break;
default:
IWL_DEBUG_RADIO(fwrt,
"Unsupported UEFI EWRD revision:%d\n",
data->revision);
ret = -EINVAL;
goto out;
}
if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
if (size != expected_size ||
data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
ret = -EINVAL;
goto out;
}
@ -492,8 +582,8 @@ int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1,
data->mode);
iwl_uefi_set_sar_profile(fwrt, &data->vals[i * profile_size],
i + 1, num_subbands, data->mode);
out:
kfree(data);
@ -503,20 +593,39 @@ int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_var_wgds *data;
int i, ret = 0;
unsigned long expected_size;
unsigned long size;
int profile_size;
int n_subbands;
int ret = 0;
data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME,
"WGDS", sizeof(*data), NULL);
"WGDS", UEFI_WGDS_TABLE_SIZE_REV3,
&size);
if (IS_ERR(data))
return -EINVAL;
if (data->revision != IWL_UEFI_WGDS_REVISION) {
switch (data->revision) {
case 3:
expected_size = UEFI_WGDS_TABLE_SIZE_REV3;
n_subbands = UEFI_GEO_NUM_BANDS_REV3;
break;
case 4:
expected_size = UEFI_WGDS_TABLE_SIZE_REV4;
n_subbands = UEFI_GEO_NUM_BANDS_REV4;
break;
default:
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n",
data->revision);
goto out;
}
if (size != expected_size) {
ret = -EINVAL;
goto out;
}
if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM ||
data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) {
ret = -EINVAL;
@ -525,10 +634,31 @@ int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
goto out;
}
if (WARN_ON(BIOS_GEO_MAX_PROFILE_NUM >
ARRAY_SIZE(fwrt->geo_profiles) ||
n_subbands > ARRAY_SIZE(fwrt->geo_profiles[0].bands) ||
BIOS_GEO_NUM_CHAINS >
ARRAY_SIZE(fwrt->geo_profiles[0].bands[0].chains))) {
ret = -EINVAL;
goto out;
}
fwrt->geo_rev = data->revision;
for (i = 0; i < data->num_profiles; i++)
memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i],
sizeof(struct iwl_geo_profile));
fwrt->geo_bios_source = BIOS_SOURCE_UEFI;
profile_size = 3 * n_subbands;
for (int prof = 0; prof < data->num_profiles; prof++) {
const u8 *val = &data->vals[profile_size * prof];
struct iwl_geo_profile *geo_prof = &fwrt->geo_profiles[prof];
for (int subband = 0; subband < n_subbands; subband++) {
geo_prof->bands[subband].max = *val++;
for (int chain = 0;
chain < BIOS_GEO_NUM_CHAINS;
chain++)
geo_prof->bands[subband].chains[chain] = *val++;
}
}
fwrt->geo_num_profiles = data->num_profiles;
fwrt->geo_enabled = true;
@ -540,28 +670,66 @@ int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_var_ppag *data;
int n_subbands;
u32 valid_rev;
int ret = 0;
data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME,
"PPAG", sizeof(*data), NULL);
if (IS_ERR(data))
return -EINVAL;
"PPAG", UEFI_PPAG_DATA_SIZE_V5,
NULL);
if (!IS_ERR(data)) {
n_subbands = UEFI_PPAG_SUB_BANDS_NUM_REV5;
valid_rev = BIT(5);
if (data->revision < IWL_UEFI_MIN_PPAG_REV ||
data->revision > IWL_UEFI_MAX_PPAG_REV) {
goto parse_table;
}
data = iwl_uefi_get_verified_variable(fwrt->trans,
IWL_UEFI_PPAG_NAME,
"PPAG",
UEFI_PPAG_DATA_SIZE_V4,
NULL);
if (!IS_ERR(data)) {
n_subbands = UEFI_PPAG_SUB_BANDS_NUM_REV4;
/* revisions 1-4 have all the same size */
valid_rev = BIT(1) | BIT(2) | BIT(3) | BIT(4);
goto parse_table;
}
return -EINVAL;
parse_table:
if (!(BIT(data->revision) & valid_rev)) {
ret = -EINVAL;
IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n",
IWL_DEBUG_RADIO(fwrt,
"Unsupported UEFI PPAG revision:%d\n",
data->revision);
goto out;
}
/*
* Make sure fwrt has enough room to hold
* data coming from the UEFI table
*/
if (WARN_ON(ARRAY_SIZE(fwrt->ppag_chains) *
ARRAY_SIZE(fwrt->ppag_chains[0].subbands) <
UEFI_PPAG_NUM_CHAINS * n_subbands)) {
ret = -EINVAL;
goto out;
}
fwrt->ppag_bios_rev = data->revision;
fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
fwrt->ppag_bios_rev);
BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
memcpy(&fwrt->ppag_chains, &data->ppag_chains,
sizeof(data->ppag_chains));
for (int chain = 0; chain < UEFI_PPAG_NUM_CHAINS; chain++) {
for (int subband = 0; subband < n_subbands; subband++)
fwrt->ppag_chains[chain].subbands[subband] =
data->vals[chain * n_subbands + subband];
}
iwl_bios_print_ppag(fwrt, n_subbands);
fwrt->ppag_bios_source = BIOS_SOURCE_UEFI;
out:
kfree(data);

View File

@ -25,16 +25,12 @@
#define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing"
#define IWL_UEFI_DSBR_NAME L"UefiCnvCommonDSBR"
#define IWL_UEFI_WPFC_NAME L"WPFC"
#define IWL_UEFI_UNEB_NAME L"CnvUefiWlanUNEB"
#define IWL_SGOM_MAP_SIZE 339
#define IWL_UATS_MAP_SIZE 339
#define IWL_UEFI_WRDS_REVISION 2
#define IWL_UEFI_EWRD_REVISION 2
#define IWL_UEFI_WGDS_REVISION 3
#define IWL_UEFI_MIN_PPAG_REV 1
#define IWL_UEFI_MAX_PPAG_REV 4
#define IWL_UEFI_MIN_WTAS_REVISION 1
#define IWL_UEFI_MAX_WTAS_REVISION 2
#define IWL_UEFI_SPLC_REVISION 0
@ -63,6 +59,9 @@ struct uefi_cnv_wlan_uats_data {
u8 mcc_to_ap_type_map[IWL_UATS_MAP_SIZE - 1];
} __packed;
/* UNEB's layout is identical to UATS's */
#define uefi_cnv_wlan_uneb_data uefi_cnv_wlan_uats_data
struct uefi_cnv_common_step_data {
u8 revision;
u8 step_mode;
@ -72,68 +71,135 @@ struct uefi_cnv_common_step_data {
u8 radio2;
} __packed;
/*
* struct uefi_sar_profile - a SAR profile as defined in UEFI
*
* @chains: a per-chain table of SAR values
*/
struct uefi_sar_profile {
struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
} __packed;
#define UEFI_PPAG_SUB_BANDS_NUM_REV4 11
#define UEFI_PPAG_SUB_BANDS_NUM_REV5 12
#define UEFI_PPAG_NUM_CHAINS 2
/*
#define UEFI_SAR_SUB_BANDS_NUM_REV2 11
#define UEFI_SAR_SUB_BANDS_NUM_REV3 12
#define UEFI_SAR_MAX_CHAINS_PER_PROFILE 4
#define UEFI_GEO_NUM_BANDS_REV3 3
#define UEFI_GEO_NUM_BANDS_REV4 4
/**
* struct uefi_cnv_var_wrds - WRDS table as defined in UEFI
*
* @revision: the revision of the table
* @mode: is WRDS enabled/disabled
* @sar_profile: sar profile #1
* @vals: values for sar profile #1 as an array:
* vals[chain * num_of_subbands + subband] will return the right value.
* num_of_subbands depends on the revision. For revision 3, it is
* %UEFI_SAR_SUB_BANDS_NUM_REV3, for earlier revision, it is
* %UEFI_SAR_SUB_BANDS_NUM_REV2.
* The max number of chains is currently 2
*/
struct uefi_cnv_var_wrds {
u8 revision;
u32 mode;
struct uefi_sar_profile sar_profile;
u8 vals[];
} __packed;
/*
#define UEFI_SAR_PROFILE_SIZE_REV2 \
(sizeof(u8) * UEFI_SAR_MAX_CHAINS_PER_PROFILE * \
UEFI_SAR_SUB_BANDS_NUM_REV2)
#define UEFI_SAR_PROFILE_SIZE_REV3 \
(sizeof(u8) * UEFI_SAR_MAX_CHAINS_PER_PROFILE * \
UEFI_SAR_SUB_BANDS_NUM_REV3)
#define UEFI_SAR_WRDS_TABLE_SIZE_REV2 \
(offsetof(struct uefi_cnv_var_wrds, vals) + \
UEFI_SAR_PROFILE_SIZE_REV2)
#define UEFI_SAR_WRDS_TABLE_SIZE_REV3 \
(offsetof(struct uefi_cnv_var_wrds, vals) + \
UEFI_SAR_PROFILE_SIZE_REV3)
/**
* struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI
* @revision: the revision of the table
* @mode: is WRDS enabled/disabled
* @num_profiles: how many additional profiles we have in this table (0-3)
* @sar_profiles: the additional SAR profiles (#2-#4)
* @vals: the additional SAR profiles (#2-#4) as an array of SAR profiles.
* A SAR profile is defined the &struct uefi_cnv_var_wrds::vals. The size
* of each profile depends on the number of subbands which depends on the
* revision. This is explained in &struct uefi_cnv_var_wrds.
*/
struct uefi_cnv_var_ewrd {
u8 revision;
u32 mode;
u32 num_profiles;
struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1];
u8 vals[];
} __packed;
/*
#define UEFI_SAR_EWRD_TABLE_SIZE_REV2 \
(offsetof(struct uefi_cnv_var_ewrd, vals) + \
UEFI_SAR_PROFILE_SIZE_REV2 * (BIOS_SAR_MAX_PROFILE_NUM - 1))
#define UEFI_SAR_EWRD_TABLE_SIZE_REV3 \
(offsetof(struct uefi_cnv_var_ewrd, vals) + \
UEFI_SAR_PROFILE_SIZE_REV3 * (BIOS_SAR_MAX_PROFILE_NUM - 1))
/**
* struct uefi_cnv_var_wgds - WGDS table as defined in UEFI
* @revision: the revision of the table
* @num_profiles: the number of geo profiles we have in the table.
* The first 3 are mandatory, and can have up to 8.
* @geo_profiles: a per-profile table of the offsets to add to SAR values.
* @vals: a per-profile table of the offsets to add to SAR values. This is an
* array of profiles, each profile is an array of
* &struct iwl_geo_profile_band, one for each subband.
* There are %UEFI_GEO_NUM_BANDS_REV3 or %UEFI_GEO_NUM_BANDS_REV4 subbands
* depending on the revision.
*/
struct uefi_cnv_var_wgds {
u8 revision;
u8 num_profiles;
struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
u8 vals[];
} __packed;
/*
/* struct iwl_geo_profile_band is 3 bytes-long, but since it is not packed,
* we can't use sizeof()
*/
#define UEFI_WGDS_PROFILE_SIZE_REV3 (sizeof(u8) * 3 * UEFI_GEO_NUM_BANDS_REV3)
#define UEFI_WGDS_PROFILE_SIZE_REV4 (sizeof(u8) * 3 * UEFI_GEO_NUM_BANDS_REV4)
#define UEFI_WGDS_TABLE_SIZE_REV3 \
(offsetof(struct uefi_cnv_var_wgds, vals) + \
UEFI_WGDS_PROFILE_SIZE_REV3 * BIOS_GEO_MAX_PROFILE_NUM)
#define UEFI_WGDS_TABLE_SIZE_REV4 \
(offsetof(struct uefi_cnv_var_wgds, vals) + \
UEFI_WGDS_PROFILE_SIZE_REV4 * BIOS_GEO_MAX_PROFILE_NUM)
/**
* struct uefi_cnv_var_ppag - PPAG table as defined in UEFI
* @revision: the revision of the table
* @ppag_modes: values from &enum iwl_ppag_flags
* @ppag_chains: the PPAG values per chain and band
* @vals: the PPAG values per chain and band as an array.
* vals[chain * num_of_subbands + subband] will return the right value.
* num_of_subbands depends on the revision. For revision 5, it is
* %UEFI_PPAG_SUB_BANDS_NUM_REV5, for earlier revision it is
* %UEFI_PPAG_SUB_BANDS_NUM_REV4.
* the max number of chains is currently 2
*/
struct uefi_cnv_var_ppag {
u8 revision;
u32 ppag_modes;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
s8 vals[];
} __packed;
/* struct uefi_cnv_var_wtas - WTAS tabled as defined in UEFI
#define UEFI_PPAG_DATA_SIZE_V4 \
(offsetof(struct uefi_cnv_var_ppag, vals) + \
sizeof(s8) * UEFI_PPAG_NUM_CHAINS * UEFI_PPAG_SUB_BANDS_NUM_REV4)
#define UEFI_PPAG_DATA_SIZE_V5 \
(offsetof(struct uefi_cnv_var_ppag, vals) + \
sizeof(s8) * UEFI_PPAG_NUM_CHAINS * UEFI_PPAG_SUB_BANDS_NUM_REV5)
/**
* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI
* @revision: the revision of the table
* @tas_selection: different options of TAS enablement.
* @black_list_size: the number of defined entries in the black list
@ -146,7 +212,8 @@ struct uefi_cnv_var_wtas {
u16 black_list[IWL_WTAS_BLACK_LIST_MAX];
} __packed;
/* struct uefi_cnv_var_splc - SPLC tabled as defined in UEFI
/**
* struct uefi_cnv_var_splc - SPLC table as defined in UEFI
* @revision: the revision of the table
* @default_pwr_limit: The default maximum power per device
*/
@ -155,7 +222,8 @@ struct uefi_cnv_var_splc {
u32 default_pwr_limit;
} __packed;
/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
/**
* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
* @revision: the revision of the table
* @mcc: country identifier as defined in ISO/IEC 3166-1 Alpha 2 code
*/
@ -164,7 +232,8 @@ struct uefi_cnv_var_wrdd {
u32 mcc;
} __packed;
/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
/**
* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
* @revision: the revision of the table
* @ext_clock_valid: indicates if external 32KHz clock is valid
*/
@ -175,7 +244,8 @@ struct uefi_cnv_var_eckv {
#define UEFI_MAX_DSM_FUNCS 32
/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
/**
* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
* @revision: the revision of the table
* @functions: payload of the different DSM functions
*/
@ -185,7 +255,9 @@ struct uefi_cnv_var_general_cfg {
} __packed;
#define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1))
/* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined
/**
* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined
* in UEFI
* @revision: the revision of the table
* @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC
@ -274,6 +346,8 @@ int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
void iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt);
void iwl_uefi_get_uneb_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt);
int iwl_uefi_get_dsbr(struct iwl_fw_runtime *fwrt, u32 *value);
int iwl_uefi_get_phy_filters(struct iwl_fw_runtime *fwrt);
@ -373,6 +447,11 @@ iwl_uefi_get_uats_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
}
static inline void
iwl_uefi_get_uneb_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
}
static inline
int iwl_uefi_get_puncturing(struct iwl_fw_runtime *fwrt)
{

View File

@ -85,7 +85,6 @@ enum iwl_nvm_type {
#define IWL_WATCHDOG_DISABLED 0
#define IWL_DEF_WD_TIMEOUT 2500
#define IWL_LONG_WD_TIMEOUT 10000
#define IWL_MAX_WD_TIMEOUT 120000
#define IWL_DEFAULT_MAX_TX_POWER 22
#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\

View File

@ -133,6 +133,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
kfree(drv->fw.ucode_capa.cmd_versions);
kfree(drv->fw.ucode_capa.cmd_bios_tables);
kfree(drv->fw.phy_integration_ver);
kfree(drv->trans->dbg.pc_data);
drv->trans->dbg.pc_data = NULL;
@ -1314,7 +1315,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(u32))
goto invalid_tlv_len;
if (le32_to_cpup((const __le32 *)tlv_data) >
IWL_FW_MAX_LINK_ID + 1) {
IWL_FW_MAX_LINKS) {
IWL_ERR(drv,
"%d is an invalid number of links\n",
le32_to_cpup((const __le32 *)tlv_data));
@ -1426,6 +1427,26 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -ENOMEM;
drv->fw.pnvm_size = tlv_len;
break;
case IWL_UCODE_TLV_CMD_BIOS_TABLE:
if (tlv_len % sizeof(struct iwl_fw_cmd_bios_table)) {
IWL_ERR(drv,
"Invalid length for command bios table: %u\n",
tlv_len);
return -EINVAL;
}
if (capa->cmd_bios_tables) {
IWL_ERR(drv, "Duplicate TLV type 0x%02X detected\n",
tlv_type);
return -EINVAL;
}
capa->cmd_bios_tables = kmemdup(tlv_data, tlv_len,
GFP_KERNEL);
if (!capa->cmd_bios_tables)
return -ENOMEM;
capa->n_cmd_bios_tables =
tlv_len / sizeof(struct iwl_fw_cmd_bios_table);
break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;

View File

@ -23,6 +23,8 @@
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"
#include "fw/dbg.h"
#include "mei/iwl-mei.h"
/* NVM offsets (in words) definitions */
@ -1702,6 +1704,11 @@ iwl_parse_nvm_mcc_info(struct iwl_trans *trans,
band);
new_rule = false;
if (IWL_FW_CHECK(trans, !center_freq,
"Invalid channel %d (idx %d) in NVM\n",
nvm_chan[ch_idx], ch_idx))
continue;
if (!(ch_flags & NVM_CHANNEL_VALID)) {
iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
nvm_chan[ch_idx], ch_flags);
@ -2031,7 +2038,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
if (empty_otp)
IWL_INFO(trans, "OTP is empty\n");
nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS);
nvm = kzalloc_flex(*nvm, channels, IWL_NUM_CHANNELS_V2);
if (!nvm) {
ret = -ENOMEM;
goto out;

View File

@ -138,7 +138,7 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
IWL_RESET_MODE_FUNC_RESET,
IWL_RESET_MODE_PROD_RESET,
};
static const enum iwl_reset_mode escalation_list_sc[] = {
static const enum iwl_reset_mode escalation_list_top[] = {
IWL_RESET_MODE_SW_RESET,
IWL_RESET_MODE_REPROBE,
IWL_RESET_MODE_REPROBE,
@ -159,14 +159,14 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans)
if (trans->request_top_reset) {
trans->request_top_reset = 0;
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC)
if (iwl_trans_is_top_reset_supported(trans))
return IWL_RESET_MODE_TOP_RESET;
return IWL_RESET_MODE_PROD_RESET;
}
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
escalation_list = escalation_list_sc;
escalation_list_size = ARRAY_SIZE(escalation_list_sc);
if (iwl_trans_is_top_reset_supported(trans)) {
escalation_list = escalation_list_top;
escalation_list_size = ARRAY_SIZE(escalation_list_top);
} else {
escalation_list = escalation_list_old;
escalation_list_size = ARRAY_SIZE(escalation_list_old);

View File

@ -1088,7 +1088,7 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans,
*/
trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET,
&trans->status);
queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0);
queue_delayed_work(system_dfl_wq, &trans->restart.wk, 0);
}
static inline void iwl_trans_fw_error(struct iwl_trans *trans,
@ -1258,4 +1258,22 @@ bool iwl_trans_is_pm_supported(struct iwl_trans *trans);
bool iwl_trans_is_ltr_enabled(struct iwl_trans *trans);
/* Whether TOP reset is supported by this device: requires at least the
 * Sc device family (but within Sc itself only Sc2/Sc2f, i.e. not the
 * original Sc MAC) together with a Pe-or-later RF.
 */
static inline bool iwl_trans_is_top_reset_supported(struct iwl_trans *trans)
{
	/* short-circuit evaluation keeps the same check order as before:
	 * family gate first, then the Sc MAC exclusion, then the RF type
	 * (so far the RF type numbers have only been increasing)
	 */
	return trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC &&
	       !(trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_SC &&
		 CSR_HW_REV_TYPE(trans->info.hw_rev) == IWL_CFG_MAC_TYPE_SC) &&
	       CSR_HW_RFID_TYPE(trans->info.hw_rf_id) >= IWL_CFG_RF_TYPE_PE;
}
#endif /* __iwl_trans_h__ */

View File

@ -36,7 +36,6 @@
#define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS 8
#define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC 30
#define IWL_MLD_SCAN_EXPIRE_TIME_SEC 20
#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ)

View File

@ -513,7 +513,7 @@ static int iwl_mld_config_fw(struct iwl_mld *mld)
return ret;
iwl_mld_init_tas(mld);
iwl_mld_init_uats(mld);
iwl_mld_init_ap_type_tables(mld);
return 0;
}

View File

@ -61,20 +61,27 @@ void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
static int iwl_mld_send_mac_cmd(struct iwl_mld *mld,
struct iwl_mac_config_cmd *cmd)
{
u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD);
int len = sizeof(*cmd);
int ret;
lockdep_assert_wiphy(mld->wiphy);
ret = iwl_mld_send_cmd_pdu(mld,
WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
cmd);
if (iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0) < 4) {
if (WARN_ON(cmd->mac_type == cpu_to_le32(FW_MAC_TYPE_NAN)))
return -EINVAL;
len = sizeof(struct iwl_mac_config_cmd_v3);
}
ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, len);
if (ret)
IWL_ERR(mld, "Failed to send MAC_CONFIG_CMD ret = %d\n", ret);
return ret;
}
int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif)
static int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif)
{
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@ -386,7 +393,7 @@ static void iwl_mld_mlo_scan_start_wk(struct wiphy *wiphy,
iwl_mld_int_mlo_scan(mld, iwl_mld_vif_to_mac80211(mld_vif));
}
IWL_MLD_ALLOC_FN(vif, vif)
static IWL_MLD_ALLOC_FN(vif, vif)
/* Constructor function for struct iwl_mld_vif */
static void
@ -397,6 +404,7 @@ iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
lockdep_assert_wiphy(mld->wiphy);
mld_vif->mld = mld;
mld_vif->fw_id = IWL_MLD_INVALID_FW_ID;
mld_vif->roc_activity = ROC_NUM_ACTIVITIES;
if (!mld->fw_status.in_hw_restart) {
@ -444,6 +452,10 @@ void iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif)
lockdep_assert_wiphy(mld->wiphy);
/* NAN interface type is not known to FW */
if (vif->type == NL80211_IFTYPE_NAN)
return;
iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE);
if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->fw_id_to_vif)))

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024-2025 Intel Corporation
* Copyright (C) 2024-2026 Intel Corporation
*/
#ifndef __iwl_mld_iface_h__
#define __iwl_mld_iface_h__
@ -33,6 +33,7 @@ enum iwl_mld_cca_40mhz_wa_status {
* there is an indication that a non-BSS interface is to be added.
* @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile
* @IWL_MLD_EMLSR_BLOCKED_NAN: NAN is preventing EMLSR.
* @IWL_MLD_EMLSR_BLOCKED_TDLS: TDLS connection is preventing EMLSR.
*/
enum iwl_mld_emlsr_blocked {
IWL_MLD_EMLSR_BLOCKED_PREVENTION = 0x1,
@ -42,6 +43,7 @@ enum iwl_mld_emlsr_blocked {
IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS = 0x10,
IWL_MLD_EMLSR_BLOCKED_TPT = 0x20,
IWL_MLD_EMLSR_BLOCKED_NAN = 0x40,
IWL_MLD_EMLSR_BLOCKED_TDLS = 0x80,
};
/**
@ -201,6 +203,15 @@ iwl_mld_vif_to_mac80211(struct iwl_mld_vif *mld_vif)
return container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
}
/* Call only for interfaces that were added to the driver! */
static inline bool iwl_mld_vif_fw_id_valid(struct iwl_mld_vif *mld_vif)
{
if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld_vif->mld->fw_id_to_vif)))
return false;
return true;
}
#define iwl_mld_link_dereference_check(mld_vif, link_id) \
rcu_dereference_check((mld_vif)->link[link_id], \
lockdep_is_held(&mld_vif->mld->wiphy->mtx))
@ -219,8 +230,6 @@ iwl_mld_link_from_mac80211(struct ieee80211_bss_conf *bss_conf)
return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id);
}
int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif);
/* Cleanup function for struct iwl_mld_vif, will be called in restart */
void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif);
int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif,

View File

@ -437,7 +437,7 @@ iwl_mld_rm_link_from_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link)
iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE);
}
IWL_MLD_ALLOC_FN(link, bss_conf)
static IWL_MLD_ALLOC_FN(link, bss_conf)
/* Constructor function for struct iwl_mld_link */
static int

View File

@ -40,6 +40,7 @@ struct iwl_probe_resp_data {
* @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS.
* @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS.
* @mon_sta: station used for TX injection in monitor interface.
* @last_cqm_rssi_event: rssi of the last cqm rssi event
* @average_beacon_energy: average beacon energy for beacons received during
* client connections
* @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs,
@ -66,6 +67,7 @@ struct iwl_mld_link {
struct iwl_mld_int_sta bcast_sta;
struct iwl_mld_int_sta mcast_sta;
struct iwl_mld_int_sta mon_sta;
int last_cqm_rssi_event;
/* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */
struct ieee80211_key_conf *ap_early_keys[6];

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
* Copyright (C) 2024-2026 Intel Corporation
*/
#include "mld.h"
#include "iface.h"
@ -77,9 +77,12 @@ static void iwl_mld_low_latency_iter(void *_data, u8 *mac,
bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC;
bool low_latency;
if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result)))
if (!iwl_mld_vif_fw_id_valid(mld_vif))
return;
BUILD_BUG_ON(ARRAY_SIZE(mld->fw_id_to_vif) !=
ARRAY_SIZE(mld->low_latency.result));
low_latency = mld->low_latency.result[mld_vif->fw_id];
if (prev != low_latency)
@ -272,8 +275,10 @@ void iwl_mld_low_latency_update_counters(struct iwl_mld *mld,
if (WARN_ON_ONCE(!mld->low_latency.pkts_counters))
return;
if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) ||
queue >= mld->trans->info.num_rxqs))
if (!iwl_mld_vif_fw_id_valid(mld_vif))
return;
if (WARN_ON_ONCE(queue >= mld->trans->info.num_rxqs))
return;
if (mld->low_latency.stopped)

View File

@ -754,6 +754,30 @@ void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw,
mld->monitor.phy.valid = false;
}
/* Change the interface type by removing the vif and re-adding it with
 * the requested type/p2p identity, then handing the original values
 * back to mac80211 (which performs the actual update itself).
 */
static
int iwl_mld_mac80211_change_interface(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      enum nl80211_iftype new_type, bool p2p)
{
	enum nl80211_iftype saved_type = vif->type;
	bool saved_p2p = vif->p2p;
	int ret;

	/* tear down the interface as currently configured */
	iwl_mld_mac80211_remove_interface(hw, vif);

	/* temporarily apply the requested identity so the add path
	 * sets everything up cleanly for the new type
	 */
	vif->type = new_type;
	vif->p2p = p2p;
	ret = iwl_mld_mac80211_add_interface(hw, vif);

	/* restore for mac80211, it will change it again */
	vif->type = saved_type;
	vif->p2p = saved_p2p;

	return ret;
}
struct iwl_mld_mc_iter_data {
struct iwl_mld *mld;
int port_id;
@ -1124,6 +1148,8 @@ int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
/* Now activate the link */
if (iwl_mld_can_activate_link(mld, vif, link)) {
iwl_mld_tlc_update_phy(mld, vif, link);
ret = iwl_mld_activate_link(mld, link);
if (ret)
goto err;
@ -1185,6 +1211,8 @@ void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
RCU_INIT_POINTER(mld_link->chan_ctx, NULL);
iwl_mld_tlc_update_phy(mld, vif, link);
/* in the non-MLO case, remove/re-add the link to clean up FW state.
* In MLO, it'll be done in drv_change_vif_link
*/
@ -1727,14 +1755,23 @@ static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
return -EBUSY;
}
ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER);
ret = iwl_mld_add_sta(mld, sta, vif);
if (ret)
return ret;
/* just added first TDLS STA, so disable PM */
if (sta->tdls && tdls_count == 0)
/* just added first TDLS STA, so disable PM and block EMLSR */
if (sta->tdls && tdls_count == 0) {
iwl_mld_update_mac_power(mld, vif, false);
/* TDLS requires single-link operation with
* direct peer communication.
* Block and exit EMLSR when TDLS is established.
*/
iwl_mld_block_emlsr(mld, vif,
IWL_MLD_EMLSR_BLOCKED_TDLS,
iwl_mld_get_primary_link(vif));
}
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mld_vif->ap_sta = sta;
@ -1870,8 +1907,14 @@ static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
iwl_mld_remove_sta(mld, sta);
if (sta->tdls && iwl_mld_tdls_sta_count(mld) == 0) {
/* just removed last TDLS STA, so enable PM */
/* just removed last TDLS STA, so enable PM
* and unblock EMLSR
*/
iwl_mld_update_mac_power(mld, vif, false);
/* Unblock EMLSR when TDLS connection is torn down */
iwl_mld_unblock_emlsr(mld, vif,
IWL_MLD_EMLSR_BLOCKED_TDLS);
}
} else {
return -EINVAL;
@ -2716,6 +2759,7 @@ const struct ieee80211_ops iwl_mld_hw_ops = {
.get_antenna = iwl_mld_get_antenna,
.set_antenna = iwl_mld_set_antenna,
.add_interface = iwl_mld_mac80211_add_interface,
.change_interface = iwl_mld_mac80211_change_interface,
.remove_interface = iwl_mld_mac80211_remove_interface,
.conf_tx = iwl_mld_mac80211_conf_tx,
.prepare_multicast = iwl_mld_mac80211_prepare_multicast,

View File

@ -205,7 +205,7 @@
struct iwl_mld {
/* Add here fields that need clean up on restart */
struct_group(zeroed_on_hw_restart,
struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1];
struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINKS];
struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER];
struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES];
u8 used_phy_ids: NUM_PHY_CTX;
@ -530,9 +530,9 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
#define IWL_MLD_INVALID_FW_ID 0xff
#define IWL_MLD_ALLOC_FN(_type, _mac80211_type) \
static int \
int \
iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld, \
u8 *fw_id, \
u8 *fw_id, \
struct ieee80211_##_mac80211_type *mac80211_ptr) \
{ \
u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 0 : get_random_u8(); \

View File

@ -13,7 +13,8 @@
HOW(NON_BSS) \
HOW(TMP_NON_BSS) \
HOW(TPT) \
HOW(NAN)
HOW(NAN) \
HOW(TDLS)
static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
@ -110,7 +111,6 @@ void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
}
#define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)
/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \

View File

@ -2,7 +2,8 @@
/*
* Copyright (C) 2025 Intel Corporation
*/
#ifndef __iwl_mld_nan_h__
#define __iwl_mld_nan_h__
#include <net/cfg80211.h>
#include <linux/etherdevice.h>
@ -26,3 +27,5 @@ bool iwl_mld_cancel_nan_cluster_notif(struct iwl_mld *mld,
bool iwl_mld_cancel_nan_dw_end_notif(struct iwl_mld *mld,
struct iwl_rx_packet *pkt,
u32 obj_id);
#endif /* __iwl_mld_nan_h__ */

View File

@ -32,9 +32,9 @@ struct iwl_mld_phy {
};
static inline struct iwl_mld_phy *
iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel)
iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *chanctx)
{
return (void *)channel->drv_priv;
return (void *)chanctx->drv_priv;
}
/* Cleanup function for struct iwl_mld_phy, will be called in restart */

View File

@ -405,7 +405,10 @@ int iwl_mld_set_tx_power(struct iwl_mld *mld,
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK),
.common.pwr_restriction = cpu_to_le16(u_tx_power),
};
int len = sizeof(cmd.common) + sizeof(cmd.v10);
int len = sizeof(cmd.common) + sizeof(cmd.v11);
if (iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 10) == 10)
len = sizeof(cmd.common) + sizeof(cmd.v10);
if (WARN_ON(!mld_link))
return -ENODEV;

View File

@ -301,10 +301,12 @@ void iwl_mld_ptp_init(struct iwl_mld *mld)
mld->ptp_data.ptp_clock =
ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev);
if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) {
if (IS_ERR(mld->ptp_data.ptp_clock)) {
IWL_ERR(mld, "Failed to register PHC clock (%ld)\n",
PTR_ERR(mld->ptp_data.ptp_clock));
mld->ptp_data.ptp_clock = NULL;
} else if (!mld->ptp_data.ptp_clock) {
IWL_DEBUG_INFO(mld, "PTP module unavailable on this kernel\n");
} else {
IWL_DEBUG_INFO(mld, "Registered PHC clock: %s, with index: %d\n",
mld->ptp_data.ptp_clock_info.name,

View File

@ -64,6 +64,7 @@ void iwl_mld_get_bios_tables(struct iwl_mld *mld)
}
iwl_uefi_get_uats_table(mld->trans, &mld->fwrt);
iwl_uefi_get_uneb_table(mld->trans, &mld->fwrt);
iwl_bios_get_phy_filters(&mld->fwrt);
}
@ -72,16 +73,36 @@ static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
{
u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
/* Only set to South Korea if the table revision is 1 */
__le32 sk = cpu_to_le32(mld->fwrt.geo_rev == 1 ? 1 : 0);
u8 sk = mld->fwrt.geo_rev == 1 ? 1 : 0;
union iwl_geo_tx_power_profiles_cmd cmd = {
.v5.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
.v5.table_revision = sk,
};
u32 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0);
int n_subbands;
int cmd_size;
int ret;
ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v5.table[0][0],
ARRAY_SIZE(cmd.v5.table[0]),
BIOS_GEO_MAX_PROFILE_NUM);
switch (cmd_ver) {
case 5:
n_subbands = ARRAY_SIZE(cmd.v5.table[0]);
cmd.v5.table_revision = cpu_to_le32(sk);
cmd_size = sizeof(cmd.v5);
break;
case 6:
n_subbands = ARRAY_SIZE(cmd.v6.table[0]);
cmd.v6.bios_hdr.table_revision = mld->fwrt.geo_rev;
cmd.v6.bios_hdr.table_source = mld->fwrt.geo_bios_source;
cmd_size = sizeof(cmd.v6);
break;
default:
WARN(false, "unsupported version: %d", cmd_ver);
return -EINVAL;
}
BUILD_BUG_ON(offsetof(typeof(cmd), v6.table) !=
offsetof(typeof(cmd), v5.table));
ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v6.table[0][0],
n_subbands, BIOS_GEO_MAX_PROFILE_NUM);
/* It is a valid scenario to not support SAR, or miss wgds table,
* but in that case there is no need to send the command.
@ -89,28 +110,48 @@ static int iwl_mld_geo_sar_init(struct iwl_mld *mld)
if (ret)
return 0;
return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, sizeof(cmd.v5));
return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, cmd_size);
}
int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
struct iwl_dev_tx_power_cmd cmd = {
.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
.v10.flags = cpu_to_le32(mld->fwrt.reduced_power_flags),
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, REDUCE_TX_POWER_CMD, 10);
int num_subbands;
int cmd_size;
int ret;
switch (cmd_ver) {
case 10:
cmd.v10.flags = cpu_to_le32(mld->fwrt.reduced_power_flags);
cmd_size = sizeof(cmd.common) + sizeof(cmd.v10);
num_subbands = IWL_NUM_SUB_BANDS_V2;
break;
case 11:
cmd.v11.flags = cpu_to_le32(mld->fwrt.reduced_power_flags);
cmd_size = sizeof(cmd.common) + sizeof(cmd.v11);
num_subbands = IWL_NUM_SUB_BANDS_V3;
break;
default:
WARN_ONCE(1, "Bad version for REDUCE_TX_POWER_CMD: %d\n",
cmd_ver);
return -EOPNOTSUPP;
}
/* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */
ret = iwl_sar_fill_profile(&mld->fwrt, &cmd.v10.per_chain[0][0][0],
IWL_NUM_CHAIN_TABLES, IWL_NUM_SUB_BANDS_V2,
/* v10 and v11 have the same position for per_chain */
BUILD_BUG_ON(offsetof(typeof(cmd), v11.per_chain) !=
offsetof(typeof(cmd), v10.per_chain));
ret = iwl_sar_fill_profile(&mld->fwrt, &cmd.v11.per_chain[0][0][0],
IWL_NUM_CHAIN_TABLES, num_subbands,
prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
return ret;
return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd,
sizeof(cmd.common) + sizeof(cmd.v10));
return iwl_mld_send_cmd_pdu(mld, REDUCE_TX_POWER_CMD, &cmd, cmd_size);
}
int iwl_mld_init_sar(struct iwl_mld *mld)
@ -165,30 +206,86 @@ static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld)
{
struct iwl_fw_runtime *fwrt = &mld->fwrt;
union iwl_ppag_table_cmd cmd = {
.v7.ppag_config_info.hdr.table_source = fwrt->ppag_bios_source,
.v7.ppag_config_info.hdr.table_revision = fwrt->ppag_bios_rev,
.v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags),
/* v7 and v8 have the same layout for the ppag_config_info */
.v8.ppag_config_info.hdr.table_source = fwrt->ppag_bios_source,
.v8.ppag_config_info.hdr.table_revision = fwrt->ppag_bios_rev,
.v8.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags),
};
u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD);
int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 1);
int cmd_len = sizeof(cmd.v8);
u8 cmd_bios_rev;
int ret;
BUILD_BUG_ON(offsetof(typeof(cmd), v8.ppag_config_info.hdr) !=
offsetof(typeof(cmd), v7.ppag_config_info.hdr));
BUILD_BUG_ON(offsetof(typeof(cmd), v8.gain) !=
offsetof(typeof(cmd), v7.gain));
BUILD_BUG_ON(ARRAY_SIZE(cmd.v7.gain) > ARRAY_SIZE(fwrt->ppag_chains));
BUILD_BUG_ON(ARRAY_SIZE(cmd.v7.gain[0]) >
ARRAY_SIZE(fwrt->ppag_chains[0].subbands));
BUILD_BUG_ON(ARRAY_SIZE(cmd.v8.gain) > ARRAY_SIZE(fwrt->ppag_chains));
BUILD_BUG_ON(ARRAY_SIZE(cmd.v8.gain[0]) >
ARRAY_SIZE(fwrt->ppag_chains[0].subbands));
IWL_DEBUG_RADIO(fwrt,
"PPAG MODE bits going to be sent: %d\n",
fwrt->ppag_flags);
for (int chain = 0; chain < IWL_NUM_CHAIN_LIMITS; chain++) {
for (int subband = 0; subband < IWL_NUM_SUB_BANDS_V2; subband++) {
cmd.v7.gain[chain][subband] =
fwrt->ppag_chains[chain].subbands[subband];
IWL_DEBUG_RADIO(fwrt,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
chain, subband, cmd.v7.gain[chain][subband]);
/* Since ver 7 will be deprecated at some point, don't bother making
* this code generic for both ver 7 and ver 8: duplicate the code.
*/
if (cmd_ver == 7) {
for (int chain = 0; chain < ARRAY_SIZE(cmd.v7.gain); chain++) {
for (int subband = 0;
subband < ARRAY_SIZE(cmd.v7.gain[0]);
subband++) {
cmd.v7.gain[chain][subband] =
fwrt->ppag_chains[chain].subbands[subband];
IWL_DEBUG_RADIO(fwrt,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
chain, subband,
cmd.v7.gain[chain][subband]);
}
}
cmd_len = sizeof(cmd.v7);
cmd_bios_rev =
iwl_fw_lookup_cmd_bios_supported_revision(fwrt->fw,
fwrt->ppag_bios_source,
cmd_id, 4);
} else if (cmd_ver == 8) {
for (int chain = 0; chain < ARRAY_SIZE(cmd.v8.gain); chain++) {
for (int subband = 0;
subband < ARRAY_SIZE(cmd.v8.gain[0]);
subband++) {
cmd.v8.gain[chain][subband] =
fwrt->ppag_chains[chain].subbands[subband];
IWL_DEBUG_RADIO(fwrt,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
chain, subband,
cmd.v8.gain[chain][subband]);
}
}
cmd_bios_rev =
iwl_fw_lookup_cmd_bios_supported_revision(fwrt->fw,
fwrt->ppag_bios_source,
cmd_id, 5);
} else {
WARN(1, "Bad version for PER_PLATFORM_ANT_GAIN_CMD %d\n",
cmd_ver);
return -EINVAL;
}
if (cmd_bios_rev < fwrt->ppag_bios_rev) {
IWL_ERR(mld,
"BIOS revision compatibility check failed - Supported: %d, Current: %d\n",
cmd_bios_rev, fwrt->ppag_bios_rev);
return 0;
}
IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
&cmd, sizeof(cmd.v7));
ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, cmd_len);
if (ret < 0)
IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@ -352,21 +449,42 @@ void iwl_mld_configure_lari(struct iwl_mld *mld)
ret);
}
void iwl_mld_init_uats(struct iwl_mld *mld)
void iwl_mld_init_ap_type_tables(struct iwl_mld *mld)
{
int ret;
struct iwl_host_cmd cmd = {
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
MCC_ALLOWED_AP_TYPE_CMD),
.data[0] = &mld->fwrt.uats_table,
.len[0] = sizeof(mld->fwrt.uats_table),
.data[0] = &mld->fwrt.ap_type_cmd,
.len[0] = sizeof(mld->fwrt.ap_type_cmd),
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
if (!mld->fwrt.uats_valid)
if (!mld->fwrt.ap_type_cmd_valid)
return;
ret = iwl_mld_send_cmd(mld, &cmd);
if (iwl_fw_lookup_cmd_ver(mld->fw, cmd.id, 1) == 1) {
struct iwl_mcc_allowed_ap_type_cmd_v1 *cmd_v1 =
kzalloc(sizeof(*cmd_v1), GFP_KERNEL);
if (!cmd_v1)
return;
BUILD_BUG_ON(sizeof(mld->fwrt.ap_type_cmd.mcc_to_ap_type_map) !=
sizeof(cmd_v1->mcc_to_ap_type_map));
memcpy(cmd_v1->mcc_to_ap_type_map,
mld->fwrt.ap_type_cmd.mcc_to_ap_type_map,
sizeof(mld->fwrt.ap_type_cmd.mcc_to_ap_type_map));
cmd.data[0] = cmd_v1;
cmd.len[0] = sizeof(*cmd_v1);
ret = iwl_mld_send_cmd(mld, &cmd);
kfree(cmd_v1);
} else {
ret = iwl_mld_send_cmd(mld, &cmd);
}
if (ret)
IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
ret);

View File

@ -9,7 +9,7 @@
void iwl_mld_get_bios_tables(struct iwl_mld *mld);
void iwl_mld_configure_lari(struct iwl_mld *mld);
void iwl_mld_init_uats(struct iwl_mld *mld);
void iwl_mld_init_ap_type_tables(struct iwl_mld *mld);
void iwl_mld_init_tas(struct iwl_mld *mld);
int iwl_mld_init_ppag(struct iwl_mld *mld);

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024-2025 Intel Corporation
* Copyright (C) 2024-2026 Intel Corporation
*/
#include <net/mac80211.h>
@ -791,6 +791,9 @@ static void *
iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len)
{
struct ieee80211_radiotap_tlv *tlv;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
tlv = skb_put(skb, sizeof(*tlv));
tlv->type = cpu_to_le16(type);
@ -1234,8 +1237,6 @@ static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len);
rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) {
case 0:
if (he_type == RATE_MCS_HE_TYPE_TRIG) {
@ -1329,7 +1330,6 @@ static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb,
static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_vendor_content *radiotap;
const u16 vendor_data_len = sizeof(mld->monitor.cur_aid);
@ -1353,8 +1353,6 @@ static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld,
/* fill the data now */
memcpy(radiotap->data, &mld->monitor.cur_aid,
sizeof(mld->monitor.cur_aid));
rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
}
#endif
@ -1362,7 +1360,6 @@ static void iwl_mld_add_rtap_sniffer_phy_data(struct iwl_mld *mld,
struct sk_buff *skb,
struct iwl_rx_phy_air_sniffer_ntfy *ntfy)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_vendor_content *radiotap;
const u16 vendor_data_len = sizeof(*ntfy);
@ -1382,8 +1379,6 @@ static void iwl_mld_add_rtap_sniffer_phy_data(struct iwl_mld *mld,
/* fill the data now */
memcpy(radiotap->data, ntfy, vendor_data_len);
rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
}
static void
@ -1407,6 +1402,7 @@ static void iwl_mld_set_rx_rate(struct iwl_mld *mld,
u32 rate_n_flags = phy_data->rate_n_flags;
u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK);
u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
u32 he_type = u32_get_bits(rate_n_flags, RATE_MCS_HE_TYPE_MSK);
bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK;
/* bandwidth may be overridden to RU by PHY ntfy */
@ -1481,6 +1477,12 @@ static void iwl_mld_set_rx_rate(struct iwl_mld *mld,
rx_status->encoding = RX_ENC_EHT;
iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status);
break;
case RATE_MCS_MOD_TYPE_UHR:
rx_status->encoding = RX_ENC_UHR;
iwl_mld_set_rx_nonlegacy_rate_info(rate_n_flags, rx_status);
if (he_type == RATE_MCS_HE_TYPE_UHR_ELR)
rx_status->uhr.elr = 1;
break;
default:
WARN_ON_ONCE(1);
}
@ -2204,8 +2206,9 @@ void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
ret = wait_event_timeout(mld->rxq_sync.waitq,
READ_ONCE(mld->rxq_sync.state) == 0,
SYNC_RX_QUEUE_TIMEOUT);
WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n",
mld->rxq_sync.state, mld->rxq_sync.cookie);
IWL_FW_CHECK(mld, !ret,
"RXQ sync failed: state=0x%lx, cookie=%d\n",
mld->rxq_sync.state, mld->rxq_sync.cookie);
out:
mld->rxq_sync.state = 0;

View File

@ -47,8 +47,6 @@
/* adaptive dwell number of APs override mask for social channels */
#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
#define SCAN_TIMEOUT_MSEC (30000 * HZ)
/* minimal number of 2GHz and 5GHz channels in the regular scan request */
#define IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
@ -116,6 +114,13 @@ struct iwl_mld_scan_params {
u8 bssid[ETH_ALEN] __aligned(2);
};
struct iwl_scan_req_params_ptrs {
struct iwl_scan_general_params_v11 *general_params;
struct iwl_scan_channel_params_v8 *channel_params;
struct iwl_scan_periodic_parms_v1 *periodic_params;
struct iwl_scan_probe_params_v4 *probe_params;
};
struct iwl_mld_scan_respect_p2p_go_iter_data {
struct ieee80211_vif *current_vif;
bool p2p_go;
@ -512,9 +517,10 @@ iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld,
static void
iwl_mld_scan_cmd_set_dwell(struct iwl_mld *mld,
struct iwl_scan_general_params_v11 *gp,
struct iwl_mld_scan_params *params)
struct iwl_mld_scan_params *params,
struct iwl_scan_req_params_ptrs *scan_ptrs)
{
struct iwl_scan_general_params_v11 *gp = scan_ptrs->general_params;
const struct iwl_mld_scan_timing_params *timing =
&scan_timing[params->type];
@ -551,9 +557,10 @@ static void
iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
struct iwl_mld_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_general_params_v11 *gp,
struct iwl_scan_req_params_ptrs *scan_ptrs,
enum iwl_mld_scan_status scan_status)
{
struct iwl_scan_general_params_v11 *gp = scan_ptrs->general_params;
u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
scan_status);
u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif,
@ -566,7 +573,7 @@ iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
gp->flags = cpu_to_le16(gen_flags);
gp->flags2 = gen_flags2;
iwl_mld_scan_cmd_set_dwell(mld, gp, params);
iwl_mld_scan_cmd_set_dwell(mld, params, scan_ptrs);
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
@ -577,9 +584,12 @@ iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld,
static int
iwl_mld_scan_cmd_set_sched_params(struct iwl_mld_scan_params *params,
struct iwl_scan_umac_schedule *schedule,
__le16 *delay)
struct iwl_scan_req_params_ptrs *scan_ptrs)
{
struct iwl_scan_umac_schedule *schedule =
scan_ptrs->periodic_params->schedule;
__le16 *delay = &scan_ptrs->periodic_params->delay;
if (WARN_ON(!params->n_scan_plans ||
params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
return -EINVAL;
@ -657,11 +667,12 @@ iwl_mld_scan_cmd_build_ssids(struct iwl_mld_scan_params *params,
static void
iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params,
struct iwl_scan_probe_params_v4 *pp)
struct iwl_scan_req_params_ptrs *scan_ptrs)
{
int j, idex_s = 0, idex_b = 0;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
struct iwl_scan_probe_params_v4 *pp = scan_ptrs->probe_params;
for (j = 0;
j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
@ -725,13 +736,15 @@ iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params,
static void
iwl_mld_scan_cmd_set_probe_params(struct iwl_mld_scan_params *params,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_req_params_ptrs *scan_ptrs,
u32 *bitmap_ssid)
{
struct iwl_scan_probe_params_v4 *pp = scan_ptrs->probe_params;
pp->preq = params->preq;
if (params->scan_6ghz) {
iwl_mld_scan_fill_6g_chan_list(params, pp);
iwl_mld_scan_fill_6g_chan_list(params, scan_ptrs);
return;
}
@ -821,10 +834,12 @@ static u32 iwl_mld_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
static void
iwl_mld_scan_cmd_set_channels(struct iwl_mld *mld,
struct ieee80211_channel **channels,
struct iwl_scan_channel_params_v7 *cp,
struct iwl_scan_req_params_ptrs *scan_ptrs,
int n_channels, u32 flags,
enum nl80211_iftype vif_type)
{
struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params;
for (int i = 0; i < n_channels; i++) {
enum nl80211_band band = channels[i]->band;
struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
@ -862,10 +877,11 @@ static u8
iwl_mld_scan_cfg_channels_6g(struct iwl_mld *mld,
struct iwl_mld_scan_params *params,
u32 n_channels,
struct iwl_scan_probe_params_v4 *pp,
struct iwl_scan_channel_params_v7 *cp,
struct iwl_scan_req_params_ptrs *scan_ptrs,
enum nl80211_iftype vif_type)
{
struct iwl_scan_probe_params_v4 *pp = scan_ptrs->probe_params;
struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params;
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
params->scan_6ghz_params;
u32 i;
@ -1063,25 +1079,23 @@ static int
iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld,
struct iwl_mld_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_req_params_v17 *scan_p)
struct iwl_scan_req_params_ptrs *scan_ptrs)
{
struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params;
struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params;
struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params;
/* Explicitly clear the flags since most of them are not
* relevant for 6 GHz scan.
*/
chan_p->flags = 0;
chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params,
params->n_channels,
probe_p, chan_p,
vif->type);
if (!chan_p->count)
cp->flags = 0;
cp->count = iwl_mld_scan_cfg_channels_6g(mld, params,
params->n_channels,
scan_ptrs, vif->type);
if (!cp->count)
return -EINVAL;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
chan_p->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
return 0;
}
@ -1090,12 +1104,12 @@ static int
iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
struct iwl_mld_scan_params *params,
struct ieee80211_vif *vif,
struct iwl_scan_req_params_v17 *scan_p,
struct iwl_scan_req_params_ptrs *scan_ptrs,
bool low_latency,
enum iwl_mld_scan_status scan_status,
u32 channel_cfg_flags)
{
struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
struct iwl_scan_channel_params_v8 *cp = scan_ptrs->channel_params;
struct ieee80211_supported_band *sband =
&mld->nvm_data->bands[NL80211_BAND_6GHZ];
@ -1107,14 +1121,14 @@ iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
if (params->scan_6ghz)
return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params,
vif, scan_p);
vif, scan_ptrs);
/* relevant only for 2.4 GHz/5 GHz scan */
cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif,
low_latency);
cp->count = params->n_channels;
iwl_mld_scan_cmd_set_channels(mld, params->channels, cp,
iwl_mld_scan_cmd_set_channels(mld, params->channels, scan_ptrs,
params->n_channels, channel_cfg_flags,
vif->type);
@ -1144,47 +1158,146 @@ iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
return 0;
}
struct iwl_scan_umac_handler {
u8 version;
int (*handler)(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct iwl_mld_scan_params *params,
enum iwl_mld_scan_status scan_status,
int uid, u32 ooc_priority, bool low_latency);
};
#define IWL_SCAN_UMAC_HANDLER(_ver) { \
.version = _ver, \
.handler = iwl_mld_scan_umac_v##_ver, \
}
static int iwl_mld_scan_umac_common(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct iwl_mld_scan_params *params,
struct iwl_scan_req_params_ptrs *scan_ptrs,
enum iwl_mld_scan_status scan_status,
bool low_latency)
{
u32 bitmap_ssid = 0;
int ret;
iwl_mld_scan_cmd_set_gen_params(mld, params, vif, scan_ptrs,
scan_status);
ret = iwl_mld_scan_cmd_set_sched_params(params, scan_ptrs);
if (ret)
return ret;
iwl_mld_scan_cmd_set_probe_params(params, scan_ptrs, &bitmap_ssid);
return iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_ptrs,
low_latency, scan_status,
bitmap_ssid);
}
static int iwl_mld_scan_umac_v18(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct iwl_mld_scan_params *params,
enum iwl_mld_scan_status scan_status,
int uid, u32 ooc_priority, bool low_latency)
{
struct iwl_scan_req_umac_v18 *cmd = mld->scan.cmd;
struct iwl_scan_req_params_ptrs scan_ptrs = {
.general_params = &cmd->scan_params.general_params,
.probe_params = &cmd->scan_params.probe_params,
.channel_params = &cmd->scan_params.channel_params,
.periodic_params = &cmd->scan_params.periodic_params
};
int ret;
if (WARN_ON(params->n_channels > SCAN_MAX_NUM_CHANS_V4))
return -EINVAL;
cmd->uid = cpu_to_le32(uid);
cmd->ooc_priority = cpu_to_le32(ooc_priority);
ret = iwl_mld_scan_umac_common(mld, vif, params, &scan_ptrs,
scan_status, low_latency);
if (ret)
return ret;
return uid;
}
static int iwl_mld_scan_umac_v17(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct iwl_mld_scan_params *params,
enum iwl_mld_scan_status scan_status,
int uid, u32 ooc_priority, bool low_latency)
{
struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd;
struct iwl_scan_req_params_ptrs scan_ptrs = {
.general_params = &cmd->scan_params.general_params,
.probe_params = &cmd->scan_params.probe_params,
/* struct iwl_scan_channel_params_v8 and struct
* iwl_scan_channel_params_v7 are almost identical. The only
* difference is that the newer version allows configuration of
* more channels. So casting here is ok as long as we ensure
* that we don't exceed the max number of channels supported by
* the older version (see the WARN_ON below).
*/
.channel_params = (struct iwl_scan_channel_params_v8 *)
&cmd->scan_params.channel_params,
.periodic_params = &cmd->scan_params.periodic_params
};
int ret;
if (WARN_ON(params->n_channels > SCAN_MAX_NUM_CHANS_V3))
return -EINVAL;
cmd->uid = cpu_to_le32(uid);
cmd->ooc_priority = cpu_to_le32(ooc_priority);
ret = iwl_mld_scan_umac_common(mld, vif, params, &scan_ptrs,
scan_status, low_latency);
if (ret)
return ret;
return uid;
}
static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
/* set the newest version first to shorten the list traverse time */
IWL_SCAN_UMAC_HANDLER(18),
IWL_SCAN_UMAC_HANDLER(17),
};
static int
iwl_mld_scan_build_cmd(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct iwl_mld_scan_params *params,
enum iwl_mld_scan_status scan_status,
bool low_latency)
{
struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd;
struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
u32 bitmap_ssid = 0;
int uid, ret;
int uid, err;
u32 ooc_priority;
memset(mld->scan.cmd, 0, mld->scan.cmd_size);
/* find a free UID entry */
uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_NONE);
if (uid < 0)
return uid;
cmd->uid = cpu_to_le32(uid);
cmd->ooc_priority =
cpu_to_le32(iwl_mld_scan_ooc_priority(scan_status));
ooc_priority = iwl_mld_scan_ooc_priority(scan_status);
iwl_mld_scan_cmd_set_gen_params(mld, params, vif,
&scan_p->general_params, scan_status);
for (size_t i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
const struct iwl_scan_umac_handler *ver_handler =
&iwl_scan_umac_handlers[i];
ret = iwl_mld_scan_cmd_set_sched_params(params,
scan_p->periodic_params.schedule,
&scan_p->periodic_params.delay);
if (ret)
return ret;
if (ver_handler->version != mld->scan.cmd_ver)
continue;
iwl_mld_scan_cmd_set_probe_params(params, &scan_p->probe_params,
&bitmap_ssid);
err = ver_handler->handler(mld, vif, params, scan_status,
uid, ooc_priority, low_latency);
return err ? : uid;
}
ret = iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_p,
low_latency, scan_status,
bitmap_ssid);
if (ret)
return ret;
IWL_ERR(mld, "No handler for UMAC scan cmd version %d\n",
mld->scan.cmd_ver);
return uid;
return -EINVAL;
}
static bool
@ -1942,9 +2055,7 @@ void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
struct ieee80211_bss_conf *link_conf = NULL;
if (fw_link_id != IWL_MLD_INVALID_FW_ID)
link_conf =
wiphy_dereference(mld->wiphy,
mld->fw_id_to_bss_conf[fw_link_id]);
link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
/* It is possible that by the time the scan is complete the
* link was already removed and is not valid.
@ -2031,6 +2142,8 @@ int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
if (scan_cmd_ver == 17) {
scan_cmd_size = sizeof(struct iwl_scan_req_umac_v17);
} else if (scan_cmd_ver == 18) {
scan_cmd_size = sizeof(struct iwl_scan_req_umac_v18);
} else {
IWL_ERR(mld, "Unexpected scan cmd version %d\n", scan_cmd_ver);
return -EINVAL;
@ -2041,6 +2154,7 @@ int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
return -ENOMEM;
mld->scan.cmd_size = scan_cmd_size;
mld->scan.cmd_ver = scan_cmd_ver;
return 0;
}

View File

@ -109,6 +109,7 @@ enum iwl_mld_traffic_load {
* @traffic_load.status: The current traffic load status, see
* &enum iwl_mld_traffic_load
* @cmd_size: size of %cmd.
* @cmd_ver: version of the scan command format.
* @cmd: pointer to scan cmd buffer (allocated once in op mode start).
* @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time
* in jiffies.
@ -134,6 +135,7 @@ struct iwl_mld_scan {
/* And here fields that survive a fw restart */
size_t cmd_size;
void *cmd;
u8 cmd_ver;
unsigned long last_6ghz_passive_jiffies;
unsigned long last_start_time_jiffies;
u64 last_mlo_scan_time;

View File

@ -398,12 +398,42 @@ static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta)
return htc_flags;
}
/* Note: modifies the command depending on FW command version */
static int iwl_mld_send_sta_cmd(struct iwl_mld *mld,
const struct iwl_sta_cfg_cmd *cmd)
struct iwl_sta_cfg_cmd *cmd)
{
int ret = iwl_mld_send_cmd_pdu(mld,
WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
cmd);
int cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
int cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, 0);
int len = sizeof(*cmd);
int ret;
if (cmd_ver < 2) {
IWL_ERR(mld, "Unsupported STA_CONFIG_CMD version %d\n",
cmd_ver);
return -EINVAL;
} else if (cmd_ver == 2) {
struct iwl_sta_cfg_cmd_v2 *cmd_v2 = (void *)cmd;
if (WARN_ON(cmd->station_type == cpu_to_le32(STATION_TYPE_NAN_PEER_NMI) ||
cmd->station_type == cpu_to_le32(STATION_TYPE_NAN_PEER_NDI) ||
hweight32(le32_to_cpu(cmd->link_mask)) != 1))
return -EINVAL;
/*
* These fields are located in a different place in the struct of v2.
* The assumption is that UHR won't be used with FW that has v2.
*/
if (WARN_ON(cmd->mic_prep_pad_delay || cmd->mic_compute_pad_delay))
return -EINVAL;
len = sizeof(struct iwl_sta_cfg_cmd_v2);
cmd_v2->link_id = cpu_to_le32(__ffs(le32_to_cpu(cmd->link_mask)));
} else if (WARN_ON(cmd->station_type != cpu_to_le32(STATION_TYPE_NAN_PEER_NMI) &&
cmd->station_type != cpu_to_le32(STATION_TYPE_NAN_PEER_NDI) &&
hweight32(le32_to_cpu(cmd->link_mask)) != 1)) {
return -EINVAL;
}
ret = iwl_mld_send_cmd_pdu(mld, cmd_id, cmd, len);
if (ret)
IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
return ret;
@ -431,8 +461,8 @@ iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
return -EINVAL;
cmd.sta_id = cpu_to_le32(fw_id);
cmd.link_mask = cpu_to_le32(BIT(mld_link->fw_id));
cmd.station_type = cpu_to_le32(mld_sta->sta_type);
cmd.link_id = cpu_to_le32(mld_link->fw_id);
memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
@ -498,7 +528,7 @@ iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld,
return iwl_mld_send_sta_cmd(mld, &cmd);
}
IWL_MLD_ALLOC_FN(link_sta, link_sta)
static IWL_MLD_ALLOC_FN(link_sta, link_sta)
static int
iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta)
@ -725,14 +755,14 @@ iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
}
int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
struct ieee80211_vif *vif)
{
struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
struct ieee80211_link_sta *link_sta;
int link_id;
int ret;
ret = iwl_mld_init_sta(mld, sta, vif, type);
ret = iwl_mld_init_sta(mld, sta, vif, STATION_TYPE_PEER);
if (ret)
return ret;
@ -908,7 +938,7 @@ static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
goto unlock;
for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
for (int i = 0; i < IWL_FW_MAX_LINKS; i++)
total_mpdus += tx ? queue_counter->per_link[i].tx :
queue_counter->per_link[i].rx;
@ -982,7 +1012,7 @@ iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
cmd.link_id = cpu_to_le32(fw_link_id);
cmd.link_mask = cpu_to_le32(BIT(fw_link_id));
cmd.station_type = cpu_to_le32(internal_sta->sta_type);
/* FW doesn't allow to add a IGTK/BIGTK if the sta isn't marked as MFP.

View File

@ -89,7 +89,7 @@ struct iwl_mld_per_link_mpdu_counter {
*/
struct iwl_mld_per_q_mpdu_counter {
spinlock_t lock;
struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1];
struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINKS];
unsigned long window_start_time;
} ____cacheline_aligned_in_smp;
@ -190,7 +190,7 @@ iwl_mld_link_sta_from_mac80211(struct ieee80211_link_sta *link_sta)
}
int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, enum iwl_fw_sta_type type);
struct ieee80211_vif *vif);
void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta);
int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
struct ieee80211_link_sta *link_sta);

View File

@ -369,15 +369,39 @@ static void iwl_mld_stats_recalc_traffic_load(struct iwl_mld *mld,
static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig,
struct ieee80211_bss_conf *bss_conf)
{
struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
int exit_emlsr_thresh;
int last_event;
if (sig == 0) {
IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n");
return;
}
/* TODO: task=statistics handle CQM notifications */
if (WARN_ON(!link))
return;
/* CQM Notification */
if (vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI) {
int thold = bss_conf->cqm_rssi_thold;
int hyst = bss_conf->cqm_rssi_hyst;
last_event = link->last_cqm_rssi_event;
if (thold && sig < thold &&
(last_event == 0 || sig < last_event - hyst)) {
link->last_cqm_rssi_event = sig;
ieee80211_cqm_rssi_notify(vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
sig, GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
link->last_cqm_rssi_event = sig;
ieee80211_cqm_rssi_notify(vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
sig, GFP_KERNEL);
}
}
if (!iwl_mld_emlsr_active(vif)) {
/* We're not in EMLSR and our signal is bad,
@ -407,14 +431,13 @@ iwl_mld_process_per_link_stats(struct iwl_mld *mld,
u32 total_airtime_usec = 0;
for (u32 fw_id = 0;
fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf);
fw_id < mld->fw->ucode_capa.num_links;
fw_id++) {
const struct iwl_stats_ntfy_per_link *link_stats;
struct ieee80211_bss_conf *bss_conf;
int sig;
bss_conf = wiphy_dereference(mld->wiphy,
mld->fw_id_to_bss_conf[fw_id]);
bss_conf = iwl_mld_fw_id_to_link_conf(mld, fw_id);
if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION)
continue;

View File

@ -42,7 +42,7 @@ int iwlmld_kunit_test_init(struct kunit *test)
iwl_construct_mld(mld, trans, cfg, fw, hw, NULL);
fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX;
fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1;
fw->ucode_capa.num_links = IWL_FW_MAX_LINKS;
mld->fwrt.trans = trans;
mld->fwrt.fw = fw;
@ -68,7 +68,7 @@ int iwlmld_kunit_test_init(struct kunit *test)
return 0;
}
IWL_MLD_ALLOC_FN(link, bss_conf)
static IWL_MLD_ALLOC_FN(link, bss_conf)
static void iwlmld_kunit_init_link(struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link,
@ -94,7 +94,7 @@ static void iwlmld_kunit_init_link(struct ieee80211_vif *vif,
rcu_assign_pointer(vif->link_conf[link_id], link);
}
IWL_MLD_ALLOC_FN(vif, vif)
static IWL_MLD_ALLOC_FN(vif, vif)
/* Helper function to add and initialize a VIF for KUnit tests */
struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type)
@ -199,7 +199,7 @@ void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
vif->active_links |= BIT(link->link_id);
}
IWL_MLD_ALLOC_FN(link_sta, link_sta)
static IWL_MLD_ALLOC_FN(link_sta, link_sta)
static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta,
struct ieee80211_link_sta *link_sta,

View File

@ -9,6 +9,7 @@
#include "hcmd.h"
#include "sta.h"
#include "phy.h"
#include "iface.h"
#include "fw/api/rs.h"
#include "fw/api/context.h"
@ -36,7 +37,8 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap)
const struct ieee80211_sta_eht_cap *own_eht_cap,
const struct ieee80211_sta_uhr_cap *own_uhr_cap)
{
struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
@ -90,6 +92,12 @@ iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld,
flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK;
}
if (link_sta->uhr_cap.has_uhr && own_uhr_cap &&
link_sta->uhr_cap.phy.cap & IEEE80211_UHR_PHY_CAP_ELR_RX &&
own_uhr_cap->phy.cap & IEEE80211_UHR_PHY_CAP_ELR_TX)
flags |= IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_1_5_MBPS_MSK |
IWL_TLC_MNG_CFG_FLAGS_UHR_ELR_3_MBPS_MSK;
return cpu_to_le16(flags);
}
@ -406,6 +414,7 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_supported_band *sband,
const struct ieee80211_sta_he_cap *own_he_cap,
const struct ieee80211_sta_eht_cap *own_eht_cap,
const struct ieee80211_sta_uhr_cap *own_uhr_cap,
struct iwl_tlc_config_cmd *cmd)
{
int i;
@ -423,7 +432,16 @@ iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
cmd->non_ht_rates = cpu_to_le16(non_ht_rates);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
if (link_sta->uhr_cap.has_uhr && own_uhr_cap) {
cmd->mode = IWL_TLC_MNG_MODE_UHR;
/*
* FIXME: spec currently inherits from EHT but has no
* finer MCS bits. Once that's there, need to add them
* to the bitmaps (and maybe copy this to UHR, or so.)
*/
iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
own_eht_cap, cmd);
} else if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
cmd->mode = IWL_TLC_MNG_MODE_EHT;
iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
own_eht_cap, cmd);
@ -513,19 +531,23 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
struct ieee80211_bss_conf *link)
{
struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
enum nl80211_band band = link->chanreq.oper.chan->band;
struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
const struct ieee80211_sta_he_cap *own_he_cap =
ieee80211_get_he_iftype_cap_vif(sband, vif);
const struct ieee80211_sta_eht_cap *own_eht_cap =
ieee80211_get_eht_iftype_cap_vif(sband, vif);
const struct ieee80211_sta_uhr_cap *own_uhr_cap =
ieee80211_get_uhr_iftype_cap_vif(sband, vif);
struct iwl_tlc_config_cmd cmd = {
/* For AP mode, use 20 MHz until the STA is authorized */
.max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
iwl_mld_fw_bw_from_sta_bw(link_sta) :
IWL_TLC_MNG_CH_WIDTH_20MHZ,
.flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta,
own_he_cap, own_eht_cap),
own_he_cap, own_eht_cap,
own_uhr_cap),
.chains = iwl_mld_get_fw_chains(mld),
.sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta),
.max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
@ -546,7 +568,10 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
cmd.sta_mask = cpu_to_le32(BIT(fw_sta_id));
chan_ctx = rcu_dereference_wiphy(mld->wiphy, link->chanctx_conf);
if (WARN_ON_ONCE(!mld_link))
return;
chan_ctx = rcu_dereference_wiphy(mld->wiphy, mld_link->chan_ctx);
if (WARN_ON(!chan_ctx))
return;
@ -555,7 +580,7 @@ static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
own_he_cap, own_eht_cap,
&cmd);
own_uhr_cap, &cmd);
if (cmd_ver == 6) {
cmd_ptr = &cmd;
@ -638,6 +663,49 @@ void iwl_mld_config_tlc_link(struct iwl_mld *mld,
iwl_mld_send_tlc_cmd(mld, vif, link_sta, link_conf);
}
/*
 * iwl_mld_tlc_update_phy - re-send TLC (rate scaling) config after a PHY change
 * @mld: the MLD driver context
 * @vif: the interface whose link changed
 * @link_conf: the link whose PHY context was updated
 *
 * Walks all stations on @vif and re-issues the TLC configuration for the
 * link-station matching @link_conf's link ID, so firmware rate scaling picks
 * up the new channel context. Must be called with the wiphy mutex held.
 */
void iwl_mld_tlc_update_phy(struct iwl_mld *mld, struct ieee80211_vif *vif,
                            struct ieee80211_bss_conf *link_conf)
{
        struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf);
        struct ieee80211_chanctx_conf *chan_ctx;
        int link_id = link_conf->link_id;
        struct ieee80211_sta *sta;

        lockdep_assert_wiphy(mld->wiphy);

        if (WARN_ON(!mld_link))
                return;

        /* Protected by the wiphy mutex asserted above */
        chan_ctx = rcu_dereference_wiphy(mld->wiphy, mld_link->chan_ctx);

        for_each_station(sta, mld->hw) {
                struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
                struct iwl_mld_link_sta *mld_link_sta;
                struct ieee80211_link_sta *link_sta;

                /* only stations belonging to this interface */
                if (mld_sta->vif != vif)
                        continue;

                /* station may not have a link-sta on this link at all */
                link_sta = link_sta_dereference_protected(sta, link_id);
                if (!link_sta)
                        continue;

                mld_link_sta = iwl_mld_link_sta_dereference_check(mld_sta,
                                                                  link_id);

                /* In recovery flow, the station may not be (yet) in the
                 * firmware, don't send a TLC command for a station the
                 * firmware does not know.
                 */
                if (!mld_link_sta || !mld_link_sta->in_fw)
                        continue;

                if (chan_ctx)
                        iwl_mld_config_tlc_link(mld, vif, link_conf, link_sta);
                /* TODO: else, remove the TLC object in the firmware */
        }
}
void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{

View File

@ -20,4 +20,7 @@ void iwl_mld_handle_tlc_notif(struct iwl_mld *mld,
int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data);
void iwl_mld_tlc_update_phy(struct iwl_mld *mld, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
#endif /* __iwl_mld_tlc_h__ */

View File

@ -459,23 +459,18 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
int cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
MCC_ALLOWED_AP_TYPE_CMD);
struct iwl_mcc_allowed_ap_type_cmd_v1 cmd = {};
u8 cmd_ver;
int ret;
struct iwl_host_cmd cmd = {
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
MCC_ALLOWED_AP_TYPE_CMD),
.flags = 0,
.data[0] = &mvm->fwrt.uats_table,
.len[0] = sizeof(mvm->fwrt.uats_table),
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
if (mvm->trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
return;
}
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 1) {
IWL_DEBUG_RADIO(mvm,
@ -486,10 +481,17 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
if (!mvm->fwrt.uats_valid)
if (!mvm->fwrt.ap_type_cmd_valid)
return;
ret = iwl_mvm_send_cmd(mvm, &cmd);
BUILD_BUG_ON(sizeof(mvm->fwrt.ap_type_cmd.mcc_to_ap_type_map) !=
sizeof(cmd.mcc_to_ap_type_map));
memcpy(cmd.mcc_to_ap_type_map,
mvm->fwrt.ap_type_cmd.mcc_to_ap_type_map,
sizeof(mvm->fwrt.ap_type_cmd.mcc_to_ap_type_map));
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
if (ret < 0)
IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
ret);
@ -906,7 +908,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
union iwl_geo_tx_power_profiles_cmd geo_tx_cmd = {};
struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
@ -958,7 +960,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
union iwl_geo_tx_power_profiles_cmd cmd;
union iwl_geo_tx_power_profiles_cmd cmd = {};
u16 len;
u32 n_bands;
u32 n_profiles;
@ -1032,12 +1034,139 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
/*
 * Check that one BIOS-provided PPAG gain value is inside the allowed
 * range for its subband. Subband 0 uses the low-band limits, all other
 * subbands use the high-band limits.
 */
static bool iwl_mvm_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
                                     int subband)
{
        s8 val = fwrt->ppag_chains[chain].subbands[subband];
        s8 min = subband == 0 ? IWL_PPAG_MIN_LB : IWL_PPAG_MIN_HB;
        s8 max = subband == 0 ? IWL_PPAG_MAX_LB : IWL_PPAG_MAX_HB;

        if (val < min || val > max) {
                IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", val);
                return false;
        }

        return true;
}
/*
 * iwl_mvm_fill_ppag_table - build the PER_PLATFORM_ANT_GAIN_CMD payload
 * @fwrt: firmware runtime holding the BIOS PPAG table and flags
 * @cmd: command union to fill (version-specific member is selected here)
 * @cmd_size: out parameter, size of the version-specific command struct
 *
 * Selects the command layout from the firmware-advertised command version
 * (v1, v5 or v7 are supported) and copies the per-chain/per-subband gains
 * read from BIOS into it.
 *
 * Return: 0 on success, -EOPNOTSUPP when the RF is JF (known to falsely
 * advertise PPAG support), -EINVAL when PPAG cannot/should not be sent.
 */
static int iwl_mvm_fill_ppag_table(struct iwl_fw_runtime *fwrt,
                                   union iwl_ppag_table_cmd *cmd,
                                   int *cmd_size)
{
        u8 cmd_ver;
        int i, j, num_sub_bands;
        s8 *gain;
        bool send_ppag_always;

        /* many firmware images for JF lie about this */
        if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) ==
            CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
                return -EOPNOTSUPP;

        if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
                IWL_DEBUG_RADIO(fwrt,
                                "PPAG capability not supported by FW, command not sent.\n");
                return -EINVAL;
        }

        cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
                                        WIDE_ID(PHY_OPS_GROUP,
                                                PER_PLATFORM_ANT_GAIN_CMD), 1);

        /*
         * Starting from ver 4, driver needs to send the PPAG CMD regardless
         * if PPAG is enabled/disabled or valid/invalid.
         */
        send_ppag_always = cmd_ver > 3;

        /* Don't send PPAG if it is disabled */
        if (!send_ppag_always && !fwrt->ppag_flags) {
                IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
                return -EINVAL;
        }

        IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);

        /* pick the layout matching the FW's command version */
        if (cmd_ver == 1) {
                num_sub_bands = IWL_NUM_SUB_BANDS_V1;
                gain = cmd->v1.gain[0];
                *cmd_size = sizeof(cmd->v1);
                cmd->v1.flags =
                        cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
                if (fwrt->ppag_bios_rev >= 1) {
                        /* in this case FW supports revision 0 */
                        IWL_DEBUG_RADIO(fwrt,
                                        "PPAG table rev is %d, send truncated table\n",
                                        fwrt->ppag_bios_rev);
                }
        } else if (cmd_ver == 5) {
                num_sub_bands = IWL_NUM_SUB_BANDS_V2;
                gain = cmd->v5.gain[0];
                *cmd_size = sizeof(cmd->v5);
                cmd->v5.flags =
                        cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
                if (fwrt->ppag_bios_rev == 0) {
                        /* in this case FW supports revisions 1,2 or 3 */
                        IWL_DEBUG_RADIO(fwrt,
                                        "PPAG table rev is 0, send padded table\n");
                }
        } else if (cmd_ver == 7) {
                /* v7 carries the BIOS source/revision metadata explicitly */
                num_sub_bands = IWL_NUM_SUB_BANDS_V2;
                gain = cmd->v7.gain[0];
                *cmd_size = sizeof(cmd->v7);
                cmd->v7.ppag_config_info.hdr.table_source =
                        fwrt->ppag_bios_source;
                cmd->v7.ppag_config_info.hdr.table_revision =
                        fwrt->ppag_bios_rev;
                cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
        } else {
                IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
                return -EINVAL;
        }

        /* ppag mode */
        IWL_DEBUG_RADIO(fwrt,
                        "PPAG MODE bits were read from bios: %d\n",
                        fwrt->ppag_flags);

        /* without the China-BIOS capability, only the ETSI bit may be sent */
        if (cmd_ver == 1 &&
            !fw_has_capa(&fwrt->fw->ucode_capa,
                         IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
                cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
                IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
        } else {
                IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
        }

        /* The 'flags' field is the same in v1 and v5 so we can just
         * use v1 to access it.
         */
        IWL_DEBUG_RADIO(fwrt,
                        "PPAG MODE bits going to be sent: %d\n",
                        (cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
                        le32_to_cpu(cmd->v7.ppag_config_info.value));

        /* copy gains; validation is skipped when the cmd must always go out */
        for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
                for (j = 0; j < num_sub_bands; j++) {
                        if (!send_ppag_always &&
                            !iwl_mvm_ppag_value_valid(fwrt, i, j))
                                return -EINVAL;

                        gain[i * num_sub_bands + j] =
                                fwrt->ppag_chains[i].subbands[j];
                        IWL_DEBUG_RADIO(fwrt,
                                        "PPAG table: chain[%d] band[%d]: gain = %d\n",
                                        i, j, gain[i * num_sub_bands + j]);
                }
        }

        return 0;
}
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
union iwl_ppag_table_cmd cmd;
int ret, cmd_size;
ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
ret = iwl_mvm_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
/* Not supporting PPAG table is a valid scenario */
if (ret < 0)
return 0;

View File

@ -6229,9 +6229,10 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
ret = wait_event_timeout(mvm->rx_sync_waitq,
READ_ONCE(mvm->queue_sync_state) == 0,
SYNC_RX_QUEUE_TIMEOUT);
WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
mvm->queue_sync_state,
mvm->queue_sync_cookie);
IWL_FW_CHECK(mvm, !ret,
"queue sync: failed to sync, state is 0x%lx, cookie %d\n",
mvm->queue_sync_state,
mvm->queue_sync_cookie);
}
out:

View File

@ -121,52 +121,6 @@ struct iwl_mvm_sta_key_update_data {
int err;
};
/*
 * Key iterator callback (see ieee80211_iter_keys() in the caller): for each
 * pairwise key belonging to the target station, send a SEC_KEY_CMD MODIFY
 * that retargets the key from the old station-ID mask to the new one.
 * The first send error is latched in data->err; later keys are still tried.
 */
static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta,
                                       struct ieee80211_key_conf *key,
                                       void *_data)
{
        u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD);
        struct iwl_mvm_sta_key_update_data *data = _data;
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_sec_key_cmd cmd = {
                .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
                .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask),
                .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask),
                .u.modify.key_id = cpu_to_le32(key->keyidx),
                .u.modify.key_flags =
                        cpu_to_le32(iwl_mvm_get_sec_flags(mvm, vif, sta, key)),
        };
        int err;

        /* only need to do this for pairwise keys (link_id == -1) */
        if (sta != data->sta || key->link_id >= 0)
                return;

        err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);

        if (err)
                data->err = err;
}
/*
 * Retarget all of @sta's pairwise keys in firmware from @old_sta_mask to
 * @new_sta_mask by iterating the interface's keys with
 * iwl_mvm_mld_update_sta_key(). Returns 0 or the first error the
 * iterator callback recorded.
 */
int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif,
                                struct ieee80211_sta *sta,
                                u32 old_sta_mask,
                                u32 new_sta_mask)
{
        struct iwl_mvm_sta_key_update_data data = {};

        data.sta = sta;
        data.old_sta_mask = old_sta_mask;
        data.new_sta_mask = new_sta_mask;

        ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mld_update_sta_key, &data);

        return data.err;
}
static int __iwl_mvm_sec_key_del(struct iwl_mvm *mvm, u32 sta_mask,
u32 key_flags, u32 keyidx, u32 flags)
{

View File

@ -6,7 +6,7 @@
static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_config_cmd *cmd,
struct iwl_mac_config_cmd_v3 *cmd,
int cmd_ver)
{
if (vif->type == NL80211_IFTYPE_AP) {
@ -24,7 +24,7 @@ static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm,
static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_config_cmd *cmd,
struct iwl_mac_config_cmd_v3 *cmd,
u32 action)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@ -83,7 +83,7 @@ static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
}
static int iwl_mvm_mld_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
struct iwl_mac_config_cmd *cmd)
struct iwl_mac_config_cmd_v3 *cmd)
{
int ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD),
@ -98,7 +98,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 action, bool force_assoc_off)
{
struct iwl_mac_config_cmd cmd = {};
struct iwl_mac_config_cmd_v3 cmd = {};
WARN_ON(vif->type != NL80211_IFTYPE_STATION);
@ -151,7 +151,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 action)
{
struct iwl_mac_config_cmd cmd = {};
struct iwl_mac_config_cmd_v3 cmd = {};
WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
@ -170,7 +170,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 action)
{
struct iwl_mac_config_cmd cmd = {};
struct iwl_mac_config_cmd_v3 cmd = {};
WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
@ -187,7 +187,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 action)
{
struct iwl_mac_config_cmd cmd = {};
struct iwl_mac_config_cmd_v3 cmd = {};
WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
@ -210,7 +210,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_ap_go(struct iwl_mvm *mvm,
u32 action)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_config_cmd cmd = {};
struct iwl_mac_config_cmd_v3 cmd = {};
WARN_ON(vif->type != NL80211_IFTYPE_AP);
@ -286,7 +286,7 @@ int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm,
int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_config_cmd cmd = {
struct iwl_mac_config_cmd_v3 cmd = {
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.id_and_color = cpu_to_le32(mvmvif->id),
};

View File

@ -886,133 +886,6 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
}
/*
 * mac80211 .change_vif_links callback: reconcile the driver's per-link
 * state with the new set of links on @vif.
 *
 * Allocates link info for links being added (outside the mutex, so a
 * failure can bail out cheaply), then under mvm->mutex disables/frees
 * removed links and adds new ones. When all links are removed
 * (new_links == 0), the default link is restored. Note the comment at
 * out_err: partial failures are not rolled back.
 */
static int
iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             u16 old_links, u16 new_links,
                             struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
        struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        u16 removed = old_links & ~new_links;
        u16 added = new_links & ~old_links;
        int err, i;

        /* pre-allocate link info for added links; skipped entirely during
         * HW restart, where the existing link pointers are reused
         */
        for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
                if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                        break;

                if (!(added & BIT(i)))
                        continue;

                new_link[i] = kzalloc_obj(*new_link[i]);
                if (!new_link[i]) {
                        err = -ENOMEM;
                        goto free;
                }

                new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
                iwl_mvm_init_link(new_link[i]);
        }

        mutex_lock(&mvm->mutex);

        /* If we're in RESTART flow, the default link wasn't added in
         * drv_add_interface(), and link[0] doesn't point to it.
         */
        if (old_links == 0 && !test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                        &mvm->status)) {
                err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
                if (err)
                        goto out_err;
                mvmvif->link[0] = NULL;
        }

        for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
                if (removed & BIT(i)) {
                        struct ieee80211_bss_conf *link_conf = old[i];

                        err = iwl_mvm_disable_link(mvm, vif, link_conf);
                        if (err)
                                goto out_err;
                        kfree(mvmvif->link[i]);
                        mvmvif->link[i] = NULL;
                } else if (added & BIT(i)) {
                        struct ieee80211_bss_conf *link_conf;

                        link_conf = link_conf_dereference_protected(vif, i);
                        if (WARN_ON(!link_conf))
                                continue;

                        /* hand ownership of the pre-allocated info to
                         * mvmvif; clearing new_link[i] keeps the 'free'
                         * label from double-freeing it
                         */
                        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                      &mvm->status))
                                mvmvif->link[i] = new_link[i];
                        new_link[i] = NULL;
                        err = iwl_mvm_add_link(mvm, vif, link_conf);
                        if (err)
                                goto out_err;
                }
        }

        err = 0;
        if (new_links == 0) {
                mvmvif->link[0] = &mvmvif->deflink;
                err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
        }

out_err:
        /* we really don't have a good way to roll back here ... */
        mutex_unlock(&mvm->mutex);

free:
        /* kfree(NULL) is a no-op, so entries handed to mvmvif are skipped */
        for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++)
                kfree(new_link[i]);

        return err;
}
/*
 * mac80211 .change_sta_links callback: thin locking wrapper around
 * iwl_mvm_mld_update_sta_links(). guard(mvm)(mvm) takes the mvm lock
 * for the remainder of the scope (scope-based cleanup).
 */
static int
iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta,
                             u16 old_links, u16 new_links)
{
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

        guard(mvm)(mvm);
        return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
}
/*
 * mac80211 .can_activate_links callback: this (non-MLD-FW) path only
 * supports a single active link, so reject (and warn about) any request
 * for more than one.
 */
static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
                                           struct ieee80211_vif *vif,
                                           u16 desired_links)
{
        if (hweight16(desired_links) <= 1)
                return true;

        WARN_ON(1);
        return false;
}
/*
 * mac80211 .can_neg_ttlm callback: accept a negotiated TID-to-link
 * mapping only when every TID, in both directions, maps to the same
 * link set (the one TID 0's downlink uses); otherwise reject.
 */
static enum ieee80211_neg_ttlm_res
iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                         struct ieee80211_neg_ttlm *neg_ttlm)
{
        u16 expected = neg_ttlm->downlink[0];

        for (u8 tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
                if (neg_ttlm->downlink[tid] != expected ||
                    neg_ttlm->uplink[tid] != expected)
                        return NEG_TTLM_RES_REJECT;
        }

        return NEG_TTLM_RES_ACCEPT;
}
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@ -1102,9 +975,4 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs,
#endif
.set_hw_timestamp = iwl_mvm_set_hw_timestamp,
.change_vif_links = iwl_mvm_mld_change_vif_links,
.change_sta_links = iwl_mvm_mld_change_sta_links,
.can_activate_links = iwl_mvm_mld_can_activate_links,
.can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
};

View File

@ -20,7 +20,7 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
struct iwl_sta_cfg_cmd *cmd)
struct iwl_sta_cfg_cmd_v2 *cmd)
{
u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD);
int cmd_len = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) > 1 ?
@ -41,7 +41,7 @@ static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta,
const u8 *addr, int link_id)
{
struct iwl_sta_cfg_cmd cmd;
struct iwl_sta_cfg_cmd_v2 cmd;
lockdep_assert_held(&mvm->mutex);
@ -416,7 +416,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif_link_info *link_info =
mvm_vif->link[link_conf->link_id];
struct iwl_sta_cfg_cmd cmd = {
struct iwl_sta_cfg_cmd_v2 cmd = {
.sta_id = cpu_to_le32(mvm_link_sta->sta_id),
.station_type = cpu_to_le32(mvm_sta->sta_type),
};
@ -913,288 +913,3 @@ void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
rcu_read_unlock();
}
/*
 * Retarget all of @sta's active TX queues in firmware from @old_sta_mask
 * to @new_sta_mask by sending one SCD_QUEUE_CONFIG_CMD (MODIFY) per TID
 * that has a queue allocated. Stops at the first send failure.
 */
static int iwl_mvm_mld_update_sta_queues(struct iwl_mvm *mvm,
                                         struct ieee80211_sta *sta,
                                         u32 old_sta_mask,
                                         u32 new_sta_mask)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        /* the masks are constant; only .u.modify.tid changes per iteration */
        struct iwl_scd_queue_cfg_cmd cmd = {
                .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
                .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
                .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
        };
        struct iwl_host_cmd hcmd = {
                .id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
                .len[0] = sizeof(cmd),
                .data[0] = &cmd
        };
        int tid;
        int ret;

        lockdep_assert_held(&mvm->mutex);

        /* <= so the loop also covers the management TID slot */
        for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[tid];
                /* txq_id only gates whether a queue exists for this TID */
                int txq_id = tid_data->txq_id;

                if (txq_id == IWL_MVM_INVALID_QUEUE)
                        continue;

                if (tid == IWL_MAX_TID_COUNT)
                        cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
                else
                        cmd.u.modify.tid = cpu_to_le32(tid);

                ret = iwl_mvm_send_cmd(mvm, &hcmd);
                if (ret)
                        return ret;
        }

        return 0;
}
/*
 * Retarget all RX block-ack (BAID) sessions whose station mask matches
 * @old_sta_mask to @new_sta_mask, one RX_BAID_ALLOCATION_CONFIG_CMD
 * (MODIFY) per affected BAID.
 */
static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
                                        u32 old_sta_mask,
                                        u32 new_sta_mask)
{
        struct iwl_rx_baid_cfg_cmd cmd = {
                .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
                .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
                .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
        };
        u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
        int baid;

        /* mac80211 will remove sessions later, but we ignore all that */
        if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                return 0;

        BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

        for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) {
                struct iwl_mvm_baid_data *data;
                int ret;

                data = rcu_dereference_protected(mvm->baid_map[baid],
                                                 lockdep_is_held(&mvm->mutex));
                if (!data)
                        continue;

                if (!(data->sta_mask & old_sta_mask))
                        continue;

                /* a BAID's mask should match the old mask exactly; a partial
                 * overlap indicates corrupted state, warn but proceed
                 */
                WARN_ONCE(data->sta_mask != old_sta_mask,
                          "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
                          baid, old_sta_mask, data->sta_mask);

                cmd.modify.tid = cpu_to_le32(data->tid);

                ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
                                           sizeof(cmd), &cmd);
                /* deliberately update the driver's mask even if the send
                 * failed, before bailing out with the error
                 */
                data->sta_mask = new_sta_mask;
                if (ret)
                        return ret;
        }

        return 0;
}
/*
 * Retarget every firmware resource tied to @sta's station-ID mask —
 * TX queues, security keys, then RX BA sessions — from @old_sta_mask to
 * @new_sta_mask. Stops at the first failing step and returns its error.
 */
static int iwl_mvm_mld_update_sta_resources(struct iwl_mvm *mvm,
                                            struct ieee80211_vif *vif,
                                            struct ieee80211_sta *sta,
                                            u32 old_sta_mask,
                                            u32 new_sta_mask)
{
        int ret = iwl_mvm_mld_update_sta_queues(mvm, sta,
                                                old_sta_mask,
                                                new_sta_mask);

        if (!ret)
                ret = iwl_mvm_mld_update_sta_keys(mvm, vif, sta,
                                                  old_sta_mask,
                                                  new_sta_mask);
        if (!ret)
                ret = iwl_mvm_mld_update_sta_baids(mvm, old_sta_mask,
                                                   new_sta_mask);

        return ret;
}
/*
 * iwl_mvm_mld_update_sta_links - apply a link-set change for a station
 * @mvm: the mvm context
 * @vif: interface the station belongs to
 * @sta: the (MLD) station
 * @old_links: bitmap of currently valid link IDs
 * @new_links: bitmap of link IDs that should be valid afterwards
 *
 * Sequence: compute the current station-ID mask, retarget shared FW
 * resources away from removed links, remove those link-stations from FW
 * and free them, then allocate/configure link-stations for added links
 * and retarget resources onto them. On error, everything added so far is
 * torn down. Must hold the wiphy lock and mvm->mutex.
 *
 * Return: 0 on success or a negative error code.
 */
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
                                 u16 old_links, u16 new_links)
{
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_link_sta *mvm_sta_link;
        struct iwl_mvm_vif_link_info *mvm_vif_link;
        unsigned long links_to_add = ~old_links & new_links;
        unsigned long links_to_rem = old_links & ~new_links;
        unsigned long old_links_long = old_links;
        u32 current_sta_mask = 0, sta_mask_added = 0, sta_mask_to_rem = 0;
        /* rollback bookkeeping: which added links made it into FW / driver */
        unsigned long link_sta_added_to_fw = 0, link_sta_allocated = 0;
        unsigned int link_id;
        int ret;

        lockdep_assert_wiphy(mvm->hw->wiphy);
        lockdep_assert_held(&mvm->mutex);

        /* build the current station-ID mask and the part that goes away */
        for_each_set_bit(link_id, &old_links_long,
                         IEEE80211_MLD_MAX_NUM_LINKS) {
                mvm_sta_link =
                        rcu_dereference_protected(mvm_sta->link[link_id],
                                                  lockdep_is_held(&mvm->mutex));

                if (WARN_ON(!mvm_sta_link)) {
                        ret = -EINVAL;
                        goto err;
                }

                current_sta_mask |= BIT(mvm_sta_link->sta_id);
                if (links_to_rem & BIT(link_id))
                        sta_mask_to_rem |= BIT(mvm_sta_link->sta_id);
        }

        /* move queues/keys/BAIDs off the station IDs about to be removed */
        if (sta_mask_to_rem) {
                ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta,
                                                       current_sta_mask,
                                                       current_sta_mask &
                                                        ~sta_mask_to_rem);
                if (WARN_ON(ret))
                        goto err;

                current_sta_mask &= ~sta_mask_to_rem;
        }

        /* remove the departing link-stations from FW and the driver */
        for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
                mvm_sta_link =
                        rcu_dereference_protected(mvm_sta->link[link_id],
                                                  lockdep_is_held(&mvm->mutex));
                mvm_vif_link = mvm_vif->link[link_id];

                if (WARN_ON(!mvm_sta_link || !mvm_vif_link)) {
                        ret = -EINVAL;
                        goto err;
                }

                ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id);
                if (WARN_ON(ret))
                        goto err;

                if (vif->type == NL80211_IFTYPE_STATION)
                        mvm_vif_link->ap_sta_id = IWL_INVALID_STA;

                iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
        }

        /* allocate, configure and rate-init the new link-stations */
        for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
                struct ieee80211_bss_conf *link_conf =
                        link_conf_dereference_protected(vif, link_id);
                struct ieee80211_link_sta *link_sta =
                        link_sta_dereference_protected(sta, link_id);

                mvm_vif_link = mvm_vif->link[link_id];

                if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) {
                        ret = -EINVAL;
                        goto err;
                }

                if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                        /* in restart the link-sta already exists; just
                         * re-publish the FW-ID lookup pointers
                         */
                        struct iwl_mvm_link_sta *mvm_link_sta =
                                rcu_dereference_protected(mvm_sta->link[link_id],
                                                          lockdep_is_held(&mvm->mutex));
                        u32 sta_id;

                        if (WARN_ON(!mvm_link_sta)) {
                                ret = -EINVAL;
                                goto err;
                        }

                        sta_id = mvm_link_sta->sta_id;

                        rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
                        rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id],
                                           link_sta);
                } else {
                        if (WARN_ON(mvm_sta->link[link_id])) {
                                ret = -EINVAL;
                                goto err;
                        }
                        ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta,
                                                         link_id);
                        if (WARN_ON(ret))
                                goto err;
                }

                link_sta->agg.max_rc_amsdu_len = 1;
                ieee80211_sta_recalc_aggregates(sta);

                mvm_sta_link =
                        rcu_dereference_protected(mvm_sta->link[link_id],
                                                  lockdep_is_held(&mvm->mutex));

                if (WARN_ON(!mvm_sta_link)) {
                        ret = -EINVAL;
                        goto err;
                }

                if (vif->type == NL80211_IFTYPE_STATION)
                        iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif_link,
                                                  mvm_sta_link);

                link_sta_allocated |= BIT(link_id);

                sta_mask_added |= BIT(mvm_sta_link->sta_id);

                ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
                                          mvm_sta_link);
                if (WARN_ON(ret))
                        goto err;

                link_sta_added_to_fw |= BIT(link_id);

                iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link);

                iwl_mvm_rs_rate_init(mvm, vif, sta, link_conf, link_sta,
                                     link_conf->chanreq.oper.chan->band);
        }

        /* finally, spread shared FW resources onto the new station IDs */
        if (sta_mask_added) {
                ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta,
                                                       current_sta_mask,
                                                       current_sta_mask |
                                                        sta_mask_added);
                if (WARN_ON(ret))
                        goto err;
        }

        return 0;

err:
        /* remove all already allocated stations in FW */
        for_each_set_bit(link_id, &link_sta_added_to_fw,
                         IEEE80211_MLD_MAX_NUM_LINKS) {
                mvm_sta_link =
                        rcu_dereference_protected(mvm_sta->link[link_id],
                                                  lockdep_is_held(&mvm->mutex));

                iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id);
        }

        /* remove all already allocated station links in driver */
        for_each_set_bit(link_id, &link_sta_allocated,
                         IEEE80211_MLD_MAX_NUM_LINKS) {
                mvm_sta_link =
                        rcu_dereference_protected(mvm_sta->link[link_id],
                                                  lockdep_is_held(&mvm->mutex));

                iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id);
        }

        return ret;
}

View File

@ -2450,11 +2450,6 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_vif_link_info *link,
unsigned int link_id);
int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 old_sta_mask,
u32 new_sta_mask);
int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags,
struct ieee80211_key_conf *keyconf);
u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,

View File

@ -304,7 +304,9 @@ void iwl_mvm_ptp_init(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to register PHC clock (%ld)\n",
PTR_ERR(mvm->ptp_data.ptp_clock));
mvm->ptp_data.ptp_clock = NULL;
} else if (mvm->ptp_data.ptp_clock) {
} else if (!mvm->ptp_data.ptp_clock) {
IWL_DEBUG_INFO(mvm, "PTP module unavailable on this kernel\n");
} else {
IWL_DEBUG_INFO(mvm, "Registered PHC clock: %s, with index: %d\n",
mvm->ptp_data.ptp_clock_info.name,
ptp_clock_index(mvm->ptp_data.ptp_clock));

View File

@ -637,10 +637,6 @@ void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
struct iwl_mvm_link_sta *mvm_sta_link,
unsigned int link_id);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u16 old_links, u16 new_links);
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int filter_link_id);
int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,

View File

@ -234,7 +234,7 @@ void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Also convert TU to msec.
*/
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
mod_delayed_work(system_percpu_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
@ -548,7 +548,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
*/
delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
mod_delayed_work(system_percpu_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
return 0;
}
@ -659,6 +659,6 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
/* register a timeout in case we don't succeed in switching */
delay = vif->bss_conf.dtim_period * vif->bss_conf.beacon_int *
1024 / 1000;
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
mod_delayed_work(system_percpu_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
}

View File

@ -95,7 +95,9 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
static void
_iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans,
bool dump_on_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@ -133,7 +135,7 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
"timeout waiting for FW reset ACK (inta_hw=0x%x, reset_done %d)\n",
inta_hw, reset_done);
if (!reset_done) {
if (!reset_done && dump_on_timeout) {
struct iwl_fw_error_dump_mode mode = {
.type = IWL_ERR_TYPE_RESET_HS_TIMEOUT,
.context = IWL_ERR_CONTEXT_FROM_OPMODE,
@ -147,6 +149,11 @@ void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
trans_pcie->fw_reset_state = FW_RESET_IDLE;
}
void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
{
_iwl_trans_pcie_fw_reset_handshake(trans, false);
}
static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -163,7 +170,7 @@ static void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
* should assume that the firmware is already dead.
*/
trans->state = IWL_TRANS_NO_FW;
iwl_trans_pcie_fw_reset_handshake(trans);
_iwl_trans_pcie_fw_reset_handshake(trans, true);
}
trans_pcie->is_down = true;

View File

@ -3197,7 +3197,7 @@ static ssize_t iwl_dbgfs_reset_write(struct file *file,
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return -EINVAL;
if (mode == IWL_RESET_MODE_TOP_RESET) {
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)
if (!iwl_trans_is_top_reset_supported(trans))
return -EINVAL;
trans->request_top_reset = 1;
}

View File

@ -88,6 +88,9 @@ void mt76_change_chanctx(struct ieee80211_hw *hw,
IEEE80211_CHANCTX_CHANGE_RADAR)))
return;
if (phy->roc_vif)
mt76_abort_roc(phy);
cancel_delayed_work_sync(&phy->mac_work);
mutex_lock(&dev->mutex);
@ -155,8 +158,6 @@ void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
struct mt76_chanctx *ctx = (struct mt76_chanctx *)conf->drv_priv;
struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_vif_data *mvif = mlink->mvif;
int link_id = link_conf->link_id;
struct mt76_phy *phy = ctx->phy;
struct mt76_dev *dev = phy->dev;
@ -173,15 +174,8 @@ void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
if (!mlink)
goto out;
if (mlink != (struct mt76_vif_link *)vif->drv_priv)
rcu_assign_pointer(mvif->link[link_id], NULL);
dev->drv->vif_link_remove(phy, vif, link_conf, mlink);
mlink->ctx = NULL;
if (mlink != (struct mt76_vif_link *)vif->drv_priv)
kfree_rcu(mlink, rcu_head);
out:
mutex_unlock(&dev->mutex);
}
@ -254,6 +248,8 @@ int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
continue;
mlink->ctx = vifs->new_ctx;
if (mlink->beacon_mon_interval)
WRITE_ONCE(mlink->beacon_mon_last, jiffies);
}
out:
@ -324,9 +320,11 @@ void mt76_roc_complete(struct mt76_phy *phy)
if (mlink)
mlink->mvif->roc_phy = NULL;
if (phy->main_chandef.chan &&
!test_bit(MT76_MCU_RESET, &dev->phy.state))
mt76_set_channel(phy, &phy->main_chandef, false);
if (phy->chanctx && phy->main_chandef.chan && phy->offchannel &&
!test_bit(MT76_MCU_RESET, &dev->phy.state)) {
__mt76_set_channel(phy, &phy->main_chandef, false);
mt76_offchannel_notify(phy, false);
}
mt76_put_vif_phy_link(phy, phy->roc_vif, phy->roc_link);
phy->roc_vif = NULL;
phy->roc_link = NULL;
@ -364,12 +362,15 @@ int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct mt76_vif_link *mlink;
bool offchannel;
int ret = 0;
phy = dev->band_phys[chan->band];
if (!phy)
return -EINVAL;
cancel_delayed_work_sync(&phy->mac_work);
mutex_lock(&dev->mutex);
if (phy->roc_vif || dev->scan.phy == phy ||
@ -387,8 +388,18 @@ int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mlink->mvif->roc_phy = phy;
phy->roc_vif = vif;
phy->roc_link = mlink;
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
mt76_set_channel(phy, &chandef, true);
offchannel = mt76_offchannel_chandef(phy, chan, &chandef);
if (offchannel)
mt76_offchannel_notify(phy, true);
ret = __mt76_set_channel(phy, &chandef, offchannel);
if (ret) {
mlink->mvif->roc_phy = NULL;
phy->roc_vif = NULL;
phy->roc_link = NULL;
mt76_put_vif_phy_link(phy, vif, mlink);
goto out;
}
ieee80211_ready_on_channel(hw);
ieee80211_queue_delayed_work(phy->hw, &phy->roc_work,
msecs_to_jiffies(duration));

View File

@ -6,6 +6,7 @@
#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
#include "mt76_connac.h"
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
@ -188,16 +189,18 @@ mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
Q_WRITE(q, desc_base, q->desc_dma);
if ((q->flags & MT_QFLAG_WED_RRO_EN) && !mt76_npu_device_active(dev))
if ((q->flags & MT_QFLAG_WED_RRO_EN) &&
(!is_mt7992(dev) || !mt76_npu_device_active(dev)))
Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
else
Q_WRITE(q, ring_size, q->ndesc);
if (mt76_queue_is_npu_tx(q)) {
writel(q->desc_dma, &q->regs->desc_base);
writel(q->ndesc, &q->regs->ring_size);
writel(q->desc_dma, &q->regs->desc_base);
}
Q_WRITE(q, desc_base, q->desc_dma);
q->head = Q_READ(q, dma_idx);
q->tail = q->head;
}
@ -663,6 +666,8 @@ mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
if (!t)
goto free_skb;
t->phy_idx = phy->band_idx;
t->qid = qid;
txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
@ -874,7 +879,16 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
if (!buf)
break;
if (!mt76_queue_is_wed_rro(q))
if (mtk_wed_device_active(&dev->mmio.wed) &&
mt76_queue_is_wed_rro(q))
continue;
if (mt76_npu_device_active(dev) &&
mt76_queue_is_wed_rro(q))
continue;
if (!mt76_queue_is_wed_rro_rxdmad_c(q) &&
!mt76_queue_is_wed_rro_ind(q))
mt76_put_page_pool_buf(buf, false);
} while (1);
@ -915,6 +929,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_queue_is_wed_rro(q))
return;
if (mt76_npu_device_active(dev) &&
mt76_queue_is_wed_rro(q))
return;
if (mt76_queue_is_npu_txfree(q))
return;
mt76_dma_sync_idx(dev, q);
if (mt76_queue_is_npu(q))
mt76_npu_fill_rx_queue(dev, q);
@ -1168,10 +1189,6 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
if (mtk_wed_device_active(&dev->mmio.wed) &&
mt76_queue_is_wed_rro(q))
continue;
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, q);

View File

@ -174,7 +174,9 @@ void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
dev->queue_ops->reset_q(dev, q, true);
bool reset_idx = q && !mt76_queue_is_npu_tx(q);
dev->queue_ops->reset_q(dev, q, reset_idx);
if (mtk_wed_device_active(&dev->mmio.wed))
mt76_wed_dma_setup(dev, q, true);
}

View File

@ -9,6 +9,13 @@
#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include "mt76.h"
#include "mt76_connac.h"
enum mt76_sku_type {
MT76_SKU_RATE,
MT76_SKU_BACKOFF,
MT76_SKU_BACKOFF_BF_OFFSET,
};
static int mt76_get_of_eeprom_data(struct mt76_dev *dev, void *eep, int len)
{
@ -292,7 +299,6 @@ mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan)
}
EXPORT_SYMBOL_GPL(mt76_find_channel_node);
static s8
mt76_get_txs_delta(struct device_node *np, u8 nss)
{
@ -306,9 +312,24 @@ mt76_get_txs_delta(struct device_node *np, u8 nss)
return be32_to_cpu(val[nss - 1]);
}
static inline u8 mt76_backoff_n_chains(struct mt76_dev *dev, u8 idx)
{
/* 0:1T1ss, 1:2T1ss, ..., 14:5T5ss */
static const u8 connac3_table[] = {
1, 2, 3, 4, 5, 2, 3, 4, 5, 3, 4, 5, 4, 5, 5};
static const u8 connac2_table[] = {
1, 2, 3, 4, 2, 3, 4, 3, 4, 4, 0, 0, 0, 0, 0};
if (idx >= ARRAY_SIZE(connac3_table))
return 0;
return is_mt799x(dev) ? connac3_table[idx] : connac2_table[idx];
}
static void
mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const s8 *data,
s8 target_power, s8 nss_delta, s8 *max_power)
mt76_apply_array_limit(struct mt76_dev *dev, s8 *pwr, size_t pwr_len,
const s8 *data, s8 target_power, s8 nss_delta,
s8 *max_power, int n_chains, enum mt76_sku_type type)
{
int i;
@ -316,18 +337,51 @@ mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const s8 *data,
return;
for (i = 0; i < pwr_len; i++) {
pwr[i] = min_t(s8, target_power, data[i] + nss_delta);
u8 backoff_chain_idx = i;
int backoff_n_chains;
s8 backoff_delta;
s8 delta;
switch (type) {
case MT76_SKU_RATE:
delta = 0;
backoff_delta = 0;
backoff_n_chains = 0;
break;
case MT76_SKU_BACKOFF_BF_OFFSET:
backoff_chain_idx += 1;
fallthrough;
case MT76_SKU_BACKOFF:
delta = mt76_tx_power_path_delta(n_chains);
backoff_n_chains = mt76_backoff_n_chains(dev, backoff_chain_idx);
backoff_delta = mt76_tx_power_path_delta(backoff_n_chains);
break;
default:
return;
}
pwr[i] = min_t(s8, target_power + delta - backoff_delta, data[i] + nss_delta);
/* used for padding, doesn't need to be considered */
if (data[i] >= S8_MAX - 1)
continue;
/* only consider backoff value for the configured chain number */
if (type != MT76_SKU_RATE && n_chains != backoff_n_chains)
continue;
*max_power = max(*max_power, pwr[i]);
}
}
static void
mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
const s8 *data, size_t len, s8 target_power,
s8 nss_delta)
mt76_apply_multi_array_limit(struct mt76_dev *dev, s8 *pwr, size_t pwr_len,
s8 pwr_num, const s8 *data, size_t len,
s8 target_power, s8 nss_delta, s8 *max_power,
int n_chains, enum mt76_sku_type type)
{
static const int connac2_backoff_ru_idx = 2;
int i, cur;
s8 max_power = -128;
if (!data)
return;
@ -337,8 +391,26 @@ mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
if (len < pwr_len + 1)
break;
mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1,
target_power, nss_delta, &max_power);
/* Each RU entry (RU26, RU52, RU106, BW20, ...) in the DTS
* corresponds to 10 stream combinations (1T1ss, 2T1ss, 3T1ss,
* 4T1ss, 2T2ss, 3T2ss, 4T2ss, 3T3ss, 4T3ss, 4T4ss).
*
* For beamforming tables:
* - In connac2, beamforming entries for BW20~BW160 and OFDM
* do not include 1T1ss.
* - In connac3, beamforming entries for BW20~BW160 and RU
* include 1T1ss, but OFDM beamforming does not include 1T1ss.
*
* Non-beamforming and RU entries for both connac2 and connac3
* include 1T1ss.
*/
if (!is_mt799x(dev) && type == MT76_SKU_BACKOFF &&
i > connac2_backoff_ru_idx)
type = MT76_SKU_BACKOFF_BF_OFFSET;
mt76_apply_array_limit(dev, pwr + pwr_len * i, pwr_len, data + 1,
target_power, nss_delta, max_power,
n_chains, type);
if (--cur > 0)
continue;
@ -360,18 +432,11 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct device_node *np;
const s8 *val;
char name[16];
u32 mcs_rates = dev->drv->mcs_rates;
u32 ru_rates = ARRAY_SIZE(dest->ru[0]);
char band;
size_t len;
s8 max_power = 0;
s8 max_power_backoff = -127;
s8 max_power = -127;
s8 txs_delta;
int n_chains = hweight16(phy->chainmask);
s8 target_power_combine = target_power + mt76_tx_power_path_delta(n_chains);
if (!mcs_rates)
mcs_rates = 10;
memset(dest, target_power, sizeof(*dest) - sizeof(dest->path));
memset(&dest->path, 0, sizeof(dest->path));
@ -409,46 +474,45 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
txs_delta = mt76_get_txs_delta(np, hweight16(phy->chainmask));
val = mt76_get_of_array_s8(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val,
target_power, txs_delta, &max_power);
mt76_apply_array_limit(dev, dest->cck, ARRAY_SIZE(dest->cck), val,
target_power, txs_delta, &max_power, n_chains, MT76_SKU_RATE);
val = mt76_get_of_array_s8(np, "rates-ofdm",
&len, ARRAY_SIZE(dest->ofdm));
mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val,
target_power, txs_delta, &max_power);
val = mt76_get_of_array_s8(np, "rates-ofdm", &len, ARRAY_SIZE(dest->ofdm));
mt76_apply_array_limit(dev, dest->ofdm, ARRAY_SIZE(dest->ofdm), val,
target_power, txs_delta, &max_power, n_chains, MT76_SKU_RATE);
val = mt76_get_of_array_s8(np, "rates-mcs", &len, mcs_rates + 1);
mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]),
ARRAY_SIZE(dest->mcs), val, len,
target_power, txs_delta);
val = mt76_get_of_array_s8(np, "rates-mcs", &len, ARRAY_SIZE(dest->mcs[0]) + 1);
mt76_apply_multi_array_limit(dev, dest->mcs[0], ARRAY_SIZE(dest->mcs[0]),
ARRAY_SIZE(dest->mcs), val, len, target_power,
txs_delta, &max_power, n_chains, MT76_SKU_RATE);
val = mt76_get_of_array_s8(np, "rates-ru", &len, ru_rates + 1);
mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]),
ARRAY_SIZE(dest->ru), val, len,
target_power, txs_delta);
val = mt76_get_of_array_s8(np, "rates-ru", &len, ARRAY_SIZE(dest->ru[0]) + 1);
mt76_apply_multi_array_limit(dev, dest->ru[0], ARRAY_SIZE(dest->ru[0]),
ARRAY_SIZE(dest->ru), val, len, target_power,
txs_delta, &max_power, n_chains, MT76_SKU_RATE);
max_power_backoff = max_power;
val = mt76_get_of_array_s8(np, "paths-cck", &len, ARRAY_SIZE(dest->path.cck));
mt76_apply_array_limit(dest->path.cck, ARRAY_SIZE(dest->path.cck), val,
target_power_combine, txs_delta, &max_power_backoff);
mt76_apply_array_limit(dev, dest->path.cck, ARRAY_SIZE(dest->path.cck), val,
target_power, txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF);
val = mt76_get_of_array_s8(np, "paths-ofdm", &len, ARRAY_SIZE(dest->path.ofdm));
mt76_apply_array_limit(dest->path.ofdm, ARRAY_SIZE(dest->path.ofdm), val,
target_power_combine, txs_delta, &max_power_backoff);
mt76_apply_array_limit(dev, dest->path.ofdm, ARRAY_SIZE(dest->path.ofdm), val,
target_power, txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF);
val = mt76_get_of_array_s8(np, "paths-ofdm-bf", &len, ARRAY_SIZE(dest->path.ofdm_bf));
mt76_apply_array_limit(dest->path.ofdm_bf, ARRAY_SIZE(dest->path.ofdm_bf), val,
target_power_combine, txs_delta, &max_power_backoff);
mt76_apply_array_limit(dev, dest->path.ofdm_bf, ARRAY_SIZE(dest->path.ofdm_bf), val,
target_power, txs_delta, &max_power, n_chains,
MT76_SKU_BACKOFF_BF_OFFSET);
val = mt76_get_of_array_s8(np, "paths-ru", &len, ARRAY_SIZE(dest->path.ru[0]) + 1);
mt76_apply_multi_array_limit(dest->path.ru[0], ARRAY_SIZE(dest->path.ru[0]),
ARRAY_SIZE(dest->path.ru), val, len,
target_power_combine, txs_delta);
mt76_apply_multi_array_limit(dev, dest->path.ru[0], ARRAY_SIZE(dest->path.ru[0]),
ARRAY_SIZE(dest->path.ru), val, len, target_power,
txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF);
val = mt76_get_of_array_s8(np, "paths-ru-bf", &len, ARRAY_SIZE(dest->path.ru_bf[0]) + 1);
mt76_apply_multi_array_limit(dest->path.ru_bf[0], ARRAY_SIZE(dest->path.ru_bf[0]),
ARRAY_SIZE(dest->path.ru_bf), val, len,
target_power_combine, txs_delta);
mt76_apply_multi_array_limit(dev, dest->path.ru_bf[0], ARRAY_SIZE(dest->path.ru_bf[0]),
ARRAY_SIZE(dest->path.ru_bf), val, len, target_power,
txs_delta, &max_power, n_chains, MT76_SKU_BACKOFF);
return max_power;
}

View File

@ -726,6 +726,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
INIT_LIST_HEAD(&dev->rxwi_cache);
dev->token_size = dev->drv->token_size;
INIT_DELAYED_WORK(&dev->scan_work, mt76_scan_work);
spin_lock_init(&dev->scan_lock);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
@ -970,6 +971,9 @@ bool mt76_has_tx_pending(struct mt76_phy *phy)
return true;
}
if (atomic_read(&phy->mgmt_tx_pending))
return true;
return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
@ -1030,9 +1034,10 @@ int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
int timeout = HZ / 5;
int ret;
set_bit(MT76_RESET, &phy->state);
mt76_worker_disable(&dev->tx_worker);
mt76_txq_schedule_pending(phy);
set_bit(MT76_RESET, &phy->state);
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
@ -1716,6 +1721,16 @@ void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid)
idr_destroy(&wcid->pktid);
/* Remove from sta_poll_list to prevent list corruption after reset.
* Without this, mt76_reset_device() reinitializes sta_poll_list but
* leaves wcid->poll_list with stale pointers, causing list corruption
* when mt76_wcid_add_poll() checks list_empty().
*/
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&wcid->poll_list))
list_del_init(&wcid->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
spin_lock_bh(&phy->tx_lock);
if (!list_empty(&wcid->tx_list))
@ -2121,3 +2136,214 @@ u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links)
return sel_links;
}
EXPORT_SYMBOL_GPL(mt76_select_links);
struct mt76_offchannel_cb_data {
struct mt76_phy *phy;
bool offchannel;
};
static void
mt76_offchannel_send_nullfunc(struct mt76_offchannel_cb_data *data,
struct ieee80211_vif *vif, int link_id)
{
struct mt76_phy *phy = data->phy;
struct ieee80211_tx_info *info;
struct ieee80211_sta *sta = NULL;
struct ieee80211_hdr *hdr;
struct mt76_wcid *wcid;
struct sk_buff *skb;
skb = ieee80211_nullfunc_get(phy->hw, vif, link_id, true);
if (!skb)
return;
hdr = (struct ieee80211_hdr *)skb->data;
if (data->offchannel)
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
skb->priority = 7;
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb,
phy->main_chandef.chan->band,
&sta))
return;
if (sta)
wcid = (struct mt76_wcid *)sta->drv_priv;
else
wcid = ((struct mt76_vif_link *)vif->drv_priv)->wcid;
if (link_id >= 0) {
info = IEEE80211_SKB_CB(skb);
info->control.flags &= ~IEEE80211_TX_CTRL_MLO_LINK;
info->control.flags |=
u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK);
}
mt76_tx(phy, sta, wcid, skb);
}
static void
mt76_offchannel_notify_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
struct mt76_offchannel_cb_data *data = _data;
struct mt76_vif_link *mlink;
struct mt76_vif_data *mvif;
int link_id;
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return;
mlink = (struct mt76_vif_link *)vif->drv_priv;
mvif = mlink->mvif;
if (!ieee80211_vif_is_mld(vif)) {
if (mt76_vif_link_phy(mlink) == data->phy) {
if (!data->offchannel && mlink->beacon_mon_interval)
WRITE_ONCE(mlink->beacon_mon_last, jiffies);
mt76_offchannel_send_nullfunc(data, vif, -1);
}
return;
}
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (link_id == mvif->deflink_id)
mlink = (struct mt76_vif_link *)vif->drv_priv;
else
mlink = rcu_dereference(mvif->link[link_id]);
if (!mlink)
continue;
if (mt76_vif_link_phy(mlink) != data->phy)
continue;
if (!data->offchannel && mlink->beacon_mon_interval)
WRITE_ONCE(mlink->beacon_mon_last, jiffies);
mt76_offchannel_send_nullfunc(data, vif, link_id);
}
}
void mt76_offchannel_notify(struct mt76_phy *phy, bool offchannel)
{
struct mt76_offchannel_cb_data data = {
.phy = phy,
.offchannel = offchannel,
};
if (!phy->num_sta)
return;
local_bh_disable();
ieee80211_iterate_active_interfaces_atomic(phy->hw,
IEEE80211_IFACE_ITER_NORMAL,
mt76_offchannel_notify_iter, &data);
local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_offchannel_notify);
struct mt76_rx_beacon_data {
struct mt76_phy *phy;
const u8 *bssid;
};
static void mt76_rx_beacon_iter(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt76_rx_beacon_data *data = _data;
struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_vif_data *mvif = mlink->mvif;
int link_id;
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return;
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct ieee80211_bss_conf *link_conf;
if (link_id == mvif->deflink_id)
mlink = (struct mt76_vif_link *)vif->drv_priv;
else
mlink = rcu_dereference(mvif->link[link_id]);
if (!mlink || !mlink->beacon_mon_interval)
continue;
if (mt76_vif_link_phy(mlink) != data->phy)
continue;
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (!link_conf)
continue;
if (!ether_addr_equal(link_conf->bssid, data->bssid) &&
(!link_conf->nontransmitted ||
!ether_addr_equal(link_conf->transmitter_bssid,
data->bssid)))
continue;
WRITE_ONCE(mlink->beacon_mon_last, jiffies);
}
}
void mt76_rx_beacon(struct mt76_phy *phy, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_rx_beacon_data data = {
.phy = phy,
.bssid = hdr->addr3,
};
mt76_scan_rx_beacon(phy->dev, phy->chandef.chan);
if (!phy->num_sta)
return;
if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_ONLY_MONITOR))
return;
ieee80211_iterate_active_interfaces_atomic(phy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76_rx_beacon_iter, &data);
}
EXPORT_SYMBOL_GPL(mt76_rx_beacon);
static void mt76_beacon_mon_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt76_phy *phy = data;
struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
struct mt76_vif_data *mvif = mlink->mvif;
int link_id;
if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
return;
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
if (link_id == mvif->deflink_id)
mlink = (struct mt76_vif_link *)vif->drv_priv;
else
mlink = rcu_dereference(mvif->link[link_id]);
if (!mlink || !mlink->beacon_mon_interval)
continue;
if (mt76_vif_link_phy(mlink) != phy)
continue;
if (time_after(jiffies,
READ_ONCE(mlink->beacon_mon_last) +
MT76_BEACON_MON_MAX_MISS * mlink->beacon_mon_interval))
ieee80211_beacon_loss(vif);
}
}
void mt76_beacon_mon_check(struct mt76_phy *phy)
{
if (phy->offchannel)
return;
ieee80211_iterate_active_interfaces_atomic(phy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt76_beacon_mon_iter, phy);
}
EXPORT_SYMBOL_GPL(mt76_beacon_mon_check);

View File

@ -98,7 +98,7 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
/* orig skb might be needed for retry, mcu_skb_send_msg consumes it */
if (orig_skb)
skb_get(orig_skb);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq);
ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, wait_resp ? &seq : NULL);
if (ret < 0)
goto out;

View File

@ -55,6 +55,8 @@
FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_NPU_Q_TX(_n) __MT_NPU_Q(MT76_WED_Q_TX, _n)
#define MT_NPU_Q_RX(_n) __MT_NPU_Q(MT76_WED_Q_RX, _n)
#define MT_NPU_Q_TXFREE(_n) (FIELD_PREP(MT_QFLAG_WED_TYPE, MT76_WED_Q_TXFREE) | \
FIELD_PREP(MT_QFLAG_WED_RING, _n))
struct mt76_dev;
struct mt76_phy;
@ -362,6 +364,7 @@ enum mt76_wcid_flags {
};
#define MT76_N_WCIDS 1088
#define MT76_BEACON_MON_MAX_MISS 7
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY GENMASK(3, 2)
@ -448,6 +451,7 @@ struct mt76_txwi_cache {
};
u8 qid;
u8 phy_idx;
};
struct mt76_rx_tid {
@ -540,7 +544,6 @@ struct mt76_driver_ops {
u32 survey_flags;
u16 txwi_size;
u16 token_size;
u8 mcs_rates;
unsigned int link_data_size;
@ -831,6 +834,8 @@ struct mt76_vif_link {
u8 mcast_rates_idx;
u8 beacon_rates_idx;
bool offchannel;
unsigned long beacon_mon_last;
u16 beacon_mon_interval;
struct ieee80211_chanctx_conf *ctx;
struct mt76_wcid *wcid;
struct mt76_vif_data *mvif;
@ -859,6 +864,8 @@ struct mt76_phy {
struct list_head tx_list;
struct mt76_queue *q_tx[__MT_TXQ_MAX];
atomic_t mgmt_tx_pending;
struct cfg80211_chan_def chandef;
struct cfg80211_chan_def main_chandef;
bool offchannel;
@ -1002,6 +1009,7 @@ struct mt76_dev {
u32 rxfilter;
struct delayed_work scan_work;
spinlock_t scan_lock;
struct {
struct cfg80211_scan_request *req;
struct ieee80211_channel *chan;
@ -1009,6 +1017,8 @@ struct mt76_dev {
struct mt76_vif_link *mlink;
struct mt76_phy *phy;
int chan_idx;
bool beacon_wait;
bool beacon_received;
} scan;
#ifdef CONFIG_NL80211_TESTMODE
@ -1518,6 +1528,7 @@ void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_txq_schedule_pending(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
@ -1596,6 +1607,9 @@ int mt76_get_rate(struct mt76_dev *dev,
int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req);
void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void mt76_scan_rx_beacon(struct mt76_dev *dev, struct ieee80211_channel *chan);
void mt76_rx_beacon(struct mt76_phy *phy, struct sk_buff *skb);
void mt76_beacon_mon_check(struct mt76_phy *phy);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
@ -1649,6 +1663,9 @@ void mt76_npu_txdesc_cleanup(struct mt76_queue *q, int index);
int mt76_npu_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct net_device *dev, enum tc_setup_type type,
void *type_data);
int mt76_npu_send_txrx_addr(struct mt76_dev *dev, int ifindex,
u32 direction, u32 i_count_addr,
u32 o_status_addr, u32 o_count_addr);
#else
static inline void mt76_npu_check_ppe(struct mt76_dev *dev,
struct sk_buff *skb, u32 info)
@ -1707,6 +1724,13 @@ static inline int mt76_npu_net_setup_tc(struct ieee80211_hw *hw,
{
return -EOPNOTSUPP;
}
static inline int mt76_npu_send_txrx_addr(struct mt76_dev *dev, int ifindex,
u32 direction, u32 i_count_addr,
u32 o_status_addr, u32 o_count_addr)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_MT76_NPU */
static inline bool mt76_npu_device_active(struct mt76_dev *dev)
@ -1775,6 +1799,18 @@ void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_entry *e);
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
bool offchannel);
static inline bool
mt76_offchannel_chandef(struct mt76_phy *phy, struct ieee80211_channel *chan,
struct cfg80211_chan_def *chandef)
{
cfg80211_chandef_create(chandef, chan, NL80211_CHAN_HT20);
if (phy->main_chandef.chan != chan)
return true;
*chandef = phy->main_chandef;
return false;
}
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
bool offchannel);
void mt76_scan_work(struct work_struct *work);
@ -1786,6 +1822,7 @@ struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
struct ieee80211_vif *vif);
void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct mt76_vif_link *mlink);
void mt76_offchannel_notify(struct mt76_phy *phy, bool offchannel);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@ -1993,6 +2030,14 @@ static inline bool mt76_queue_is_npu_rx(struct mt76_queue *q)
FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
}
static inline bool mt76_queue_is_npu_txfree(struct mt76_queue *q)
{
if (q->flags & MT_QFLAG_WED)
return false;
return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);

View File

@ -1167,21 +1167,6 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
}
EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
u32 addr;
addr = mt7615_mac_wtbl_addr(dev, mvif->sta.wcid.idx) + 3 * 4;
if (enable)
mt76_set(dev, addr, MT_WTBL_W3_RTS);
else
mt76_clear(dev, addr, MT_WTBL_W3_RTS);
}
EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
static int
mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,

View File

@ -583,9 +583,6 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7615_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon) {
mt7615_mcu_add_bss_info(phy, vif, NULL, true);
mt7615_mcu_sta_add(phy, vif, NULL, true);
@ -598,6 +595,10 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
BSS_CHANGED_BEACON_ENABLED))
mt7615_mcu_add_beacon(dev, hw, vif, info->enable_beacon);
if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
mt7615_mcu_set_protection(phy, vif, info->ht_operation_mode,
info->use_cts_prot);
if (changed & BSS_CHANGED_PS)
mt76_connac_mcu_set_vif_ps(&dev->mt76, vif);

View File

@ -2564,3 +2564,50 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_ROC),
&req, sizeof(req), false);
}
int mt7615_mcu_set_protection(struct mt7615_phy *phy, struct ieee80211_vif *vif,
u8 ht_mode, bool use_cts_prot)
{
struct mt7615_dev *dev = phy->dev;
struct {
u8 prot_idx;
u8 band;
u8 rsv[2];
bool long_nav;
bool prot_mm;
bool prot_gf;
bool prot_bw40;
bool prot_rifs;
bool prot_bw80;
bool prot_bw160;
u8 prot_erp_mask;
} __packed req = {
.prot_idx = 0x2,
.band = phy != &dev->phy,
};
switch (ht_mode & IEEE80211_HT_OP_MODE_PROTECTION) {
case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
req.prot_mm = true;
req.prot_gf = true;
fallthrough;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
req.prot_bw40 = true;
break;
}
if (ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
req.prot_gf = true;
if (use_cts_prot) {
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
u8 i = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->mt76.omac_idx;
req.prot_erp_mask = BIT(i);
}
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req,
sizeof(req), true);
}

View File

@ -467,8 +467,6 @@ void mt7615_mac_reset_counters(struct mt7615_phy *phy);
void mt7615_mac_cca_stats_reset(struct mt7615_phy *phy);
void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable);
void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy);
void mt7615_mac_enable_rtscts(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable);
void mt7615_mac_sta_poll(struct mt7615_dev *dev);
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
@ -523,7 +521,8 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
int mt7615_mcu_set_protection(struct mt7615_phy *phy, struct ieee80211_vif *vif,
u8 ht_mode, bool use_cts_prot);
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration);

View File

@ -455,8 +455,6 @@ enum mt7615_reg_base {
#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
#define MT_WTBL_W3_RTS BIT(22)
#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
#define MT_WTBL_W5_SHORT_GI_40 BIT(9)

View File

@ -182,14 +182,20 @@ static inline bool is_mt7920(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7920;
}
/* Return true if @dev is an MT7902 (chip ID 0x7902). */
static inline bool is_mt7902(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7902;
}
/* Return true if @dev is an MT7922 (chip ID 0x7922). */
static inline bool is_mt7922(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7922;
}
static inline bool is_mt7921(struct mt76_dev *dev)
static inline bool is_connac2(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev);
return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev) ||
is_mt7902(dev);
}
static inline bool is_mt7663(struct mt76_dev *dev)
@ -271,6 +277,7 @@ static inline bool is_mt76_fw_txp(struct mt76_dev *dev)
case 0x7961:
case 0x7920:
case 0x7922:
case 0x7902:
case 0x7925:
case 0x7663:
case 0x7622:

View File

@ -173,7 +173,7 @@ void mt76_connac_write_hw_txp(struct mt76_dev *dev,
txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev))
if (is_mt7663(dev) || is_connac2(dev) || is_mt7925(dev))
last_mask = MT_TXD_LEN_LAST;
else
last_mask = MT_TXD_LEN_AMSDU_LAST |
@ -217,7 +217,7 @@ mt76_connac_txp_skb_unmap_hw(struct mt76_dev *dev,
u32 last_mask;
int i;
if (is_mt7663(dev) || is_mt7921(dev) || is_mt7925(dev))
if (is_mt7663(dev) || is_connac2(dev) || is_mt7925(dev))
last_mask = MT_TXD_LEN_LAST;
else
last_mask = MT_TXD_LEN_MSDU_LAST;
@ -309,7 +309,7 @@ u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
chandef = mvif->ctx ? &mvif->ctx->def : &mphy->chandef;
band = chandef->chan->band;
if (is_mt7921(mphy->dev)) {
if (is_connac2(mphy->dev)) {
rateidx = ffs(conf->basic_rates) - 1;
goto legacy;
}
@ -548,7 +548,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val = MT_TXD1_LONG_FORMAT |
FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
if (!is_mt7921(dev))
if (!is_connac2(dev))
val |= MT_TXD1_VTA;
if (phy_idx || band_idx)
val |= MT_TXD1_TGID;
@ -557,7 +557,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
txwi[2] = 0;
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);
if (!is_mt7921(dev))
if (!is_connac2(dev))
val |= MT_TXD3_SW_POWER_MGMT;
if (key)
val |= MT_TXD3_PROTECT_FRAME;
@ -599,7 +599,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
if (!is_mt7921(dev)) {
if (!is_connac2(dev)) {
u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask);
if (!spe_idx)
@ -831,7 +831,7 @@ mt76_connac2_mac_decode_he_mu_radiotap(struct mt76_dev *dev, struct sk_buff *skb
};
struct ieee80211_radiotap_he_mu *he_mu;
if (is_mt7921(dev)) {
if (is_connac2(dev)) {
mu_known.flags1 |= HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN);
mu_known.flags2 |= HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN);
}
@ -1047,7 +1047,7 @@ int mt76_connac2_mac_fill_rx_rate(struct mt76_dev *dev,
stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
gi = FIELD_GET(MT_PRXV_HT_SGI, v0);
*mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
if (is_mt7921(dev))
if (is_connac2(dev))
dcm = !!(idx & MT_PRXV_TX_DCM);
else
dcm = FIELD_GET(MT_PRXV_DCM, v0);
@ -1153,8 +1153,10 @@ void mt76_connac2_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
return;
wcid = (struct mt76_wcid *)sta->drv_priv;
if (!test_and_set_bit(tid, &wcid->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
if (!test_and_set_bit(tid, &wcid->ampdu_state)) {
if (ieee80211_start_tx_ba_session(sta, tid, 0))
clear_bit(tid, &wcid->ampdu_state);
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_tx_check_aggr);
@ -1207,5 +1209,11 @@ void mt76_connac2_tx_token_put(struct mt76_dev *dev)
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
for (id = 0; id < __MT_MAX_BAND; id++) {
struct mt76_phy *phy = dev->phys[id];
if (phy)
atomic_set(&phy->mgmt_tx_pending, 0);
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_tx_token_put);

View File

@ -4,6 +4,7 @@
#include <linux/firmware.h>
#include "mt76_connac2_mac.h"
#include "mt76_connac_mcu.h"
#include "mt792x_regs.h"
int mt76_connac_mcu_start_firmware(struct mt76_dev *dev, u32 addr, u32 option)
{
@ -65,7 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
int cmd;
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
(is_connac2(dev) && addr == 0x900000) ||
(is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
(is_mt799x(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
@ -402,7 +403,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if (vif->p2p && !is_mt7921(dev))
if (vif->p2p && !is_connac2(dev))
conn_type = CONNECTION_P2P_GC;
else
conn_type = CONNECTION_INFRA_STA;
@ -410,7 +411,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->aid = cpu_to_le16(link_sta->sta->aid);
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p && !is_mt7921(dev))
if (vif->p2p && !is_connac2(dev))
conn_type = CONNECTION_P2P_GO;
else
conn_type = CONNECTION_INFRA_AP;
@ -874,7 +875,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct sta_rec_vht *vht;
int len;
len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
len = is_connac2(dev) ? sizeof(*vht) : sizeof(*vht) - 4;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, len);
vht = (struct sta_rec_vht *)tlv;
vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
@ -885,7 +886,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
/* starec uapsd */
mt76_connac_mcu_sta_uapsd(skb, vif, sta);
if (!is_mt7921(dev))
if (!is_connac2(dev))
return;
if (sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)
@ -1295,8 +1296,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
wtbl_hdr);
ret = mt76_connac_mcu_sta_wed_update(dev, skb);
if (ret)
if (ret) {
dev_kfree_skb(skb);
return ret;
}
ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
@ -1309,8 +1312,10 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif,
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
ret = mt76_connac_mcu_sta_wed_update(dev, skb);
if (ret)
if (ret) {
dev_kfree_skb(skb);
return ret;
}
return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
@ -1774,7 +1779,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->ssid_type_ext = n_ssids ? BIT(0) : 0;
req->ssids_num = n_ssids;
duration = is_mt7921(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
duration = is_connac2(phy->dev) ? 0 : MT76_CONNAC_SCAN_CHANNEL_TIME;
/* increase channel time for passive scan */
if (!sreq->n_ssids)
duration *= 2;
@ -1817,7 +1822,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
req->ies_len = cpu_to_le16(sreq->ie_len);
}
if (is_mt7921(phy->dev))
if (is_connac2(phy->dev))
req->scan_func |= SCAN_FUNC_SPLIT_SCAN;
memcpy(req->bssid, sreq->bssid, ETH_ALEN);
@ -1893,7 +1898,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
get_random_mask_addr(addr, sreq->mac_addr,
sreq->mac_addr_mask);
}
if (is_mt7921(phy->dev)) {
if (is_connac2(phy->dev)) {
req->mt7921.bss_idx = mvif->idx;
req->mt7921.delay = cpu_to_le32(sreq->delay);
}
@ -2033,7 +2038,7 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
struct mt76_power_limits *limits,
enum nl80211_band band)
{
int max_power = is_mt7921(dev) ? 127 : 63;
int max_power = is_connac2(dev) ? 127 : 63;
int i, offset = sizeof(limits->cck);
memset(sku, max_power, MT_SKU_POWER_LIMIT);
@ -2061,7 +2066,7 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
offset += 12;
}
if (!is_mt7921(dev))
if (!is_connac2(dev))
return;
/* he */
@ -2117,7 +2122,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
enum nl80211_band band)
{
struct mt76_dev *dev = phy->dev;
int sku_len, batch_len = is_mt7921(dev) ? 8 : 16;
int sku_len, batch_len = is_connac2(dev) ? 8 : 16;
static const u8 chan_list_2ghz[] = {
1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14
@ -2158,7 +2163,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
if (!limits)
return -ENOMEM;
sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
sku_len = is_connac2(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
tx_power = 2 * phy->hw->conf.power_level;
if (!tx_power)
tx_power = 127;
@ -2242,6 +2247,9 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
false);
if (err < 0)
goto out;
/* read a CR to avoid PSE buffer underflow */
mt76_connac_mcu_reg_rr(dev, MT_PSE_BASE);
}
out:
@ -2764,12 +2772,16 @@ int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd);
if (ret)
if (ret) {
dev_kfree_skb(skb);
return ret;
}
ret = mt76_connac_mcu_sta_wed_update(dev, skb);
if (ret)
if (ret) {
dev_kfree_skb(skb);
return ret;
}
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
@ -3072,7 +3084,7 @@ static u32 mt76_connac2_get_data_mode(struct mt76_dev *dev, u32 info)
{
u32 mode = DL_MODE_NEED_RSP;
if ((!is_mt7921(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT)
if ((!is_connac2(dev) && !is_mt7925(dev)) || info == PATCH_SEC_NOT_SUPPORT)
return mode;
switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {

View File

@ -628,6 +628,13 @@ struct sta_rec_tx_proc {
__le32 flag;
} __packed;
/* STA record TLV (tag STA_REC_EML_OP) carrying per-station EML
 * operation data.
 * NOTE(review): presumably EML here is the 802.11be Enhanced Multi-Link
 * (eMLSR/eMLMR) operation — confirm against the firmware interface.
 */
struct sta_rec_eml_op {
__le16 tag;   /* TLV tag: STA_REC_EML_OP */
__le16 len;   /* TLV length including this header */
u8 link_bitmap;       /* bitmap of links the operation applies to — TODO confirm semantics */
u8 link_ant_num[3];   /* per-link antenna counts — TODO confirm ordering */
} __packed;
/* wtbl_rec */
struct wtbl_req_hdr {
@ -796,6 +803,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_pn_info) + \
sizeof(struct sta_rec_tx_proc) + \
sizeof(struct sta_rec_eml_op) + \
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
@ -832,6 +840,7 @@ enum {
STA_REC_PN_INFO = 0x26,
STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
STA_REC_EML_OP = 0x29,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
};
@ -1308,7 +1317,9 @@ enum {
MCU_UNI_CMD_PER_STA_INFO = 0x6d,
MCU_UNI_CMD_ALL_STA_INFO = 0x6e,
MCU_UNI_CMD_ASSERT_DUMP = 0x6f,
MCU_UNI_CMD_EXT_EEPROM_CTRL = 0x74,
MCU_UNI_CMD_RADIO_STATUS = 0x80,
MCU_UNI_CMD_MLD = 0x82,
MCU_UNI_CMD_SDO = 0x88,
};
@ -1363,6 +1374,7 @@ enum {
UNI_BSS_INFO_BASIC = 0,
UNI_BSS_INFO_RA = 1,
UNI_BSS_INFO_RLM = 2,
UNI_BSS_INFO_PROTECT_INFO = 3,
UNI_BSS_INFO_BSS_COLOR = 4,
UNI_BSS_INFO_HE_BASIC = 5,
UNI_BSS_INFO_11V_MBSSID = 6,
@ -1383,6 +1395,7 @@ enum {
UNI_BSS_INFO_MLD = 26,
UNI_BSS_INFO_PM_DISABLE = 27,
UNI_BSS_INFO_EHT = 30,
UNI_BSS_INFO_MLD_LINK_OP = 36,
};
enum {
@ -1865,7 +1878,7 @@ mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
if (is_mt7921(dev) || is_mt7925(dev))
if (is_connac2(dev) || is_mt7925(dev))
ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
DL_CONFIG_ENCRY_MODE_SEL : 0;
ret |= FIELD_PREP(DL_MODE_KEY_IDX,

View File

@ -534,6 +534,7 @@ void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
return;
clear_bit(MT76_RESTART, &dev->mphy.state);
ieee80211_wake_queues(hw);
}
EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);

View File

@ -1294,6 +1294,7 @@ int mt7915_register_device(struct mt7915_dev *dev)
void mt7915_unregister_device(struct mt7915_dev *dev)
{
cancel_work_sync(&dev->dump_work);
mt7915_unregister_ext_phy(dev);
mt7915_coredump_unregister(dev);
mt7915_unregister_thermal(&dev->phy);

View File

@ -232,19 +232,6 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
rcu_read_unlock();
}
void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
struct ieee80211_vif *vif, bool enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
u32 addr;
addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
if (enable)
mt76_set(dev, addr, BIT(5));
else
mt76_clear(dev, addr, BIT(5));
}
static void
mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
struct mt7915_sta *msta, struct sk_buff *skb,

View File

@ -68,7 +68,7 @@ int mt7915_run(struct ieee80211_hw *hw)
if (ret)
goto out;
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, MT7915_RTS_LEN_THRES,
phy->mt76->band_idx);
if (ret)
goto out;
@ -633,8 +633,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (set_sta == 1)
mt7915_mcu_add_sta(dev, vif, NULL, CONN_STATE_PORT_SECURE, false);
if (changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
mt7915_mcu_set_protection(phy, vif, info->ht_operation_mode,
info->use_cts_prot);
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = 9;
@ -851,8 +852,10 @@ int mt7915_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_PORT_SECURE, false);
case MT76_STA_EVENT_DISASSOC:
mutex_lock(&dev->mt76.mutex);
for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
mt7915_mac_twt_teardown_flow(dev, msta, i);
mutex_unlock(&dev->mt76.mutex);
mt7915_mcu_add_sta(dev, vif, sta, CONN_STATE_DISCONNECT, false);
msta->wcid.sta_disabled = 1;

View File

@ -1765,8 +1765,10 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
out:
ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb);
if (ret)
if (ret) {
dev_kfree_skb(skb);
return ret;
}
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
@ -3954,6 +3956,68 @@ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
return ret;
}
/* Push HT/ERP protection settings to the firmware as a
 * UNI_BSS_INFO_PROTECT_INFO TLV inside a BSS_INFO_UPDATE command.
 *
 * @phy: per-band phy the BSS lives on
 * @vif: interface whose BSS protection state is being updated
 * @ht_mode: HT Operation element protection field
 *           (IEEE80211_HT_OP_MODE_* from bss_info_changed)
 * @use_cts_prot: ERP CTS-to-self protection requested by mac80211
 *
 * Returns 0 on success, or a negative errno from skb allocation or the
 * MCU transport. The skb is consumed by mt76_mcu_skb_send_msg().
 */
int mt7915_mcu_set_protection(struct mt7915_phy *phy, struct ieee80211_vif *vif,
u8 ht_mode, bool use_cts_prot)
{
struct mt7915_dev *dev = phy->dev;
int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_prot);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct bss_info_prot *prot;
struct sk_buff *skb;
struct tlv *tlv;
/* firmware encoding of prot_mode bits — NOTE(review): bit positions
 * taken as-is from the MCU interface; confirm against firmware docs
 */
enum {
PROT_NONMEMBER = BIT(1),
PROT_20MHZ = BIT(2),
PROT_NONHT_MIXED = BIT(3),
PROT_LEGACY_ERP = BIT(5),
PROT_NONGF_STA = BIT(7),
};
u32 rts_threshold;
skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
NULL, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_PROTECT_INFO,
sizeof(*prot));
prot = (struct bss_info_prot *)tlv;
/* map the 802.11 HT protection mode onto the firmware bitmask */
switch (ht_mode & IEEE80211_HT_OP_MODE_PROTECTION) {
case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
prot->prot_mode = cpu_to_le32(PROT_NONMEMBER);
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
prot->prot_mode = cpu_to_le32(PROT_20MHZ);
break;
case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
prot->prot_mode = cpu_to_le32(PROT_NONHT_MIXED);
break;
}
if (ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
prot->prot_mode |= cpu_to_le32(PROT_NONGF_STA);
if (use_cts_prot)
prot->prot_mode |= cpu_to_le32(PROT_LEGACY_ERP);
/* reuse current RTS setting */
rts_threshold = phy->mt76->hw->wiphy->rts_threshold;
/* wiphy reports "RTS off" as (u32)-1; substitute the driver default
 * length threshold so the firmware still gets a sane value
 */
if (rts_threshold == (u32)-1)
prot->rts_len_thres = cpu_to_le32(MT7915_RTS_LEN_THRES);
else
prot->rts_len_thres = cpu_to_le32(rts_threshold);
/* packet-count based RTS trigger — fixed at 2 frames */
prot->rts_pkt_thres = 0x2;
/* HE duration-based RTS threshold from the BSS config; fall back to
 * the driver default when mac80211 left it unset (0)
 */
prot->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
if (!prot->he_rts_thres)
prot->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color)
{

View File

@ -399,6 +399,17 @@ struct bss_info_inband_discovery {
__le16 prob_rsp_len;
} __packed __aligned(4);
/* BSS_INFO_UPDATE TLV (tag BSS_INFO_PROTECT_INFO) describing the HT/ERP
 * protection and RTS thresholds for a BSS; filled by
 * mt7915_mcu_set_protection().
 */
struct bss_info_prot {
__le16 tag;   /* TLV tag: BSS_INFO_PROTECT_INFO */
__le16 len;   /* TLV length including this header */
__le32 prot_type;
__le32 prot_mode;      /* PROT_* bitmask (non-member/20MHz/mixed/ERP/non-GF) */
__le32 rts_len_thres;  /* RTS length threshold — presumably bytes; TODO confirm */
__le16 he_rts_thres;   /* HE duration-based RTS threshold */
u8 rts_pkt_thres;      /* packet-count RTS trigger */
u8 rsv[5];             /* padding/reserved */
} __packed;
enum {
BSS_INFO_BCN_CSA,
BSS_INFO_BCN_BCC,

Some files were not shown because too many files have changed in this diff Show More