author    | Bjoern A. Zeeb <bz@FreeBSD.org> | 2024-05-17 01:01:18 +0000
committer | Bjoern A. Zeeb <bz@FreeBSD.org> | 2024-07-29 14:58:09 +0000
commit    | c6bcce0f5b920c7be597b105cc01ae72f9189747 (patch)
tree      | 9b88cb0d517478edbfc544289148e335f4d97cdf
parent    | 474ebd350c8c7ca7414c35973808f078b6006670 (diff)
iwlwifi: update Intel's iwlwifi/mvm driver. (vendor/Linux/iwlwifi/wireless-testing-62e409149b62)
This version is based on
https://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-testing.git
62e409149b62a285e89018e49b2e115757fb9022 (tag: wt-2023-07-24)
and was committed to FreeBSD main as
9af1bba44e1ce9b0296ae56760b564d67ab7a1cf.
-rw-r--r-- | cfg/22000.c | 617
-rw-r--r-- | cfg/7000.c | 22
-rw-r--r-- | cfg/8000.c | 10
-rw-r--r-- | cfg/9000.c | 10
-rw-r--r-- | cfg/ax210.c | 301
-rw-r--r-- | cfg/bz.c | 183
-rw-r--r-- | cfg/sc.c | 166
-rw-r--r-- | fw/acpi.c | 172
-rw-r--r-- | fw/acpi.h | 41
-rw-r--r-- | fw/api/binding.h | 14
-rw-r--r-- | fw/api/commands.h | 47
-rw-r--r-- | fw/api/config.h | 15
-rw-r--r-- | fw/api/context.h | 13
-rw-r--r-- | fw/api/d3.h | 98
-rw-r--r-- | fw/api/datapath.h | 287
-rw-r--r-- | fw/api/dbg-tlv.h | 4
-rw-r--r-- | fw/api/debug.h | 100
-rw-r--r-- | fw/api/filter.h | 2
-rw-r--r-- | fw/api/location.h | 16
-rw-r--r-- | fw/api/mac-cfg.h | 465
-rw-r--r-- | fw/api/mac.h | 30
-rw-r--r-- | fw/api/nvm-reg.h | 46
-rw-r--r-- | fw/api/offload.h | 20
-rw-r--r-- | fw/api/phy-ctxt.h | 39
-rw-r--r-- | fw/api/phy.h | 10
-rw-r--r-- | fw/api/power.h | 2
-rw-r--r-- | fw/api/rs.h | 39
-rw-r--r-- | fw/api/rx.h | 215
-rw-r--r-- | fw/api/scan.h | 66
-rw-r--r-- | fw/api/sta.h | 2
-rw-r--r-- | fw/api/tdls.h | 2
-rw-r--r-- | fw/api/time-event.h | 7
-rw-r--r-- | fw/api/tx.h | 23
-rw-r--r-- | fw/dbg.c | 129
-rw-r--r-- | fw/dbg.h | 18
-rw-r--r-- | fw/debugfs.c | 39
-rw-r--r-- | fw/dump.c | 87
-rw-r--r-- | fw/error-dump.h | 19
-rw-r--r-- | fw/file.h | 23
-rw-r--r-- | fw/img.h | 5
-rw-r--r-- | fw/pnvm.c | 252
-rw-r--r-- | fw/pnvm.h | 27
-rw-r--r-- | fw/rs.c | 6
-rw-r--r-- | fw/runtime.h | 11
-rw-r--r-- | fw/uefi.c | 331
-rw-r--r-- | fw/uefi.h | 58
-rw-r--r-- | iwl-config.h | 55
-rw-r--r-- | iwl-context-info-gen3.h | 53
-rw-r--r-- | iwl-context-info.h | 5
-rw-r--r-- | iwl-csr.h | 5
-rw-r--r-- | iwl-dbg-tlv.c | 99
-rw-r--r-- | iwl-debug.c | 3
-rw-r--r-- | iwl-drv.c | 104
-rw-r--r-- | iwl-drv.h | 6
-rw-r--r-- | iwl-eeprom-parse.h | 8
-rw-r--r-- | iwl-fh.h | 4
-rw-r--r-- | iwl-io.c | 4
-rw-r--r-- | iwl-modparams.h | 1
-rw-r--r-- | iwl-nvm-parse.c | 393
-rw-r--r-- | iwl-nvm-parse.h | 4
-rw-r--r-- | iwl-prph.h | 20
-rw-r--r-- | iwl-trans.c | 6
-rw-r--r-- | iwl-trans.h | 138
-rw-r--r-- | mvm/binding.c | 23
-rw-r--r-- | mvm/coex.c | 112
-rw-r--r-- | mvm/constants.h | 6
-rw-r--r-- | mvm/d3.c | 1150
-rw-r--r-- | mvm/debugfs-vif.c | 765
-rw-r--r-- | mvm/debugfs.c | 470
-rw-r--r-- | mvm/ftm-initiator.c | 98
-rw-r--r-- | mvm/ftm-responder.c | 44
-rw-r--r-- | mvm/fw.c | 386
-rw-r--r-- | mvm/link.c | 285
-rw-r--r-- | mvm/mac-ctxt.c | 686
-rw-r--r-- | mvm/mac80211.c | 2333
-rw-r--r-- | mvm/mld-key.c | 376
-rw-r--r-- | mvm/mld-mac.c | 321
-rw-r--r-- | mvm/mld-mac80211.c | 1215
-rw-r--r-- | mvm/mld-sta.c | 1181
-rw-r--r-- | mvm/mvm.h | 669
-rw-r--r-- | mvm/nvm.c | 73
-rw-r--r-- | mvm/offloading.c | 10
-rw-r--r-- | mvm/ops.c | 129
-rw-r--r-- | mvm/phy-ctxt.c | 73
-rw-r--r-- | mvm/power.c | 104
-rw-r--r-- | mvm/ptp.c | 326
-rw-r--r-- | mvm/quota.c | 15
-rw-r--r-- | mvm/rfi.c | 16
-rw-r--r-- | mvm/rs-fw.c | 365
-rw-r--r-- | mvm/rx.c | 91
-rw-r--r-- | mvm/rxmq.c | 1201
-rw-r--r-- | mvm/scan.c | 313
-rw-r--r-- | mvm/sf.c | 66
-rw-r--r-- | mvm/sta.c | 842
-rw-r--r-- | mvm/sta.h | 154
-rw-r--r-- | mvm/tdls.c | 12
-rw-r--r-- | mvm/time-event.c | 33
-rw-r--r-- | mvm/time-sync.c | 173
-rw-r--r-- | mvm/time-sync.h | 30
-rw-r--r-- | mvm/tt.c | 79
-rw-r--r-- | mvm/tx.c | 344
-rw-r--r-- | mvm/utils.c | 115
-rw-r--r-- | pcie/ctxt-info-gen3.c | 278
-rw-r--r-- | pcie/ctxt-info.c | 8
-rw-r--r-- | pcie/drv.c | 711
-rw-r--r-- | pcie/internal.h | 13
-rw-r--r-- | pcie/rx.c | 51
-rw-r--r-- | pcie/trans-gen2.c | 89
-rw-r--r-- | pcie/trans.c | 221
-rw-r--r-- | pcie/tx.c | 7
-rw-r--r-- | queue/tx.c | 32
-rw-r--r-- | queue/tx.h | 8
112 files changed, 16146 insertions(+), 4953 deletions(-)
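Much of the mechanical churn in the cfg/*.c hunks below comes from a firmware-naming cleanup: the per-device `*_FW_PRE` strings lose their trailing dash, and the `MODULE_FIRMWARE()` helpers now paste the `"-"` in themselves before the stringified API version. The following sketch is illustrative only and is not part of the commit; it mirrors the macros from the cfg/22000.c hunk (with a minimal stand-in for linux/stringify.h) to show the `.ucode` file name the driver ends up requesting.

```c
/*
 * Illustrative sketch, not part of the patch: mirrors the firmware-name
 * macros from the cfg/22000.c hunk to show how the requested .ucode name
 * is composed after this update (prefix + "-" + API version + ".ucode").
 */
#include <stdio.h>

/* minimal stand-in for linux/stringify.h */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

/* values as they appear in the diff below */
#define IWL_22000_UCODE_API_MAX	77
#define IWL_QU_B_HR_B_FW_PRE	"iwlwifi-Qu-b0-hr-b0"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
	IWL_QU_B_HR_B_FW_PRE "-" __stringify(api) ".ucode"

int main(void)
{
	/* prints: iwlwifi-Qu-b0-hr-b0-77.ucode */
	printf("%s\n", IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
	return 0;
}
```

Keeping the separator inside the `MODULE_FIRMWARE()` macros presumably concentrates the name-composition logic in one place, which also fits the new `fw_name_mac`-based configs ("ma", "bz", "sc") introduced further down, where the file name is assembled from parts rather than from a single fixed prefix.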
diff --git a/cfg/22000.c b/cfg/22000.c index 8ff967edc8f0..d594694206b3 100644 --- a/cfg/22000.c +++ b/cfg/22000.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -10,10 +10,10 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 72 +#define IWL_22000_UCODE_API_MAX 77 /* Lowest firmware API version supported */ -#define IWL_22000_UCODE_API_MIN 39 +#define IWL_22000_UCODE_API_MIN 50 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d @@ -26,111 +26,26 @@ #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 -#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" -#define IWL_QNJ_B_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" -#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-" -#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" -#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-" -#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-" -#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-" -#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" -#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" -#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" -#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" -#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" -#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" -#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-" -#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-" -#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-" -#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-" -#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-" -#define IWL_SNJ_A_JF_B_FW_PRE "iwlwifi-SoSnj-a0-jf-b0-" -#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0-" -#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-" -#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-" -#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-" -#define IWL_MA_A_FM_A_FW_PRE "iwlwifi-ma-a0-fm-a0-" -#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-" -#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-" -#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-" -#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-" -#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-" -#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-" -#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-" -#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-" -#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-" -#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-" -#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-" -#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-" -#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-" - +#define IWL_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0" +#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0" +#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0" +#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0" +#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0" +#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0" +#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ - IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_QNJ_B_HR_B_MODULE_FIRMWARE(api) \ - IWL_QNJ_B_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_QU_B_HR_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_QUZ_A_HR_B_FW_PRE "-" 
__stringify(api) ".ucode" #define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode" + IWL_QUZ_A_JF_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \ - IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_QU_C_HR_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ - IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ - IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode" + IWL_QU_B_JF_B_FW_PRE "-" __stringify(api) ".ucode" #define IWL_CC_A_MODULE_FIRMWARE(api) \ - IWL_CC_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_JF_B_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_JF_B_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" -#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api) \ - IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \ - IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_BZ_A_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \ - IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \ - IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode" -#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \ - IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode" + IWL_CC_A_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -144,19 +59,7 @@ static const struct iwl_base_params iwl_22000_base_params = { .pcie_l1_allowed = true, }; -static const struct iwl_base_params iwl_ax210_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, - .num_of_queues = 512, - .max_tfd_queue_size = 65536, - .shadow_ram_support = true, - .led_compensation = 57, - 
.wd_timeout = IWL_LONG_WD_TIMEOUT, - .max_event_log_size = 512, - .shadow_reg_enable = true, - .pcie_l1_allowed = true, -}; - -static const struct iwl_ht_params iwl_22000_ht_params = { +const struct iwl_ht_params iwl_22000_ht_params = { .stbc = true, .ldpc = true, .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | @@ -164,7 +67,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { }; #define IWL_DEVICE_22000_COMMON \ - .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = 10, \ @@ -182,7 +84,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .mac_addr_from_csr = 0x380, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ - .trans.use_tfh = true, \ .trans.rf_id = true, \ .trans.gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ @@ -203,6 +104,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ + .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .trans.device_family = IWL_DEVICE_FAMILY_22000, \ .trans.base_params = &iwl_22000_base_params, \ .gp2_reg_addr = 0xa02c68, \ @@ -217,108 +119,8 @@ static const struct iwl_ht_params iwl_22000_ht_params = { }, \ } -#define IWL_DEVICE_AX210 \ - IWL_DEVICE_22000_COMMON, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ - .trans.base_params = &iwl_ax210_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - } - -#define IWL_DEVICE_BZ_COMMON \ - .ucode_api_max = IWL_22000_UCODE_API_MAX, \ - .ucode_api_min = IWL_22000_UCODE_API_MIN, \ - .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = 10, \ - .non_shared_ant = ANT_B, \ - .dccm_offset = IWL_22000_DCCM_OFFSET, \ - .dccm_len = IWL_22000_DCCM_LEN, \ - .dccm2_offset = IWL_22000_DCCM2_OFFSET, \ - .dccm2_len = IWL_22000_DCCM2_LEN, \ - .smem_offset = IWL_22000_SMEM_OFFSET, \ - .smem_len = IWL_22000_SMEM_LEN, \ - .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \ - .apmg_not_supported = true, \ - .trans.mq_rx_supported = true, \ - .vht_mu_mimo_supported = true, \ - .mac_addr_from_csr = 0x30, \ - .ht_params = &iwl_22000_ht_params, \ - .nvm_ver = IWL_22000_NVM_VERSION, \ - .trans.use_tfh = true, \ - .trans.rf_id = true, \ - .trans.gen2 = true, \ - .nvm_type = IWL_NVM_EXT, \ - .dbgc_supported = true, \ - .min_umac_error_event_table = 0x400000, \ - .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 60 * 1024, \ - .mon_smem_regs = { \ - .write_ptr = { \ - .addr = LDBG_M2S_BUF_WPTR, \ - .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ - }, \ - .cycle_cnt = { \ - .addr = LDBG_M2S_BUF_WRAP_CNT, \ - .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ - }, \ - } - -#define IWL_DEVICE_BZ \ - IWL_DEVICE_BZ_COMMON, \ - .trans.umac_prph_offset = 0x300000, \ - .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ - .trans.base_params = &iwl_ax210_base_params, \ - .min_txq_size = 128, \ - .gp2_reg_addr = 0xd02c68, \ - .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ - .mon_dram_regs = { \ - .write_ptr = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ - }, \ - .cycle_cnt = { \ - 
.addr = DBGC_DBGBUF_WRAP_AROUND, \ - .mask = 0xffffffff, \ - }, \ - .cur_frag = { \ - .addr = DBGC_CUR_DBGBUF_STATUS, \ - .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ - }, \ - }, \ - .mon_dbgi_regs = { \ - .write_ptr = { \ - .addr = DBGI_SRAM_FIFO_POINTERS, \ - .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ - }, \ - } - -const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_22000, - .base_params = &iwl_22000_base_params, -}; - const struct iwl_cfg_trans_params iwl_qu_trans_cfg = { .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, @@ -330,7 +132,6 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = { const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = { .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, @@ -342,7 +143,6 @@ const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg = { const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = { .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .device_family = IWL_DEVICE_FAMILY_22000, @@ -353,59 +153,6 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const struct iwl_cfg_trans_params iwl_snj_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, -}; - -const struct iwl_cfg_trans_params iwl_so_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, - .integrated = true, - /* TODO: the following values need to be checked */ - .xtal_latency = 500, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, -}; - -const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, - .integrated = true, - .low_latency_xtal = true, - .xtal_latency = 12000, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, -}; - -const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .umac_prph_offset = 0x300000, - .integrated = true, - .low_latency_xtal = true, - .xtal_latency = 12000, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, - .imr_enabled = true, -}; - /* * If the device doesn't support HE, no need to have that many buffers. 
* 22000 devices can split multiple frames into a single RB, so fewer are @@ -415,7 +162,6 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { */ #define IWL_NUM_RBDS_NON_HE 512 #define IWL_NUM_RBDS_22000_HE 2048 -#define IWL_NUM_RBDS_AX210_HE 4096 /* * All JF radio modules are part of the 9000 series, but the MAC part @@ -446,63 +192,20 @@ const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg = { .num_rbds = IWL_NUM_RBDS_NON_HE, }; -const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg = { - .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = { .device_family = IWL_DEVICE_FAMILY_22000, .base_params = &iwl_22000_base_params, .mq_rx_supported = true, - .use_tfh = true, .rf_id = true, .gen2 = true, .bisr_workaround = 1, }; -const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_AX210, - .base_params = &iwl_ax210_base_params, - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .integrated = true, - .umac_prph_offset = 0x300000 -}; - -const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { - .device_family = IWL_DEVICE_FAMILY_BZ, - .base_params = &iwl_ax210_base_params, - .mq_rx_supported = true, - .use_tfh = true, - .rf_id = true, - .gen2 = true, - .integrated = true, - .umac_prph_offset = 0x300000, - .xtal_latency = 12000, - .low_latency_xtal = true, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, -}; - const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz"; -const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; -const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; -const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; -const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; -const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; @@ -512,18 +215,6 @@ const char iwl_ax201_killer_1650s_name[] = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)"; const char iwl_ax201_killer_1650i_name[] = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)"; -const char iwl_ax210_killer_1675w_name[] = - "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; -const char iwl_ax210_killer_1675x_name[] = - "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; -const char iwl_ax211_killer_1675s_name[] = - "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; -const char iwl_ax211_killer_1675i_name[] = - "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; -const char iwl_ax411_killer_1690s_name[] = - "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; -const char iwl_ax411_killer_1690i_name[] = - "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, @@ -717,168 +408,6 @@ const 
struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg = { - .fw_name_pre = IWL_QNJ_B_HR_B_FW_PRE, - IWL_DEVICE_22500, - /* - * This device doesn't support receiving BlockAck with a large bitmap - * so we need to restrict the size of transmitted aggregation to the - * HT size; mac80211 would otherwise pick the HE max (256) by default. - */ - .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, - .num_rbds = IWL_NUM_RBDS_22000_HE, -}; - -const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { - .name = "Intel(R) Wireless-AC 9560 160MHz", - .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_NON_HE, -}; - -const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, - .trans.xtal_latency = 12000, - .trans.low_latency_xtal = true, -}; - -const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { - .name = "Intel(R) Wi-Fi 6 AX210 160MHz", - .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, - .trans.xtal_latency = 12000, - .trans.low_latency_xtal = true, -}; - -const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = { - .name = iwl_ax411_name, - .fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = { - .name = iwl_ax211_name, - .fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_hr_b0 = { - .fw_name_pre = IWL_SNJ_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_a0_jf_b0 = { - .fw_name_pre = IWL_SNJ_A_JF_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_hr_b0 = { - .fw_name_pre = IWL_MA_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = { - .fw_name_pre = IWL_MA_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0 = { - .fw_name_pre = IWL_MA_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = { - .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = { - .fw_name_pre = IWL_MA_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg 
iwl_cfg_so_a0_ms_a0 = { - .fw_name_pre = IWL_SO_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = { - .fw_name_pre = IWL_MA_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = { - .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = { - .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, - .uhb_supported = false, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { - .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, - IWL_DEVICE_AX210, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE, IWL_DEVICE_22500, @@ -891,119 +420,9 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; -const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = { - .fw_name_pre = IWL_BZ_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = { - .fw_name_pre = IWL_BZ_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = { - .fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = { - .fw_name_pre = IWL_BZ_A_MR_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = { - .fw_name_pre = IWL_BZ_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = { - .fw_name_pre = IWL_GL_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = { - .fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = { - .fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = { - .fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = { - .fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = { - .fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; - -const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = { - .fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE, - .uhb_supported = true, - IWL_DEVICE_BZ, - .num_rbds = IWL_NUM_RBDS_AX210_HE, -}; MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 
-MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/cfg/7000.c b/cfg/7000.c index b24dc5523a52..4e2afdedf4c6 100644 --- a/cfg/7000.c +++ b/cfg/7000.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -34,20 +34,20 @@ #define IWL3160_DCCM_LEN 0x10000 #define IWL7265_DCCM_LEN 0x17A00 -#define IWL7260_FW_PRE "iwlwifi-7260-" -#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode" +#define IWL7260_FW_PRE "iwlwifi-7260" +#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE "-" __stringify(api) ".ucode" -#define IWL3160_FW_PRE "iwlwifi-3160-" -#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" +#define IWL3160_FW_PRE "iwlwifi-3160" +#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE "-" __stringify(api) ".ucode" -#define IWL3168_FW_PRE "iwlwifi-3168-" -#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode" +#define IWL3168_FW_PRE "iwlwifi-3168" +#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE "-" __stringify(api) ".ucode" -#define IWL7265_FW_PRE "iwlwifi-7265-" -#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" +#define IWL7265_FW_PRE "iwlwifi-7265" +#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE "-" __stringify(api) ".ucode" -#define IWL7265D_FW_PRE "iwlwifi-7265D-" -#define 
IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" +#define IWL7265D_FW_PRE "iwlwifi-7265D" +#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl7000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_16K, diff --git a/cfg/8000.c b/cfg/8000.c index a6454287d506..d09cf8d7dc01 100644 --- a/cfg/8000.c +++ b/cfg/8000.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2014, 2018-2020 Intel Corporation + * Copyright (C) 2014, 2018-2020, 2023 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -27,13 +27,13 @@ #define IWL8260_SMEM_OFFSET 0x400000 #define IWL8260_SMEM_LEN 0x68000 -#define IWL8000_FW_PRE "iwlwifi-8000C-" +#define IWL8000_FW_PRE "iwlwifi-8000C" #define IWL8000_MODULE_FIRMWARE(api) \ - IWL8000_FW_PRE __stringify(api) ".ucode" + IWL8000_FW_PRE "-" __stringify(api) ".ucode" -#define IWL8265_FW_PRE "iwlwifi-8265-" +#define IWL8265_FW_PRE "iwlwifi-8265" #define IWL8265_MODULE_FIRMWARE(api) \ - IWL8265_FW_PRE __stringify(api) ".ucode" + IWL8265_FW_PRE "-" __stringify(api) ".ucode" #define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" diff --git a/cfg/9000.c b/cfg/9000.c index 7a7ca06d46c1..0130d9a9b78b 100644 --- a/cfg/9000.c +++ b/cfg/9000.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2021, 2023 Intel Corporation */ #include <linux/module.h> #include <linux/stringify.h> @@ -26,12 +26,12 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" -#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" +#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0" +#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0" #define IWL9000_MODULE_FIRMWARE(api) \ - IWL9000_FW_PRE __stringify(api) ".ucode" + IWL9000_FW_PRE "-" __stringify(api) ".ucode" #define IWL9260_MODULE_FIRMWARE(api) \ - IWL9260_FW_PRE __stringify(api) ".ucode" + IWL9260_FW_PRE "-" __stringify(api) ".ucode" static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, diff --git a/cfg/ax210.c b/cfg/ax210.c new file mode 100644 index 000000000000..8d5f9dce71d5 --- /dev/null +++ b/cfg/ax210.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version supported */ +#define IWL_AX210_UCODE_API_MAX 83 + +/* Lowest firmware API version supported */ +#define IWL_AX210_UCODE_API_MIN 59 + +/* NVM versions */ +#define IWL_AX210_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_AX210_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_AX210_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_AX210_DCCM2_OFFSET 0x880000 +#define IWL_AX210_DCCM2_LEN 0x8000 +#define IWL_AX210_SMEM_OFFSET 0x400000 +#define IWL_AX210_SMEM_LEN 0xD0000 + +#define IWL_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0" +#define IWL_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0" +#define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0" +#define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0" +#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0" +#define IWL_SO_A_MR_A_FW_PRE 
"iwlwifi-so-a0-mr-a0" +#define IWL_MA_A_HR_B_FW_PRE "iwlwifi-ma-a0-hr-b0" +#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0" +#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0" +#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0" +#define IWL_MA_B_HR_B_FW_PRE "iwlwifi-ma-b0-hr-b0" +#define IWL_MA_B_GF_A_FW_PRE "iwlwifi-ma-b0-gf-a0" +#define IWL_MA_B_GF4_A_FW_PRE "iwlwifi-ma-b0-gf4-a0" +#define IWL_MA_B_MR_A_FW_PRE "iwlwifi-ma-b0-mr-a0" + +#define IWL_SO_A_JF_B_MODULE_FIRMWARE(api) \ + IWL_SO_A_JF_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SO_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_SO_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SO_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_SO_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_TY_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_TY_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_A_MR_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(api) \ + IWL_MA_B_MR_A_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_ax210_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_AX210_COMMON \ + .ucode_api_min = IWL_AX210_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_AX210_DCCM_OFFSET, \ + .dccm_len = IWL_AX210_DCCM_LEN, \ + .dccm2_offset = IWL_AX210_DCCM2_OFFSET, \ + .dccm2_len = IWL_AX210_DCCM2_LEN, \ + .smem_offset = IWL_AX210_SMEM_OFFSET, \ + .smem_len = IWL_AX210_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x380, \ + .ht_params = &iwl_22000_ht_params, \ + .nvm_ver = IWL_AX210_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0x400000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + } + +#define IWL_DEVICE_AX210 \ + IWL_DEVICE_AX210_COMMON, \ + .ucode_api_max = IWL_AX210_UCODE_API_MAX, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_AX210, \ + .trans.base_params = &iwl_ax210_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = 
DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + } + +const struct iwl_cfg_trans_params iwl_so_trans_cfg = { + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + /* TODO: the following values need to be checked */ + .xtal_latency = 500, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US, +}; + +const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = { + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + .low_latency_xtal = true, + .xtal_latency = 12000, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg = { + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .umac_prph_offset = 0x300000, + .integrated = true, + .low_latency_xtal = true, + .xtal_latency = 12000, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, + .imr_enabled = true, +}; + +/* + * If the device doesn't support HE, no need to have that many buffers. + * AX210 devices can split multiple frames into a single RB, so fewer are + * needed; AX210 cannot (but use smaller RBs by default) - these sizes + * were picked according to 8 MSDUs inside 256 A-MSDUs in an A-MPDU, with + * additional overhead to account for processing time. 
+ */ +#define IWL_NUM_RBDS_NON_HE 512 +#define IWL_NUM_RBDS_AX210_HE 4096 + +const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_AX210, + .base_params = &iwl_ax210_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000 +}; + +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; +const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; +const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; +const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; + +const char iwl_ax210_killer_1675w_name[] = + "Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)"; +const char iwl_ax210_killer_1675x_name[] = + "Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)"; +const char iwl_ax211_killer_1675s_name[] = + "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax211_killer_1675i_name[] = + "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)"; +const char iwl_ax411_killer_1690s_name[] = + "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)"; +const char iwl_ax411_killer_1690i_name[] = + "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)"; + +const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL_SO_A_JF_B_FW_PRE, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_NON_HE, +}; + +const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = { + .name = iwl_ax211_name, + .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = { + .name = iwl_ax211_name, + .fw_name_pre = IWL_SO_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, + .trans.xtal_latency = 12000, + .trans.low_latency_xtal = true, +}; + +const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { + .name = "Intel(R) Wi-Fi 6 AX210 160MHz", + .fw_name_pre = IWL_TY_A_GF_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = { + .name = iwl_ax411_name, + .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = { + .name = iwl_ax411_name, + .fw_name_pre = IWL_SO_A_GF4_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, + .trans.xtal_latency = 12000, + .trans.low_latency_xtal = true, +}; + +const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = { + .fw_name_pre = IWL_SO_A_MR_A_FW_PRE, + .uhb_supported = false, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_ma = { + .fw_name_mac = "ma", + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = { + .fw_name_pre = IWL_SO_A_HR_B_FW_PRE, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + +MODULE_FIRMWARE(IWL_SO_A_JF_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_HR_B_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); 
+MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX)); diff --git a/cfg/bz.c b/cfg/bz.c new file mode 100644 index 000000000000..b9893b22e41d --- /dev/null +++ b/cfg/bz.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version supported */ +#define IWL_BZ_UCODE_API_MAX 83 + +/* Lowest firmware API version supported */ +#define IWL_BZ_UCODE_API_MIN 80 + +/* NVM versions */ +#define IWL_BZ_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_BZ_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_BZ_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_BZ_DCCM2_OFFSET 0x880000 +#define IWL_BZ_DCCM2_LEN 0x8000 +#define IWL_BZ_SMEM_OFFSET 0x400000 +#define IWL_BZ_SMEM_LEN 0xD0000 + +#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0" +#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0" +#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0" +#define IWL_BZ_A_FM_B_FW_PRE "iwlwifi-bz-a0-fm-b0" +#define IWL_BZ_A_FM_C_FW_PRE "iwlwifi-bz-a0-fm-c0" +#define IWL_BZ_A_FM4_B_FW_PRE "iwlwifi-bz-a0-fm4-b0" +#define IWL_GL_B_FM_B_FW_PRE "iwlwifi-gl-b0-fm-b0" +#define IWL_GL_C_FM_C_FW_PRE "iwlwifi-gl-c0-fm-c0" + +#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_GF4_A_MODULE_FIRMWARE(api) \ + IWL_BZ_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_FM_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_FM_C_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM_C_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_BZ_A_FM4_B_MODULE_FIRMWARE(api) \ + IWL_BZ_A_FM4_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_GL_B_FM_B_MODULE_FIRMWARE(api) \ + IWL_GL_B_FM_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_GL_C_FM_C_MODULE_FIRMWARE(api) \ + IWL_GL_C_FM_C_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_bz_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_BZ_COMMON \ + .ucode_api_max = IWL_BZ_UCODE_API_MAX, \ + .ucode_api_min = IWL_BZ_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_BZ_DCCM_OFFSET, \ + .dccm_len = IWL_BZ_DCCM_LEN, \ + .dccm2_offset = IWL_BZ_DCCM2_OFFSET, \ + .dccm2_len = IWL_BZ_DCCM2_LEN, \ + .smem_offset = IWL_BZ_SMEM_OFFSET, \ + .smem_len = IWL_BZ_SMEM_LEN, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + 
.mac_addr_from_csr = 0x30, \ + .nvm_ver = IWL_BZ_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0xD0000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + }, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_BZ, \ + .trans.base_params = &iwl_bz_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + }, \ + .mon_dbgi_regs = { \ + .write_ptr = { \ + .addr = DBGI_SRAM_FIFO_POINTERS, \ + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ + }, \ + } + +#define IWL_DEVICE_BZ \ + IWL_DEVICE_BZ_COMMON, \ + .ht_params = &iwl_22000_ht_params + +#define IWL_DEVICE_GL_A \ + IWL_DEVICE_BZ_COMMON, \ + .ht_params = &iwl_gl_a_ht_params + +/* + * If the device doesn't support HE, no need to have that many buffers. + * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * A-MPDU, with additional overhead to account for processing time. + */ +#define IWL_NUM_RBDS_NON_HE 512 +#define IWL_NUM_RBDS_BZ_HE 4096 + +const struct iwl_cfg_trans_params iwl_bz_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_BZ, + .base_params = &iwl_bz_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const char iwl_bz_name[] = "Intel(R) TBD Bz device"; + +const struct iwl_cfg iwl_cfg_bz = { + .fw_name_mac = "bz", + .uhb_supported = true, + IWL_DEVICE_BZ, + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_HE, +}; + +const struct iwl_cfg iwl_cfg_gl = { + .fw_name_mac = "gl", + .uhb_supported = true, + IWL_DEVICE_BZ, + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_BZ_HE, +}; + + +MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX)); diff --git a/cfg/sc.c b/cfg/sc.c new file mode 100644 index 000000000000..ad283fd22e2a --- /dev/null +++ b/cfg/sc.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation + */ +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-prph.h" +#include "fw/api/txq.h" + +/* Highest firmware API version 
supported */ +#define IWL_SC_UCODE_API_MAX 83 + +/* Lowest firmware API version supported */ +#define IWL_SC_UCODE_API_MIN 82 + +/* NVM versions */ +#define IWL_SC_NVM_VERSION 0x0a1d + +/* Memory offsets and lengths */ +#define IWL_SC_DCCM_OFFSET 0x800000 /* LMAC1 */ +#define IWL_SC_DCCM_LEN 0x10000 /* LMAC1 */ +#define IWL_SC_DCCM2_OFFSET 0x880000 +#define IWL_SC_DCCM2_LEN 0x8000 +#define IWL_SC_SMEM_OFFSET 0x400000 +#define IWL_SC_SMEM_LEN 0xD0000 + +#define IWL_SC_A_FM_B_FW_PRE "iwlwifi-sc-a0-fm-b0" +#define IWL_SC_A_FM_C_FW_PRE "iwlwifi-sc-a0-fm-c0" +#define IWL_SC_A_HR_A_FW_PRE "iwlwifi-sc-a0-hr-b0" +#define IWL_SC_A_HR_B_FW_PRE "iwlwifi-sc-a0-hr-b0" +#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0" +#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0" +#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0" + +#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_FM_C_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_HR_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_GF_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \ + IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode" + +static const struct iwl_base_params iwl_sc_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +#define IWL_DEVICE_BZ_COMMON \ + .ucode_api_max = IWL_SC_UCODE_API_MAX, \ + .ucode_api_min = IWL_SC_UCODE_API_MIN, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = 10, \ + .non_shared_ant = ANT_B, \ + .dccm_offset = IWL_SC_DCCM_OFFSET, \ + .dccm_len = IWL_SC_DCCM_LEN, \ + .dccm2_offset = IWL_SC_DCCM2_OFFSET, \ + .dccm2_len = IWL_SC_DCCM2_LEN, \ + .smem_offset = IWL_SC_SMEM_OFFSET, \ + .smem_len = IWL_SC_SMEM_LEN, \ + .apmg_not_supported = true, \ + .trans.mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = 0x30, \ + .nvm_ver = IWL_SC_NVM_VERSION, \ + .trans.rf_id = true, \ + .trans.gen2 = true, \ + .nvm_type = IWL_NVM_EXT, \ + .dbgc_supported = true, \ + .min_umac_error_event_table = 0xD0000, \ + .d3_debug_data_base_addr = 0x401000, \ + .d3_debug_data_length = 60 * 1024, \ + .mon_smem_regs = { \ + .write_ptr = { \ + .addr = LDBG_M2S_BUF_WPTR, \ + .mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = LDBG_M2S_BUF_WRAP_CNT, \ + .mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \ + }, \ + }, \ + .trans.umac_prph_offset = 0x300000, \ + .trans.device_family = IWL_DEVICE_FAMILY_SC, \ + .trans.base_params = &iwl_sc_base_params, \ + .min_txq_size = 128, \ + .gp2_reg_addr = 0xd02c68, \ + .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \ + .mon_dram_regs = { \ + .write_ptr = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \ + }, \ + .cycle_cnt = { \ + .addr = DBGC_DBGBUF_WRAP_AROUND, \ + .mask = 0xffffffff, \ + }, \ + .cur_frag = { \ + .addr = DBGC_CUR_DBGBUF_STATUS, \ + .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \ + }, \ + }, \ + .mon_dbgi_regs = { \ + .write_ptr = { \ 
+ .addr = DBGI_SRAM_FIFO_POINTERS, \ + .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \ + }, \ + } + +#define IWL_DEVICE_SC \ + IWL_DEVICE_BZ_COMMON, \ + .ht_params = &iwl_22000_ht_params + +/* + * If the device doesn't support HE, no need to have that many buffers. + * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an + * A-MPDU, with additional overhead to account for processing time. + */ +#define IWL_NUM_RBDS_NON_HE 512 +#define IWL_NUM_RBDS_SC_HE 4096 + +const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { + .device_family = IWL_DEVICE_FAMILY_SC, + .base_params = &iwl_sc_base_params, + .mq_rx_supported = true, + .rf_id = true, + .gen2 = true, + .integrated = true, + .umac_prph_offset = 0x300000, + .xtal_latency = 12000, + .low_latency_xtal = true, + .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, +}; + +const char iwl_sc_name[] = "Intel(R) TBD Sc device"; + +const struct iwl_cfg iwl_cfg_sc = { + .fw_name_mac = "sc", + .uhb_supported = true, + IWL_DEVICE_SC, + .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, + .num_rbds = IWL_NUM_RBDS_SC_HE, +}; + +MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_HR_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX)); diff --git a/fw/acpi.c b/fw/acpi.c index 33aae639ad37..dfe8357036eb 100644 --- a/fw/acpi.c +++ b/fw/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2022 Intel Corporation + * Copyright (C) 2019-2023 Intel Corporation */ #include <linux/uuid.h> #include <linux/dmi.h> @@ -38,7 +38,35 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = { }, { .ident = "ASUS", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."), + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "HP"), + }, + }, + { .ident = "GOOGLE-ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."), + }, + }, + { .ident = "GOOGLE-SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Google"), + DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), }, }, {} @@ -66,7 +94,7 @@ static int iwl_acpi_get_handle(struct device *dev, acpi_string method, return 0; } -void *iwl_acpi_get_object(struct device *dev, acpi_string method) +static void *iwl_acpi_get_object(struct device *dev, acpi_string method) { struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_handle handle; @@ -87,7 +115,6 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method) } return buf.pointer; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_object); /* * Generic function for evaluating a method defined in the device specific @@ -209,11 +236,12 @@ int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, } IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32); -union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device 
*dev, - union acpi_object *data, - int min_data_size, - int max_data_size, - int *tbl_rev) +static union acpi_object * +iwl_acpi_get_wifi_pkg_range(struct device *dev, + union acpi_object *data, + int min_data_size, + int max_data_size, + int *tbl_rev) { int i; union acpi_object *wifi_pkg; @@ -264,7 +292,16 @@ union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, found: return wifi_pkg; } -IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range); + +static union acpi_object * +iwl_acpi_get_wifi_pkg(struct device *dev, + union acpi_object *data, + int data_size, int *tbl_rev) +{ + return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size, + tbl_rev); +} + int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, union iwl_tas_config_cmd *cmd, int fw_ver) @@ -937,6 +974,9 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, { int i, j; + if (!fwrt->geo_enabled) + return -ENODATA; + if (!iwl_sar_geo_support(fwrt)) return -EOPNOTSUPP; @@ -1003,8 +1043,10 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands = 0; int idx = 2; + u8 cmd_ver; fwrt->ppag_flags = 0; + fwrt->ppag_table_valid = false; data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) @@ -1051,8 +1093,15 @@ read_table: } fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK; - - if (!fwrt->ppag_flags) { + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) { + ret = -EINVAL; + goto out_free; + } + if (!fwrt->ppag_flags && cmd_ver <= 3) { ret = 0; goto out_free; } @@ -1073,21 +1122,22 @@ read_table: } fwrt->ppag_chains[i].subbands[j] = ent->integer.value; - + /* from ver 4 the fw deals with out of range values */ + if (cmd_ver >= 4) + continue; if ((j == 0 && (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || (j != 0 && (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { - fwrt->ppag_flags = 0; ret = -EINVAL; goto out_free; } } } - + fwrt->ppag_table_valid = true; ret = 0; out_free: @@ -1103,50 +1153,73 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c int i, j, num_sub_bands; s8 *gain; + /* many firmware images for JF lie about this */ + if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) == + CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) + return -EOPNOTSUPP; + if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { IWL_DEBUG_RADIO(fwrt, "PPAG capability not supported by FW, command not sent.\n"); return -EINVAL; - } - if (!fwrt->ppag_flags) { - IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); - return -EINVAL; - } + } + + cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), + IWL_FW_CMD_VER_UNKNOWN); + if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) { + IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); + return -EINVAL; + } /* The 'flags' field is the same in v1 and in v2 so we can just * use v1 to access it. 
*/ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags); - cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, - WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), - IWL_FW_CMD_VER_UNKNOWN); + + IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); if (cmd_ver == 1) { num_sub_bands = IWL_NUM_SUB_BANDS_V1; gain = cmd->v1.gain[0]; *cmd_size = sizeof(cmd->v1); if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) { + /* in this case FW supports revision 0 */ IWL_DEBUG_RADIO(fwrt, - "PPAG table rev is %d but FW supports v1, sending truncated table\n", + "PPAG table rev is %d, send truncated table\n", fwrt->ppag_ver); - cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); } - } else if (cmd_ver == 2 || cmd_ver == 3) { + } else if (cmd_ver >= 2 && cmd_ver <= 4) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; gain = cmd->v2.gain[0]; *cmd_size = sizeof(cmd->v2); if (fwrt->ppag_ver == 0) { + /* in this case FW supports revisions 1 or 2 */ IWL_DEBUG_RADIO(fwrt, - "PPAG table is v1 but FW supports v2, sending padded table\n"); - } else if (cmd_ver == 2 && fwrt->ppag_ver == 2) { - IWL_DEBUG_RADIO(fwrt, - "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); - cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + "PPAG table rev is 0, send padded table\n"); } } else { IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); return -EINVAL; } + /* ppag mode */ + IWL_DEBUG_RADIO(fwrt, + "PPAG MODE bits were read from bios: %d\n", + cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK)); + if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) || + (cmd_ver == 2 && fwrt->ppag_ver == 2)) { + cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); + IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); + } else { + IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); + } + + IWL_DEBUG_RADIO(fwrt, + "PPAG MODE bits going to be sent: %d\n", + cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK)); + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { for (j = 0; j < num_sub_bands; j++) { gain[i * num_sub_bands + j] = @@ -1175,3 +1248,40 @@ bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) return true; } IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved); + +void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters) +{ + struct iwl_phy_specific_cfg tmp = {}; + union acpi_object *wifi_pkg, *data; + int tbl_rev, i; + + data = iwl_acpi_get_object(fwrt->dev, ACPI_WPFC_METHOD); + if (IS_ERR(data)) + return; + + /* try to read wtas table revision 1 or revision 0*/ + wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data, + ACPI_WPFC_WIFI_DATA_SIZE, + &tbl_rev); + if (IS_ERR(wifi_pkg)) + goto out_free; + + if (tbl_rev != 0) + goto out_free; + + BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE); + + for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) { + if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER) + return; + tmp.filter_cfg_chains[i] = + cpu_to_le32(wifi_pkg->package.elements[i].integer.value); + } + + IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n"); + *filters = tmp; +out_free: + kfree(data); +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters); diff --git a/fw/acpi.h b/fw/acpi.h index 6f361c59106f..c36c62d6414d 100644 --- a/fw/acpi.h +++ b/fw/acpi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ @@ 
-11,6 +11,7 @@ #include "fw/api/power.h" #include "fw/api/phy.h" #include "fw/api/nvm-reg.h" +#include "fw/api/config.h" #include "fw/img.h" #include "iwl-trans.h" @@ -23,6 +24,7 @@ #define ACPI_ECKV_METHOD "ECKV" #define ACPI_PPAG_METHOD "PPAG" #define ACPI_WTAS_METHOD "WTAS" +#define ACPI_WPFC_METHOD "WPFC" #define ACPI_WIFI_DOMAIN (0x07) @@ -54,6 +56,7 @@ #define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \ ACPI_SAR_NUM_CHAINS_REV2 * \ ACPI_SAR_NUM_SUB_BANDS_REV2 + 3) +#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */ /* revision 0 and 1 are identical, except for the semantics in the FW */ #define ACPI_GEO_NUM_BANDS_REV0 2 @@ -168,19 +171,12 @@ struct iwl_fw_runtime; extern const guid_t iwl_guid; extern const guid_t iwl_rfi_guid; -void *iwl_acpi_get_object(struct device *dev, acpi_string method); - int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, const guid_t *guid, u8 *value); int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, const guid_t *guid, u32 *value); -union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev, - union acpi_object *data, - int min_data_size, - int max_data_size, - int *tbl_rev); /** * iwl_acpi_get_mcc - read MCC from ACPI, if available * @@ -232,12 +228,10 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt); -#else /* CONFIG_ACPI */ +void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters); -static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) -{ - return ERR_PTR(-ENOENT); -} +#else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, union acpi_object *args) @@ -257,15 +251,6 @@ static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func, return -ENOENT; } -static inline union acpi_object * -iwl_acpi_get_wifi_pkg_range(struct device *dev, - union acpi_object *data, - int min_data_size, int max_data_size, - int *tbl_rev) -{ - return ERR_PTR(-ENOENT); -} - static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc) { return -ENOENT; @@ -335,15 +320,11 @@ static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt) return false; } -#endif /* CONFIG_ACPI */ - -static inline union acpi_object * -iwl_acpi_get_wifi_pkg(struct device *dev, - union acpi_object *data, - int data_size, int *tbl_rev) +static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt, + struct iwl_phy_specific_cfg *filters) { - return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size, - tbl_rev); } +#endif /* CONFIG_ACPI */ + #endif /* __iwl_fw_acpi__ */ diff --git a/fw/api/binding.h b/fw/api/binding.h index 29e2816e7052..d9044ada6a43 100644 --- a/fw/api/binding.h +++ b/fw/api/binding.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -18,7 +18,7 @@ * ( BINDING_CONTEXT_CMD = 0x2b ) * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @macs: array of MAC id and colors which belong to the binding, * &enum iwl_ctxt_id_and_color * @phy: PHY id and color which belongs to the binding, @@ 
-38,7 +38,7 @@ struct iwl_binding_cmd_v1 { * ( BINDING_CONTEXT_CMD = 0x2b ) * @id_and_color: ID and color of the relevant Binding, * &enum iwl_ctxt_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @macs: array of MAC id and colors which belong to the binding * &enum iwl_ctxt_id_and_color * @phy: PHY id and color which belongs to the binding @@ -59,14 +59,6 @@ struct iwl_binding_cmd { #define IWL_LMAC_24G_INDEX 0 #define IWL_LMAC_5G_INDEX 1 -static inline u32 iwl_mvm_get_lmac_id(const struct iwl_fw *fw, - enum nl80211_band band){ - if (!fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) || - band == NL80211_BAND_2GHZ) - return IWL_LMAC_24G_INDEX; - return IWL_LMAC_5G_INDEX; -} - /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 diff --git a/fw/api/commands.h b/fw/api/commands.h index c78d2f1c722c..13cb0d53a1a3 100644 --- a/fw/api/commands.h +++ b/fw/api/commands.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ @@ -20,6 +20,8 @@ * &enum iwl_phy_ops_subcmd_ids * @DATA_PATH_GROUP: data path group, uses command IDs from * &enum iwl_data_path_subcmd_ids + * @SCAN_GROUP: scan group, uses command IDs from + * &enum iwl_scan_subcmd_ids * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids * @LOCATION_GROUP: location group, uses command IDs from * &enum iwl_location_subcmd_ids @@ -36,6 +38,7 @@ enum iwl_mvm_command_groups { MAC_CONF_GROUP = 0x3, PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, + SCAN_GROUP = 0x6, NAN_GROUP = 0x7, LOCATION_GROUP = 0x8, PROT_OFFLOAD_GROUP = 0xb, @@ -136,11 +139,6 @@ enum iwl_legacy_cmds { REMOVE_STA = 0x19, /** - * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd - */ - FW_GET_ITEM_CMD = 0x1a, - - /** * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or * &struct iwl_tx_cmd_gen3, * response in &struct iwl_mvm_tx_resp or @@ -150,6 +148,7 @@ enum iwl_legacy_cmds { /** * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd + * response in &struct iwl_tx_path_flush_cmd_rsp */ TXPATH_FLUSH = 0x1e, @@ -262,6 +261,24 @@ enum iwl_legacy_cmds { HOT_SPOT_CMD = 0x53, /** + * @WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION: Time Sync + * measurement notification for TM/FTM. Sent on receipt of + * respective WNM action frame for TM protocol or public action + * frame for FTM protocol from peer device along with additional + * meta data specified in &struct iwl_time_msmt_notify + */ + WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION = 0x67, + + /** + * @WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION: Time Sync + * measurement confirmation notification for TM/FTM. 
Sent on + * receipt of Ack from peer for previously Tx'ed TM/FTM + * action frame along with additional meta data specified in + * &struct iwl_time_msmt_cfm_notify + */ + WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION = 0x68, + + /** * @SCAN_OFFLOAD_COMPLETE: * notification, &struct iwl_periodic_scan_complete */ @@ -512,12 +529,6 @@ enum iwl_legacy_cmds { PROT_OFFLOAD_CONFIG_CMD = 0xd4, /** - * @OFFLOADS_QUERY_CMD: - * No data in command, response in &struct iwl_wowlan_status - */ - OFFLOADS_QUERY_CMD = 0xd5, - - /** * @D0I3_END_CMD: End D0i3/D3 state, no command data */ D0I3_END_CMD = 0xed, @@ -544,18 +555,22 @@ enum iwl_legacy_cmds { WOWLAN_TKIP_PARAM = 0xe3, /** - * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd + * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd_v2, + * &struct iwl_wowlan_kek_kck_material_cmd_v3 or + * &struct iwl_wowlan_kek_kck_material_cmd_v4 */ WOWLAN_KEK_KCK_MATERIAL = 0xe4, /** - * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status + * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status_v6, + * &struct iwl_wowlan_status_v7, &struct iwl_wowlan_status_v9 or + * &struct iwl_wowlan_status_v12 */ WOWLAN_GET_STATUSES = 0xe5, /** - * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: - * No command data, response is &struct iwl_scan_offload_profiles_query + * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: No command data, response is + * &struct iwl_scan_offload_profiles_query_v1 */ SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, }; diff --git a/fw/api/config.h b/fw/api/config.h index 087354b3c308..4419631604b4 100644 --- a/fw/api/config.h +++ b/fw/api/config.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -67,17 +67,12 @@ enum iwl_calib_cfg { * Sent as part of the phy configuration command (v3) to configure specific FW * defined PHY filters that can be applied to each antenna. 
* - * @filter_cfg_chain_a: filter config id for LMAC1 chain A - * @filter_cfg_chain_b: filter config id for LMAC1 chain B - * @filter_cfg_chain_c: filter config id for LMAC2 chain A - * @filter_cfg_chain_d: filter config id for LMAC2 chain B - * values: 0 - no filter; 0xffffffff - reserved; otherwise - filter id + * @filter_cfg_chains: filter config id for LMAC1 chain A, LMAC1 chain B, + * LMAC2 chain A, LMAC2 chain B (in that order) + * values: 0: no filter; 0xffffffff: reserved; otherwise: filter id */ struct iwl_phy_specific_cfg { - __le32 filter_cfg_chain_a; - __le32 filter_cfg_chain_b; - __le32 filter_cfg_chain_c; - __le32 filter_cfg_chain_d; + __le32 filter_cfg_chains[4]; } __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/ /** diff --git a/fw/api/context.h b/fw/api/context.h index 105ba7170c3f..1fa5678c1cd6 100644 --- a/fw/api/context.h +++ b/fw/api/context.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2012-2014, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -26,13 +26,18 @@ enum iwl_ctxt_id_and_color { #define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ ((_color) << FW_CTXT_COLOR_POS)) -/* Possible actions on PHYs, MACs and Bindings */ +/** + * enum iwl_ctxt_action - Posssible actions on PHYs, MACs, Bindings and other + * @FW_CTXT_ACTION_INVALID: unused, invalid action + * @FW_CTXT_ACTION_ADD: add the context + * @FW_CTXT_ACTION_MODIFY: modify the context + * @FW_CTXT_ACTION_REMOVE: remove the context + */ enum iwl_ctxt_action { - FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_INVALID = 0, FW_CTXT_ACTION_ADD, FW_CTXT_ACTION_MODIFY, FW_CTXT_ACTION_REMOVE, - FW_CTXT_ACTION_NUM }; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ #endif /* __iwl_fw_api_context_h__ */ diff --git a/fw/api/d3.h b/fw/api/d3.h index 4cd9ab23954e..72d461c47323 100644 --- a/fw/api/d3.h +++ b/fw/api/d3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -47,12 +47,14 @@ struct iwl_d3_manager_config { * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid + * @IWL_D3_PROTO_OFFLOAD_BTM: BTM offload is enabled */ enum iwl_proto_offloads { IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), IWL_D3_PROTO_OFFLOAD_NS = BIT(1), IWL_D3_PROTO_IPV4_VALID = BIT(2), IWL_D3_PROTO_IPV6_VALID = BIT(3), + IWL_D3_PROTO_OFFLOAD_BTM = BIT(4), }; #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 @@ -394,6 +396,7 @@ struct iwl_wowlan_config_cmd { #define WOWLAN_KEY_MAX_SIZE 32 #define WOWLAN_GTK_KEYS_NUM 2 #define WOWLAN_IGTK_KEYS_NUM 2 +#define WOWLAN_IGTK_MIN_INDEX 4 /* * WOWLAN_TSC_RSC_PARAMS @@ -610,6 +613,7 @@ struct iwl_wowlan_gtk_status_v3 { } __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */ #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1)) +#define IWL_WOWLAN_IGTK_BIGTK_IDX_MASK (BIT(0)) /** * struct iwl_wowlan_igtk_status - IGTK status @@ -766,6 +770,98 @@ struct iwl_wowlan_status_v12 { u8 wake_packet[]; /* can be truncated from _length to _bufsize */ } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */ +/** + * struct iwl_wowlan_info_notif_v1 - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK 
data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @reserved1: reserved + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @wake_packet_length: wakeup packet length + * @wake_packet_bufsize: wakeup packet buffer size + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @reserved2: reserved + */ +struct iwl_wowlan_info_notif_v1 { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */ + +/** + * struct iwl_wowlan_info_notif - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @reserved1: reserved + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @reserved2: reserved + */ +struct iwl_wowlan_info_notif { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 reserved1; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 reserved2[2]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */ + +/** + * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification + * @wake_packet_length: wakeup packet length + * @station_id: station id + * @reserved: unused + * @wake_packet: wakeup packet + */ +struct iwl_wowlan_wake_pkt_notif { + __le32 wake_packet_length; + u8 station_id; + u8 reserved[3]; + u8 wake_packet[1]; +} __packed; /* WOWLAN_WAKE_PKT_NTFY_API_S_VER_1 */ + +/** + * struct iwl_mvm_d3_end_notif - d3 end notification + * @flags: See &enum iwl_d0i3_flags + */ +struct iwl_mvm_d3_end_notif { + __le32 flags; +} __packed; + /* TODO: NetDetect API */ #endif /* __iwl_fw_api_d3_h__ */ diff --git a/fw/api/datapath.h b/fw/api/datapath.h index 43619acc29fd..751b596ea1a5 100644 --- a/fw/api/datapath.h +++ b/fw/api/datapath.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -27,7 +27,20 @@ enum iwl_data_path_subcmd_ids { TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, /** - * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd + * @WNM_PLATFORM_PTM_REQUEST_CMD: &struct iwl_time_sync_cfg_cmd + 
*/ + WNM_PLATFORM_PTM_REQUEST_CMD = 0x3, + + /** + * @WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD: + * &struct iwl_time_sync_cfg_cmd + */ + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD = 0x4, + + /** + * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd_v1, + * &struct iwl_he_sta_context_cmd_v2 or + * &struct iwl_he_sta_context_cmd_v3 */ STA_HE_CTXT_CMD = 0x7, @@ -72,13 +85,18 @@ enum iwl_data_path_subcmd_ids { SCD_QUEUE_CONFIG_CMD = 0x17, /** + * @SEC_KEY_CMD: security key command, uses &struct iwl_sec_key_cmd + */ + SEC_KEY_CMD = 0x18, + + /** * @MONITOR_NOTIF: Datapath monitoring notification, using * &struct iwl_datapath_monitor_notif */ MONITOR_NOTIF = 0xF4, /** - * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data + * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data or &struct iwl_rx_no_data_ver_3 */ RX_NO_DATA_NOTIF = 0xF5, @@ -141,6 +159,177 @@ enum iwl_channel_estimation_flags { IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2), }; +enum iwl_time_sync_protocol_type { + IWL_TIME_SYNC_PROTOCOL_TM = BIT(0), + IWL_TIME_SYNC_PROTOCOL_FTM = BIT(1), +}; /* WNM_TIMING_ENABLED_PROTOCOL_API_E_VER_1 */ + +/** + * struct iwl_time_sync_cfg_cmd - TM/FTM time sync measurement configuration + * + * @protocols: The type of frames to raise notifications for. A bitmap + * of @iwl_time_sync_protocol_type + * @peer_addr: peer address with which TM/FTM measurements are required + * @reserved: for alignment + */ +struct iwl_time_sync_cfg_cmd { + __le32 protocols; + u8 peer_addr[ETH_ALEN]; + u8 reserved[2]; +} __packed; /* WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD_API_S_VER_1 */ + +/** + * enum iwl_synced_time_operation - PTM request options + * + * @IWL_SYNCED_TIME_OPERATION_READ_ARTB: read only the ARTB time + * @IWL_SYNCED_TIME_OPERATION_READ_GP2: read only the GP2 time + * @IWL_SYNCED_TIME_OPERATION_READ_BOTH: latch the ARTB and GP2 clocks and + * provide timestamps from both clocks for the same time point + */ +enum iwl_synced_time_operation { + IWL_SYNCED_TIME_OPERATION_READ_ARTB = 1, + IWL_SYNCED_TIME_OPERATION_READ_GP2, + IWL_SYNCED_TIME_OPERATION_READ_BOTH, +}; + +/** + * struct iwl_synced_time_cmd - request synced GP2/ARTB timestamps + * + * @operation: one of &enum iwl_synced_time_operation + */ +struct iwl_synced_time_cmd { + __le32 operation; +} __packed; /* WNM_80211V_TIMING_CMD_API_S_VER_1 */ + +/** + * struct iwl_synced_time_rsp - response to iwl_synced_time_cmd + * + * @operation: one of &enum iwl_synced_time_operation + * @platform_timestamp_hi: high DWORD of the ARTB clock timestamp in nanoseconds + * @platform_timestamp_lo: low DWORD of the ARTB clock timestamp in nanoseconds + * @gp2_timestamp_hi: high DWORD of the GP2 clock timestamp in 10's of + * nanoseconds + * @gp2_timestamp_lo: low DWORD of the GP2 clock timestamp in 10's of + * nanoseconds + */ +struct iwl_synced_time_rsp { + __le32 operation; + __le32 platform_timestamp_hi; + __le32 platform_timestamp_lo; + __le32 gp2_timestamp_hi; + __le32 gp2_timestamp_lo; +} __packed; /* WNM_80211V_TIMING_RSP_API_S_VER_1 */ + +/* PTP_CTX_MAX_DATA_SIZE_IN_API_D_VER_1 */ +#define PTP_CTX_MAX_DATA_SIZE 128 + +/** + * struct iwl_time_msmt_ptp_ctx - Vendor specific information element + * to allow a space for flexibility for the userspace App + * + * @element_id: element id of vendor specific ie + * @length: length of vendor specific ie + * @reserved: for alignment + * @data: vendor specific data blob + */ +struct iwl_time_msmt_ptp_ctx { + /* Differentiate between FTM and TM specific Vendor IEs */ + union { + struct { + u8 element_id; + u8 length; + __le16 reserved; + u8 
data[PTP_CTX_MAX_DATA_SIZE]; + } ftm; /* FTM specific vendor IE */ + struct { + u8 element_id; + u8 length; + u8 data[PTP_CTX_MAX_DATA_SIZE]; + } tm; /* TM specific vendor IE */ + }; +} __packed /* PTP_CTX_VER_1 */; + +/** + * struct iwl_time_msmt_notify - Time Sync measurement notification + * for TM/FTM, along with additional meta data. + * + * @peer_addr: peer address + * @reserved: for alignment + * @dialog_token: measurement flow dialog token number + * @followup_dialog_token: Measurement flow previous dialog token number + * @t1_hi: high dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_lo: low dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_max_err: maximum t1-time error in units of 10 nano seconds + * @t4_hi: high dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_lo: low dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_max_err: maximum t4-time error in units of 10 nano seconds + * @t2_hi: high dword of t2-time of the Rx'ed action frame arrival on + * receiver side in units of 10 nano seconds + * @t2_lo: low dword of t2-time of the Rx'ed action frame arrival on + * receiver side in units of 10 nano seconds + * @t2_max_err: maximum t2-time error in units of 10 nano seconds + * @t3_hi: high dword of t3-time of the Tx'ed action frame's Ack departure on + * receiver side in units of 10 nano seconds + * @t3_lo: low dword of t3-time of the Tx'ed action frame's Ack departure on + * receiver side in units of 10 nano seconds + * @t3_max_err: maximum t3-time error in units of 10 nano seconds + * @ptp: vendor specific information element + */ +struct iwl_time_msmt_notify { + u8 peer_addr[ETH_ALEN]; + u8 reserved[2]; + __le32 dialog_token; + __le32 followup_dialog_token; + __le32 t1_hi; + __le32 t1_lo; + __le32 t1_max_err; + __le32 t4_hi; + __le32 t4_lo; + __le32 t4_max_err; + __le32 t2_hi; + __le32 t2_lo; + __le32 t2_max_err; + __le32 t3_hi; + __le32 t3_lo; + __le32 t3_max_err; + struct iwl_time_msmt_ptp_ctx ptp; +} __packed; /* WNM_80211V_TIMING_MEASUREMENT_NTFY_API_S_VER_1 */ + +/** + * struct iwl_time_msmt_cfm_notify - Time Sync measurement confirmation + * notification for TM/FTM. Sent on receipt of 802.11 Ack from peer for the + * Tx'ed TM/FTM measurement action frame. 
+ * + * @peer_addr: peer address + * @reserved: for alignment + * @dialog_token: measurement flow dialog token number + * @t1_hi: high dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_lo: low dword of t1-time of the Tx'ed action frame departure on + * sender side in units of 10 nano seconds + * @t1_max_err: maximum t1-time error in units of 10 nano seconds + * @t4_hi: high dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_lo: low dword of t4-time of the Rx'ed action frame's Ack arrival on + * sender side in units of 10 nano seconds + * @t4_max_err: maximum t4-time error in units of 10 nano seconds + */ +struct iwl_time_msmt_cfm_notify { + u8 peer_addr[ETH_ALEN]; + u8 reserved[2]; + __le32 dialog_token; + __le32 t1_hi; + __le32 t1_lo; + __le32 t1_max_err; + __le32 t4_hi; + __le32 t4_lo; + __le32 t4_max_err; +} __packed; /* WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NTFY_API_S_VER_1 */ + /** * struct iwl_channel_estimation_cfg - channel estimation reporting config */ @@ -260,7 +449,7 @@ struct iwl_sad_properties { * @phy_id: PHY index * @rlc: RLC properties, &struct iwl_rlc_properties * @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties - * @flags: flags, &enum iwl_rlc_flags + * @flags: flags (unused) * @reserved: reserved */ struct iwl_rlc_config_cmd { @@ -377,9 +566,11 @@ enum iwl_scd_queue_cfg_operation { * @u.add.cb_size: size code * @u.add.bc_dram_addr: byte-count table IOVA * @u.add.tfdq_dram_addr: TFD queue IOVA - * @u.remove.queue: queue ID for removal - * @u.modify.sta_mask: new station mask for modify - * @u.modify.queue: queue ID to modify + * @u.remove.sta_mask: station mask of queue to remove + * @u.remove.tid: TID of queue to remove + * @u.modify.old_sta_mask: old station mask for modify + * @u.modify.tid: TID of queue to modify + * @u.modify.new_sta_mask: new station mask for modify */ struct iwl_scd_queue_cfg_cmd { __le32 operation; @@ -394,13 +585,89 @@ struct iwl_scd_queue_cfg_cmd { __le64 tfdq_dram_addr; } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */ struct { - __le32 queue; + __le32 sta_mask; + __le32 tid; } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */ struct { - __le32 sta_mask; - __le32 queue; + __le32 old_sta_mask; + __le32 tid; + __le32 new_sta_mask; } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */ } __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */ +/** + * enum iwl_sec_key_flags - security key command key flags + * @IWL_SEC_KEY_FLAG_CIPHER_MASK: cipher mask + * @IWL_SEC_KEY_FLAG_CIPHER_WEP: WEP cipher + * @IWL_SEC_KEY_FLAG_CIPHER_CCMP: CCMP/CMAC cipher + * @IWL_SEC_KEY_FLAG_CIPHER_TKIP: TKIP cipher + * @IWL_SEC_KEY_FLAG_CIPHER_GCMP: GCMP/GMAC cipher + * @IWL_SEC_KEY_FLAG_NO_TX: don't install for TX + * @IWL_SEC_KEY_FLAG_KEY_SIZE: large key size (WEP-104, GCMP-256, GMAC-256) + * @IWL_SEC_KEY_FLAG_MFP: MFP is in used for this key + * @IWL_SEC_KEY_FLAG_MCAST_KEY: this is a multicast key + * @IWL_SEC_KEY_FLAG_SPP_AMSDU: SPP A-MSDU should be used + */ +enum iwl_sec_key_flags { + IWL_SEC_KEY_FLAG_CIPHER_MASK = 0x07, + IWL_SEC_KEY_FLAG_CIPHER_WEP = 0x01, + IWL_SEC_KEY_FLAG_CIPHER_CCMP = 0x02, + IWL_SEC_KEY_FLAG_CIPHER_TKIP = 0x03, + IWL_SEC_KEY_FLAG_CIPHER_GCMP = 0x05, + IWL_SEC_KEY_FLAG_NO_TX = 0x08, + IWL_SEC_KEY_FLAG_KEY_SIZE = 0x10, + IWL_SEC_KEY_FLAG_MFP = 0x20, + IWL_SEC_KEY_FLAG_MCAST_KEY = 0x40, + IWL_SEC_KEY_FLAG_SPP_AMSDU = 0x80, +}; + 
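As a quick illustration of how the new SEC_KEY_CMD flags compose (a minimal sketch, not taken from the driver; the helper name is invented): the cipher selector occupies the low three bits covered by IWL_SEC_KEY_FLAG_CIPHER_MASK, and the remaining bits are independent attributes, so a pairwise GCMP-256 key on an MFP-capable association could be described as follows before being byte-swapped into the __le32 key_flags field of the command structure defined just below.

static u32 example_sec_key_flags_gcmp256_mfp(void)
{
	/* cipher selector lives in IWL_SEC_KEY_FLAG_CIPHER_MASK (bits 0..2) */
	u32 flags = IWL_SEC_KEY_FLAG_CIPHER_GCMP;

	flags |= IWL_SEC_KEY_FLAG_KEY_SIZE;	/* large-key (256-bit) variant */
	flags |= IWL_SEC_KEY_FLAG_MFP;		/* key used with management frame protection */
	/* IWL_SEC_KEY_FLAG_MCAST_KEY deliberately not set: pairwise key */

	return flags;	/* caller would store cpu_to_le32(flags) in key_flags */
}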
+#define IWL_SEC_WEP_KEY_OFFSET 3 + +/** + * struct iwl_sec_key_cmd - security key command + * @action: action from &enum iwl_ctxt_action + * @u.add.sta_mask: station mask for the new key + * @u.add.key_id: key ID (0-7) for the new key + * @u.add.key_flags: key flags per &enum iwl_sec_key_flags + * @u.add.key: key material. WEP keys should start from &IWL_SEC_WEP_KEY_OFFSET. + * @u.add.tkip_mic_rx_key: TKIP MIC RX key + * @u.add.tkip_mic_tx_key: TKIP MIC TX key + * @u.add.rx_seq: RX sequence counter value + * @u.add.tx_seq: TX sequence counter value + * @u.modify.old_sta_mask: old station mask + * @u.modify.new_sta_mask: new station mask + * @u.modify.key_id: key ID + * @u.modify.key_flags: new key flags + * @u.remove.sta_mask: station mask + * @u.remove.key_id: key ID + * @u.remove.key_flags: key flags + */ +struct iwl_sec_key_cmd { + __le32 action; + union { + struct { + __le32 sta_mask; + __le32 key_id; + __le32 key_flags; + u8 key[32]; + u8 tkip_mic_rx_key[8]; + u8 tkip_mic_tx_key[8]; + __le64 rx_seq; + __le64 tx_seq; + } __packed add; /* SEC_KEY_ADD_CMD_API_S_VER_1 */ + struct { + __le32 old_sta_mask; + __le32 new_sta_mask; + __le32 key_id; + __le32 key_flags; + } __packed modify; /* SEC_KEY_MODIFY_CMD_API_S_VER_1 */ + struct { + __le32 sta_mask; + __le32 key_id; + __le32 key_flags; + } __packed remove; /* SEC_KEY_REMOVE_CMD_API_S_VER_1 */ + } __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */ +} __packed; /* SEC_KEY_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_datapath_h__ */ diff --git a/fw/api/dbg-tlv.h b/fw/api/dbg-tlv.h index 52bf96585fc6..ba538d70985f 100644 --- a/fw/api/dbg-tlv.h +++ b/fw/api/dbg-tlv.h @@ -26,7 +26,7 @@ struct iwl_fw_ini_hcmd { u8 id; u8 group; __le16 reserved; - u8 data[0]; + u8 data[]; } __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */ /** @@ -275,7 +275,7 @@ struct iwl_fw_ini_conf_set_tlv { __le32 time_point; __le32 set_type; __le32 addr_offset; - struct iwl_fw_ini_addr_val addr_val[0]; + struct iwl_fw_ini_addr_val addr_val[]; } __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */ /** diff --git a/fw/api/debug.h b/fw/api/debug.h index 6255257ddebe..8fef38139bf6 100644 --- a/fw/api/debug.h +++ b/fw/api/debug.h @@ -43,6 +43,12 @@ enum iwl_debug_cmds { */ BUFFER_ALLOCATION = 0x8, /** + * @GET_TAS_STATUS: + * sends command to fw to get TAS status + * the response is &struct iwl_mvm_tas_status_resp + */ + GET_TAS_STATUS = 0xA, + /** * @FW_DUMP_COMPLETE_CMD: * sends command to fw once dump collection completed * &struct iwl_dbg_dump_complete_cmd @@ -240,7 +246,7 @@ struct iwl_mfu_assert_dump_notif { __le16 index_num; __le16 parts_num; __le32 data_size; - __le32 data[0]; + __le32 data[]; } __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */ /** @@ -276,7 +282,7 @@ struct iwl_mvm_marker { u8 marker_id; __le16 reserved; __le64 timestamp; - __le32 metadata[0]; + __le32 metadata[]; } __packed; /* MARKER_API_S_VER_1 */ /** @@ -421,4 +427,94 @@ struct iwl_dbg_dump_complete_cmd { __le32 tp_data; } __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */ +#define TAS_LMAC_BAND_HB 0 +#define TAS_LMAC_BAND_LB 1 +#define TAS_LMAC_BAND_UHB 2 +#define TAS_LMAC_BAND_INVALID 3 + +/** + * struct iwl_mvm_tas_status_per_mac - tas status per lmac + * @static_status: tas statically enabled or disabled per lmac - TRUE/FALSE + * @static_dis_reason: TAS static disable reason, uses + * &enum iwl_mvm_tas_statically_disabled_reason + * @dynamic_status: Current TAS status. uses + * &enum iwl_mvm_tas_dyna_status + * @near_disconnection: is TAS currently near disconnection per lmac? 
- TRUE/FALSE + * @max_reg_pwr_limit: Regulatory power limits in dBm + * @sar_limit: SAR limits per lmac in dBm + * @band: Band per lmac + * @reserved: reserved + */ +struct iwl_mvm_tas_status_per_mac { + u8 static_status; + u8 static_dis_reason; + u8 dynamic_status; + u8 near_disconnection; + __le16 max_reg_pwr_limit; + __le16 sar_limit; + u8 band; + u8 reserved[3]; +} __packed; /*DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1*/ + +/** + * struct iwl_mvm_tas_status_resp - Response to GET_TAS_STATUS + * @tas_fw_version: TAS FW version + * @is_uhb_for_usa_enable: is UHB enabled in USA? - TRUE/FALSE + * @curr_mcc: current mcc + * @block_list: country block list + * @tas_status_mac: TAS status per lmac, uses + * &struct iwl_mvm_tas_status_per_mac + * @in_dual_radio: is TAS in dual radio? - TRUE/FALSE + * @reserved: reserved + */ +struct iwl_mvm_tas_status_resp { + u8 tas_fw_version; + u8 is_uhb_for_usa_enable; + __le16 curr_mcc; + __le16 block_list[16]; + struct iwl_mvm_tas_status_per_mac tas_status_mac[2]; + u8 in_dual_radio; + u8 reserved[3]; +} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/ + +/** + * enum iwl_mvm_tas_dyna_status - TAS current running status + * @TAS_DYNA_INACTIVE: TAS status is inactive + * @TAS_DYNA_INACTIVE_MVM_MODE: TAS is disabled due because FW is in MVM mode + * or is in softap mode. + * @TAS_DYNA_INACTIVE_TRIGGER_MODE: TAS is disabled because FW is in + * multi user trigger mode + * @TAS_DYNA_INACTIVE_BLOCK_LISTED: TAS is disabled because current mcc + * is blocklisted mcc + * @TAS_DYNA_INACTIVE_UHB_NON_US: TAS is disabled because current band is UHB + * and current mcc is USA + * @TAS_DYNA_ACTIVE: TAS is currently active + * @TAS_DYNA_STATUS_MAX: TAS status max value + */ +enum iwl_mvm_tas_dyna_status { + TAS_DYNA_INACTIVE, + TAS_DYNA_INACTIVE_MVM_MODE, + TAS_DYNA_INACTIVE_TRIGGER_MODE, + TAS_DYNA_INACTIVE_BLOCK_LISTED, + TAS_DYNA_INACTIVE_UHB_NON_US, + TAS_DYNA_ACTIVE, + + TAS_DYNA_STATUS_MAX, +}; /*_TAS_DYNA_STATUS_E*/ + +/** + * enum iwl_mvm_tas_statically_disabled_reason - TAS statically disabled reason + * @TAS_DISABLED_DUE_TO_BIOS: TAS is disabled because TAS is disabled in BIOS + * @TAS_DISABLED_DUE_TO_SAR_6DBM: TAS is disabled because SAR limit is less than 6 Dbm + * @TAS_DISABLED_REASON_INVALID: TAS disable reason is invalid + * @TAS_DISABLED_REASON_MAX: TAS disable reason max value + */ +enum iwl_mvm_tas_statically_disabled_reason { + TAS_DISABLED_DUE_TO_BIOS, + TAS_DISABLED_DUE_TO_SAR_6DBM, + TAS_DISABLED_REASON_INVALID, + + TAS_DISABLED_REASON_MAX, +}; /*_TAS_STATICALLY_DISABLED_REASON_E*/ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/fw/api/filter.h b/fw/api/filter.h index e44c70b7c790..88fe61d144d4 100644 --- a/fw/api/filter.h +++ b/fw/api/filter.h @@ -33,7 +33,7 @@ struct iwl_mcast_filter_cmd { u8 pass_all; u8 bssid[6]; u8 reserved[2]; - u8 addr_list[0]; + u8 addr_list[]; } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ #endif /* __iwl_fw_api_filter_h__ */ diff --git a/fw/api/location.h b/fw/api/location.h index 12af94e166ed..b044990c7b87 100644 --- a/fw/api/location.h +++ b/fw/api/location.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ @@ -35,8 +35,11 @@ enum iwl_location_subcmd_ids { */ TOF_RANGE_REQ_EXT_CMD = 0x3, /** - * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, - * uses &struct 
iwl_tof_responder_config_cmd + * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, one of + * &struct iwl_tof_responder_config_cmd_v6, + * &struct iwl_tof_responder_config_cmd_v7, + * &struct iwl_tof_responder_config_cmd_v8 or + * &struct iwl_tof_responder_config_cmd_v9 */ TOF_RESPONDER_CONFIG_CMD = 0x4, /** @@ -69,8 +72,11 @@ enum iwl_location_subcmd_ids { */ TOF_MCSI_DEBUG_NOTIF = 0xFE, /** - * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using - * &struct iwl_tof_range_rsp_ntfy + * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using one of + * &struct iwl_tof_range_rsp_ntfy_v5, + * &struct iwl_tof_range_rsp_ntfy_v6, + * &struct iwl_tof_range_rsp_ntfy_v7 or + * &struct iwl_tof_range_rsp_ntfy_v8 */ TOF_RANGE_RESPONSE_NOTIF = 0xFF, }; diff --git a/fw/api/mac-cfg.h b/fw/api/mac-cfg.h index 712532f17630..184db5a6f06f 100644 --- a/fw/api/mac-cfg.h +++ b/fw/api/mac-cfg.h @@ -1,12 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_cfg_h__ #define __iwl_fw_api_mac_cfg_h__ +#include "mac.h" + /** * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs */ @@ -31,7 +33,30 @@ enum iwl_mac_conf_subcmd_ids { * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd */ CANCEL_CHANNEL_SWITCH_CMD = 0x6, - + /** + * @MAC_CONFIG_CMD: &struct iwl_mac_config_cmd + */ + MAC_CONFIG_CMD = 0x8, + /** + * @LINK_CONFIG_CMD: &struct iwl_link_config_cmd + */ + LINK_CONFIG_CMD = 0x9, + /** + * @STA_CONFIG_CMD: &struct iwl_mvm_sta_cfg_cmd + */ + STA_CONFIG_CMD = 0xA, + /** + * @AUX_STA_CMD: &struct iwl_mvm_aux_sta_cmd + */ + AUX_STA_CMD = 0xB, + /** + * @STA_REMOVE_CMD: &struct iwl_mvm_remove_sta_cmd + */ + STA_REMOVE_CMD = 0xC, + /** + * @STA_DISABLE_TX_CMD: &struct iwl_mvm_sta_disable_tx_cmd + */ + STA_DISABLE_TX_CMD = 0xD, /** * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif */ @@ -115,40 +140,60 @@ struct iwl_missed_vap_notif { * * @id_and_color: ID and color of the MAC */ -struct iwl_channel_switch_start_notif { +struct iwl_channel_switch_start_notif_v1 { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ +/** + * struct iwl_channel_switch_start_notif - Channel switch start notification + * + * @link_id: FW link id + */ +struct iwl_channel_switch_start_notif { + __le32 link_id; +} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_3 */ + #define CS_ERR_COUNT_ERROR BIT(0) #define CS_ERR_LONG_DELAY_AFTER_CS BIT(1) #define CS_ERR_LONG_TX_BLOCK BIT(2) #define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3) /** - * struct iwl_channel_switch_error_notif - Channel switch error notification + * struct iwl_channel_switch_error_notif_v1 - Channel switch error notification * * @mac_id: the mac for which the ucode sends the notification for * @csa_err_mask: mask of channel switch error that can occur */ -struct iwl_channel_switch_error_notif { +struct iwl_channel_switch_error_notif_v1 { __le32 mac_id; __le32 csa_err_mask; } __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */ /** + * struct iwl_channel_switch_error_notif - Channel switch error notification + * + * @link_id: FW link id + * @csa_err_mask: mask of channel switch error that can occur + */ +struct iwl_channel_switch_error_notif { + __le32 link_id; + __le32 csa_err_mask; +} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_2 */ + +/** * 
struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command * - * @mac_id: the mac that should cancel the channel switch + * @id: the id of the link or mac that should cancel the channel switch */ struct iwl_cancel_channel_switch_cmd { - __le32 mac_id; + __le32 id; } __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */ /** * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command * * @mac_id: MAC ID for channel switch - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @tsf: beacon tsf * @cs_count: channel switch count from CSA/eCSA IE * @cs_delayed_bcn_count: if set to N (!= 0) GO/AP can delay N beacon intervals @@ -182,4 +227,408 @@ struct iwl_mac_low_latency_cmd { __le16 reserved; } __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */ +/** + * struct iwl_mac_client_data - configuration data for client MAC context + * + * @is_assoc: 1 for associated state, 0 otherwise + * @esr_transition_timeout: the timeout required by the AP for the + * eSR transition. + * Available only from version 2 of the command. + * This values comes from the EMLSR transition delay in the EML + * Capabilities subfield. + * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j. + * @assoc_id: unique ID assigned by the AP during association + * @reserved1: alignment + * @data_policy: see &enum iwl_mac_data_policy + * @reserved2: alignment + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. + */ +struct iwl_mac_client_data { + u8 is_assoc; + u8 esr_transition_timeout; + __le16 medium_sync_delay; + + __le16 assoc_id; + __le16 reserved1; + __le16 data_policy; + __le16 reserved2; + __le32 ctwin; +} __packed; /* MAC_CONTEXT_CONFIG_CLIENT_DATA_API_S_VER_2 */ + +/** + * struct iwl_mac_p2p_dev_data - configuration data for P2P device MAC context + * + * @is_disc_extended: if set to true, P2P Device discoverability is enabled on + * other channels as well. This should be to true only in case that the + * device is discoverable and there is an active GO. Note that setting this + * field when not needed, will increase the number of interrupts and have + * effect on the platform power, as this setting opens the Rx filters on + * all macs. 
+ */ +struct iwl_mac_p2p_dev_data { + __le32 is_disc_extended; +} __packed; /* MAC_CONTEXT_CONFIG_P2P_DEV_DATA_API_S_VER_1 */ + +/** + * enum iwl_mac_config_filter_flags - MAC context configuration filter flags + * + * @MAC_CFG_FILTER_PROMISC: accept all data frames + * @MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT: pass all management and + * control frames to the host + * @MAC_CFG_FILTER_ACCEPT_GRP: accept multicast frames + * @MAC_CFG_FILTER_ACCEPT_BEACON: accept beacon frames + * @MAC_CFG_FILTER_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe response + * @MAC_CFG_FILTER_ACCEPT_PROBE_REQ: accept probe requests + */ +enum iwl_mac_config_filter_flags { + MAC_CFG_FILTER_PROMISC = BIT(0), + MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT = BIT(1), + MAC_CFG_FILTER_ACCEPT_GRP = BIT(2), + MAC_CFG_FILTER_ACCEPT_BEACON = BIT(3), + MAC_CFG_FILTER_ACCEPT_BCAST_PROBE_RESP = BIT(4), + MAC_CFG_FILTER_ACCEPT_PROBE_REQ = BIT(5), +}; /* MAC_FILTER_FLAGS_MASK_E_VER_1 */ + +/** + * struct iwl_mac_config_cmd - command structure to configure MAC contexts in + * MLD API + * ( MAC_CONTEXT_CONFIG_CMD = 0x8 ) + * + * @id_and_color: ID and color of the MAC + * @action: action to perform, see &enum iwl_ctxt_action + * @mac_type: one of &enum iwl_mac_types + * @local_mld_addr: mld address + * @reserved_for_local_mld_addr: reserved + * @filter_flags: combination of &enum iwl_mac_config_filter_flags + * @he_support: does this MAC support HE + * @he_ap_support: HE AP enabled, "pseudo HE", no trigger frame handling + * @eht_support: does this MAC support EHT. Requires he_support + * @nic_not_ack_enabled: mark that the NIC doesn't support receiving + * ACK-enabled AGG, (i.e. both BACK and non-BACK frames in single AGG). + * If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0 + * len delim to determine if AGG or single. + * @client: client mac data + * @go_ibss: mac data for go or ibss + * @p2p_dev: mac data for p2p device + */ +struct iwl_mac_config_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* MAC_CONTEXT_TYPE_API_E */ + __le32 mac_type; + u8 local_mld_addr[6]; + __le16 reserved_for_local_mld_addr; + __le32 filter_flags; + __le16 he_support; + __le16 he_ap_support; + __le32 eht_support; + __le32 nic_not_ack_enabled; + /* MAC_CONTEXT_CONFIG_SPECIFIC_DATA_API_U_VER_2 */ + union { + struct iwl_mac_client_data client; + struct iwl_mac_p2p_dev_data p2p_dev; + }; +} __packed; /* MAC_CONTEXT_CONFIG_CMD_API_S_VER_2 */ + +/** + * enum iwl_link_ctx_modify_flags - indicate to the fw what fields are being + * modified in &iwl_link_ctx_cfg_cmd + * + * @LINK_CONTEXT_MODIFY_ACTIVE: covers iwl_link_ctx_cfg_cmd::active + * @LINK_CONTEXT_MODIFY_RATES_INFO: covers iwl_link_ctx_cfg_cmd::cck_rates, + * iwl_link_ctx_cfg_cmd::ofdm_rates, + * iwl_link_ctx_cfg_cmd::cck_short_preamble, + * iwl_link_ctx_cfg_cmd::short_slot + * @LINK_CONTEXT_MODIFY_PROTECT_FLAGS: covers + * iwl_link_ctx_cfg_cmd::protection_flags + * @LINK_CONTEXT_MODIFY_QOS_PARAMS: covers iwl_link_ctx_cfg_cmd::qos_flags, + * iwl_link_ctx_cfg_cmd::ac, + * @LINK_CONTEXT_MODIFY_BEACON_TIMING: covers iwl_link_ctx_cfg_cmd::bi, + * iwl_link_ctx_cfg_cmd::dtim_interval, + * iwl_link_ctx_cfg_cmd::dtim_time, + * iwl_link_ctx_cfg_cmd::dtim_tsf, + * iwl_link_ctx_cfg_cmd::assoc_beacon_arrive_time. + * This flag can be set only once after assoc. 
+ * @LINK_CONTEXT_MODIFY_HE_PARAMS: covers + * iwl_link_ctx_cfg_cmd::htc_trig_based_pkt_ext + * iwl_link_ctx_cfg_cmd::rand_alloc_ecwmin, + * iwl_link_ctx_cfg_cmd::rand_alloc_ecwmax, + * iwl_link_ctx_cfg_cmd::trig_based_txf, + * iwl_link_ctx_cfg_cmd::bss_color, + * iwl_link_ctx_cfg_cmd::ndp_fdbk_buff_th_exp, + * iwl_link_ctx_cfg_cmd::ref_bssid_addr + * iwl_link_ctx_cfg_cmd::bssid_index, + * iwl_link_ctx_cfg_cmd::frame_time_rts_th. + * This flag can be set any time. + * @LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE: covers + * iwl_link_ctx_cfg_cmd::bss_color_disable + * @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask. + * This flag can be set only if the MAC that this link relates to has + * eht_support set to true. + * @LINK_CONTEXT_MODIFY_ALL: set all above flags + */ +enum iwl_link_ctx_modify_flags { + LINK_CONTEXT_MODIFY_ACTIVE = BIT(0), + LINK_CONTEXT_MODIFY_RATES_INFO = BIT(1), + LINK_CONTEXT_MODIFY_PROTECT_FLAGS = BIT(2), + LINK_CONTEXT_MODIFY_QOS_PARAMS = BIT(3), + LINK_CONTEXT_MODIFY_BEACON_TIMING = BIT(4), + LINK_CONTEXT_MODIFY_HE_PARAMS = BIT(5), + LINK_CONTEXT_MODIFY_BSS_COLOR_DISABLE = BIT(6), + LINK_CONTEXT_MODIFY_EHT_PARAMS = BIT(7), + LINK_CONTEXT_MODIFY_ALL = 0xff, +}; /* LINK_CONTEXT_MODIFY_MASK_E_VER_1 */ + +/** + * enum iwl_link_ctx_protection_flags - link protection flags + * @LINK_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, + * this will require CCK RTS/CTS2self. + * RTS/CTS will protect full burst time. + * @LINK_PROT_FLG_HT_PROT: enable HT protection + * @LINK_PROT_FLG_FAT_PROT: protect 40 MHz transmissions + * @LINK_PROT_FLG_SELF_CTS_EN: allow CTS2self + */ +enum iwl_link_ctx_protection_flags { + LINK_PROT_FLG_TGG_PROTECT = BIT(0), + LINK_PROT_FLG_HT_PROT = BIT(1), + LINK_PROT_FLG_FAT_PROT = BIT(2), + LINK_PROT_FLG_SELF_CTS_EN = BIT(3), +}; /* LINK_PROTECT_FLAGS_E_VER_1 */ + +/** + * enum iwl_link_ctx_flags - link context flags + * + * @LINK_FLG_BSS_COLOR_DIS: BSS color disable, don't use the BSS + * color for RX filter but use MAC header + * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG + * @LINK_FLG_MU_EDCA_CW: indicates that there is an element of MU EDCA + * parameter set, i.e. the backoff counters for trig-based ACs + * @LINK_FLG_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are + * not allowed (as there are OBSS that might classify such transmissions as + * radar pulses). + * @LINK_FLG_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change + * of threshold + */ +enum iwl_link_ctx_flags { + LINK_FLG_BSS_COLOR_DIS = BIT(0), + LINK_FLG_MU_EDCA_CW = BIT(1), + LINK_FLG_RU_2MHZ_BLOCK = BIT(2), + LINK_FLG_NDP_FEEDBACK_ENABLED = BIT(3), +}; /* LINK_CONTEXT_FLAG_E_VER_1 */ + +/** + * struct iwl_link_config_cmd - command structure to configure the LINK context + * in MLD API + * ( LINK_CONFIG_CMD =0x9 ) + * + * @action: action to perform, see &enum iwl_ctxt_action + * @link_id: the id of the link that this cmd configures + * @mac_id: interface ID. Relevant only if action is FW_CTXT_ACTION_ADD + * @phy_id: PHY index. Can be changed only if the link was inactive + * (and stays inactive). If the link is active (or becomes active), + * this field is ignored. + * @local_link_addr: the links MAC address. Can be changed only if the link was + * inactive (and stays inactive). If the link is active + * (or becomes active), this field is ignored. + * @reserved_for_local_link_addr: reserved + * @modify_mask: from &enum iwl_link_ctx_modify_flags, selects what to change. 
+ * Relevant only if action is FW_CTXT_ACTION_MODIFY + * @active: indicates whether the link is active or not + * @listen_lmac: indicates whether the link should be allocated on the Listen + * Lmac or on the Main Lmac. Cannot be changed on an active Link. + * Relevant only for eSR. + * @cck_rates: basic rates available for CCK + * @ofdm_rates: basic rates available for OFDM + * @cck_short_preamble: 1 for enabling short preamble, 0 otherwise + * @short_slot: 1 for enabling short slots, 0 otherwise + * @protection_flags: combination of &enum iwl_link_ctx_protection_flags + * @qos_flags: from &enum iwl_mac_qos_flags + * @ac: one iwl_mac_qos configuration for each AC + * @htc_trig_based_pkt_ext: default PE in 4us units + * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 + * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 + * @ndp_fdbk_buff_th_exp: set exponent for the NDP feedback buffered threshold + * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + * @bi: beacon interval in TU, applicable only when associated + * @dtim_interval: DTIM interval in TU. + * Relevant only for GO, otherwise this is offloaded. + * @puncture_mask: puncture mask for EHT + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @flags: a combination from &enum iwl_link_ctx_flags + * @flags_mask: what of %flags have changed. Also &enum iwl_link_ctx_flags + * Below fields are for multi-bssid: + * @ref_bssid_addr: reference BSSID used by the AP + * @reserved_for_ref_bssid_addr: reserved + * @bssid_index: index of the associated VAP + * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame + * @spec_link_id: link_id as the AP knows it + * @reserved: alignment + * @ibss_bssid_addr: bssid for ibss + * @reserved_for_ibss_bssid_addr: reserved + * @reserved1: reserved for future use + */ +struct iwl_link_config_cmd { + __le32 action; + __le32 link_id; + __le32 mac_id; + __le32 phy_id; + u8 local_link_addr[6]; + __le16 reserved_for_local_link_addr; + __le32 modify_mask; + __le32 active; + __le32 listen_lmac; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 cck_short_preamble; + __le32 short_slot; + __le32 protection_flags; + /* MAC_QOS_PARAM_API_S_VER_1 */ + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM + 1]; + u8 htc_trig_based_pkt_ext; + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + u8 ndp_fdbk_buff_th_exp; + struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; + __le32 bi; + __le32 dtim_interval; + __le16 puncture_mask; + __le16 frame_time_rts_th; + __le32 flags; + __le32 flags_mask; + /* The below fields are for multi-bssid */ + u8 ref_bssid_addr[6]; + __le16 reserved_for_ref_bssid_addr; + u8 bssid_index; + u8 bss_color; + u8 spec_link_id; + u8 reserved; + u8 ibss_bssid_addr[6]; + __le16 reserved_for_ibss_bssid_addr; + __le32 reserved1[8]; +} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */ + +/* Currently FW supports link ids in the range 0-3 and can have + * at most two active links for each vif. + */ +#define IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM 2 +#define IWL_MVM_FW_MAX_LINK_ID 3 + +/** + * enum iwl_fw_sta_type - FW station types + * @STATION_TYPE_PEER: represents a peer - AP in BSS, a TDLS sta, a client in + * P2P. + * @STATION_TYPE_BCAST_MGMT: The station used to send beacons and + * probe responses. Also used for traffic injection in sniffer mode + * @STATION_TYPE_MCAST: the station used for BCAST / MCAST in GO. 
Will be + * suspended / resumed at the right timing depending on the clients' + * power save state and the DTIM timing + * @STATION_TYPE_AUX: aux sta. In the FW there is no need for a special type + * for the aux sta, so this type is only for driver - internal use. + */ +enum iwl_fw_sta_type { + STATION_TYPE_PEER, + STATION_TYPE_BCAST_MGMT, + STATION_TYPE_MCAST, + STATION_TYPE_AUX, +}; /* STATION_TYPE_E_VER_1 */ + +/** + * struct iwl_mvm_sta_cfg_cmd - cmd structure to add a peer sta to the uCode's + * station table + * ( STA_CONFIG_CMD = 0xA ) + * + * @sta_id: index of station in uCode's station table + * @link_id: the id of the link that is used to communicate with this sta + * @peer_mld_address: the peers mld address + * @reserved_for_peer_mld_address: reserved + * @peer_link_address: the address of the link that is used to communicate + * with this sta + * @reserved_for_peer_link_address: reserved + * @station_type: type of this station. See &enum iwl_fw_sta_type + * @assoc_id: for GO only + * @beamform_flags: beam forming controls + * @mfp: indicates whether the STA uses management frame protection or not. + * @mimo: indicates whether the sta uses mimo or not + * @mimo_protection: indicates whether the sta uses mimo protection or not + * @ack_enabled: indicates that the AP supports receiving ACK- + * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG + * @trig_rnd_alloc: indicates that trigger based random allocation + * is enabled according to UORA element existence + * @tx_ampdu_spacing: minimum A-MPDU spacing: + * 4 - 2us density, 5 - 4us density, 6 - 8us density, 7 - 16us density + * @tx_ampdu_max_size: maximum A-MPDU length: 0 - 8K, 1 - 16K, 2 - 32K, + * 3 - 64K, 4 - 128K, 5 - 256K, 6 - 512K, 7 - 1024K. + * @sp_length: the size of the SP in actual number of frames + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver + * enabled ACs. + * @pkt_ext: optional, exists according to PPE-present bit in the HE/EHT-PHY + * capa + * @htc_flags: which features are supported in HTC + */ +struct iwl_mvm_sta_cfg_cmd { + __le32 sta_id; + __le32 link_id; + u8 peer_mld_address[ETH_ALEN]; + __le16 reserved_for_peer_mld_address; + u8 peer_link_address[ETH_ALEN]; + __le16 reserved_for_peer_link_address; + __le32 station_type; + __le32 assoc_id; + __le32 beamform_flags; + __le32 mfp; + __le32 mimo; + __le32 mimo_protection; + __le32 ack_enabled; + __le32 trig_rnd_alloc; + __le32 tx_ampdu_spacing; + __le32 tx_ampdu_max_size; + __le32 sp_length; + __le32 uapsd_acs; + struct iwl_he_pkt_ext_v2 pkt_ext; + __le32 htc_flags; +} __packed; /* STA_CMD_API_S_VER_1 */ + +/** + * struct iwl_mvm_aux_sta_cmd - command for AUX STA configuration + * ( AUX_STA_CMD = 0xB ) + * + * @sta_id: index of aux sta to configure + * @lmac_id: ? 
+ * @mac_addr: mac addr of the auxilary sta + * @reserved_for_mac_addr: reserved + */ +struct iwl_mvm_aux_sta_cmd { + __le32 sta_id; + __le32 lmac_id; + u8 mac_addr[ETH_ALEN]; + __le16 reserved_for_mac_addr; + +} __packed; /* AUX_STA_CMD_API_S_VER_1 */ + +/** + * struct iwl_mvm_remove_sta_cmd - a cmd structure to remove a sta added by + * STA_CONFIG_CMD or AUX_STA_CONFIG_CMD + * ( STA_REMOVE_CMD = 0xC ) + * + * @sta_id: index of station to remove + */ +struct iwl_mvm_remove_sta_cmd { + __le32 sta_id; +} __packed; /* REMOVE_STA_API_S_VER_1 */ + +/** + * struct iwl_mvm_sta_disable_tx_cmd - disable / re-enable tx to a sta + * ( STA_DISABLE_TX_CMD = 0xD ) + * + * @sta_id: index of the station to disable tx to + * @disable: indicates if to disable or re-enable tx + */ +struct iwl_mvm_sta_disable_tx_cmd { + __le32 sta_id; + __le32 disable; +} __packed; /* STA_DISABLE_TX_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/fw/api/mac.h b/fw/api/mac.h index 9b7caf968346..55882190251c 100644 --- a/fw/api/mac.h +++ b/fw/api/mac.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_mac_h__ @@ -295,7 +295,7 @@ struct iwl_ac_qos { * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts * ( MAC_CONTEXT_CMD = 0x28 ) * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @mac_type: one of &enum iwl_mac_types * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id * @node_addr: MAC address @@ -353,7 +353,7 @@ struct iwl_nonqos_seq_query_cmd { } __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ /** - * struct iwl_missed_beacons_notif - information on missed beacons + * struct iwl_missed_beacons_notif_ver_3 - information on missed beacons * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) * @mac_id: interface ID * @consec_missed_beacons_since_last_rx: number of consecutive missed @@ -362,7 +362,7 @@ struct iwl_nonqos_seq_query_cmd { * @num_expected_beacons: number of expected beacons * @num_recvd_beacons: number of received beacons */ -struct iwl_missed_beacons_notif { +struct iwl_missed_beacons_notif_ver_3 { __le32 mac_id; __le32 consec_missed_beacons_since_last_rx; __le32 consec_missed_beacons; @@ -371,6 +371,24 @@ struct iwl_missed_beacons_notif { } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ /** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @link_id: fw link ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: number of expected beacons + * @num_recvd_beacons: number of received beacons + */ +struct iwl_missed_beacons_notif { + __le32 link_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_4 */ + +/** * struct iwl_he_backoff_conf - used for backoff configuration * Per each trigger-based AC, (set by MU EDCA Parameter set info-element) * used for backoff configuration of TXF5..TXF8 trigger based. 
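/*
 * Illustrative sketch (an assumption, not part of this diff): consumers of
 * MISSED_BEACONS_NOTIFICATION now have to pick the layout by the notification
 * version the firmware advertises, since _VER_3 above carries a mac_id while
 * _VER_4 carries a fw link_id; both layouts happen to be five dwords, so the
 * payload size alone cannot distinguish them.  How notif_ver is looked up
 * (e.g. from the firmware's command/notification version TLVs) is assumed
 * here and not shown.
 */
static void example_handle_missed_beacons(const void *payload, int notif_ver,
					  u32 *id, u32 *consec_missed)
{
	if (notif_ver >= 4) {
		const struct iwl_missed_beacons_notif *nb = payload;

		*id = le32_to_cpu(nb->link_id);
		*consec_missed = le32_to_cpu(nb->consec_missed_beacons);
	} else {
		const struct iwl_missed_beacons_notif_ver_3 *nb = payload;

		*id = le32_to_cpu(nb->mac_id);
		*consec_missed = le32_to_cpu(nb->consec_missed_beacons);
	}
}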
@@ -398,7 +416,7 @@ struct iwl_he_backoff_conf { * @IWL_HE_PKT_EXT_64QAM: 64-QAM * @IWL_HE_PKT_EXT_256QAM: 256-QAM * @IWL_HE_PKT_EXT_1024QAM: 1024-QAM - * @IWL_HE_PKT_EXT_RESERVED: reserved value + * @IWL_HE_PKT_EXT_4096QAM: 4096-QAM, for EHT only * @IWL_HE_PKT_EXT_NONE: not defined */ enum iwl_he_pkt_ext_constellations { @@ -408,7 +426,7 @@ enum iwl_he_pkt_ext_constellations { IWL_HE_PKT_EXT_64QAM, IWL_HE_PKT_EXT_256QAM, IWL_HE_PKT_EXT_1024QAM, - IWL_HE_PKT_EXT_RESERVED, + IWL_HE_PKT_EXT_4096QAM, IWL_HE_PKT_EXT_NONE, }; diff --git a/fw/api/nvm-reg.h b/fw/api/nvm-reg.h index 91bfde6d5367..28bfabb399b2 100644 --- a/fw/api/nvm-reg.h +++ b/fw/api/nvm-reg.h @@ -17,7 +17,12 @@ enum iwl_regulatory_and_nvm_subcmd_ids { NVM_ACCESS_COMPLETE = 0x0, /** - * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd + * @LARI_CONFIG_CHANGE: &struct iwl_lari_config_change_cmd_v1, + * &struct iwl_lari_config_change_cmd_v2, + * &struct iwl_lari_config_change_cmd_v3, + * &struct iwl_lari_config_change_cmd_v4, + * &struct iwl_lari_config_change_cmd_v5 or + * &struct iwl_lari_config_change_cmd_v6 */ LARI_CONFIG_CHANGE = 0x1, @@ -29,12 +34,12 @@ enum iwl_regulatory_and_nvm_subcmd_ids { NVM_GET_INFO = 0x2, /** - * @TAS_CONFIG: &struct iwl_tas_config_cmd + * @TAS_CONFIG: &union iwl_tas_config_cmd */ TAS_CONFIG = 0x3, /** - * @SAR_OFFSET_MAPPING_TABLE_CMD: &iwl_sar_offset_mapping_cmd + * @SAR_OFFSET_MAPPING_TABLE_CMD: &struct iwl_sar_offset_mapping_cmd */ SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, @@ -317,7 +322,7 @@ struct iwl_mcc_update_resp_v3 { } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** - * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. + * struct iwl_mcc_update_resp_v4 - response to MCC_UPDATE_CMD. * Contains the new channel control profile map, if changed, and the new MCC * (mobile country code). * The new MCC may be different than what was requested in MCC_UPDATE_CMD. @@ -333,7 +338,7 @@ struct iwl_mcc_update_resp_v3 { * @channels: channel control data map, DWORD for each channel. Only the first * 16bits are used. */ -struct iwl_mcc_update_resp { +struct iwl_mcc_update_resp_v4 { __le32 status; __le16 mcc; __le16 cap; @@ -346,6 +351,37 @@ struct iwl_mcc_update_resp { } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */ /** + * struct iwl_mcc_update_resp_v8 - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @padding: padding for 2 bytes. + * @cap: capabilities for all channels which matches the MCC + * @time: time elapsed from the MCC test start (in units of 30 seconds) + * @geo_info: geographic specific profile information + * see &enum iwl_geo_information. + * @source_id: the MCC source, see iwl_mcc_source + * @reserved: for four bytes alignment. + * @n_channels: number of channels in @channels_data. + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. 
+ */ +struct iwl_mcc_update_resp_v8 { + __le32 status; + __le16 mcc; + u8 padding[2]; + __le32 cap; + __le16 time; + __le16 geo_info; + u8 source_id; + u8 reserved[3]; + __le32 n_channels; + __le32 channels[]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_8 */ + +/** * struct iwl_mcc_chub_notif - chub notifies of mcc change * (MCC_CHUB_UPDATE_CMD = 0xc9) * The Chub (Communication Hub, CommsHUB) is a HW component that connects to diff --git a/fw/api/offload.h b/fw/api/offload.h index 5204aa94e72a..898bf351f6e4 100644 --- a/fw/api/offload.h +++ b/fw/api/offload.h @@ -3,7 +3,7 @@ * Copyright (C) 2012-2014 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2022 Intel Corporation */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ @@ -13,7 +13,23 @@ */ enum iwl_prot_offload_subcmd_ids { /** - * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif + * @WOWLAN_WAKE_PKT_NOTIFICATION: Notification in &struct iwl_wowlan_wake_pkt_notif + */ + WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC, + + /** + * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif + */ + WOWLAN_INFO_NOTIFICATION = 0xFD, + + /** + * @D3_END_NOTIFICATION: End D3 state notification + */ + D3_END_NOTIFICATION = 0xFE, + + /** + * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif_v2 or + * &struct iwl_stored_beacon_notif_v3 */ STORED_BEACON_NTF = 0xFF, }; diff --git a/fw/api/phy-ctxt.h b/fw/api/phy-ctxt.h index e66f77924f83..8fe42cff1102 100644 --- a/fw/api/phy-ctxt.h +++ b/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -13,10 +13,12 @@ #define PHY_BAND_6 (2) /* Supported channel width, vary if there is VHT support */ -#define PHY_VHT_CHANNEL_MODE20 (0x0) -#define PHY_VHT_CHANNEL_MODE40 (0x1) -#define PHY_VHT_CHANNEL_MODE80 (0x2) -#define PHY_VHT_CHANNEL_MODE160 (0x3) +#define IWL_PHY_CHANNEL_MODE20 0x0 +#define IWL_PHY_CHANNEL_MODE40 0x1 +#define IWL_PHY_CHANNEL_MODE80 0x2 +#define IWL_PHY_CHANNEL_MODE160 0x3 +/* and 320 MHz for EHT */ +#define IWL_PHY_CHANNEL_MODE320 0x4 /* * Control channel position: @@ -24,20 +26,17 @@ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
* center_freq - * | - * 40Mhz |_______|_______| - * 80Mhz |_______|_______|_______|_______| - * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| - * code 011 010 001 000 | 100 101 110 111 + * For EHT - bit-3 is used for extended distance + * | + * 40Mhz |____|____| + * 80Mhz |____|____|____|____| + * 160Mhz |____|____|____|____|____|____|____|____| + * 320MHz |____|____|____|____|____|____|____|____|____|____|____|____|____|____|____|____| + * code 1011 1010 1001 1000 0011 0010 0001 0000 0100 0101 0110 0111 1100 1101 1110 1111 */ -#define PHY_VHT_CTRL_POS_1_BELOW (0x0) -#define PHY_VHT_CTRL_POS_2_BELOW (0x1) -#define PHY_VHT_CTRL_POS_3_BELOW (0x2) -#define PHY_VHT_CTRL_POS_4_BELOW (0x3) -#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) -#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) -#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) -#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) +#define IWL_PHY_CTRL_POS_ABOVE 0x4 +#define IWL_PHY_CTRL_POS_OFFS_EXT 0x8 +#define IWL_PHY_CTRL_POS_OFFS_MSK 0x3 /* * struct iwl_fw_channel_info_v1 - channel information @@ -117,7 +116,7 @@ struct iwl_phy_context_cmd_tail { * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @apply_time: 0 means immediate apply and context switch. * other value means apply new params after X usecs * @tx_param_color: ??? @@ -139,7 +138,7 @@ struct iwl_phy_context_cmd_v1 { * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @lmac_id: the lmac id the phy context belongs to * @ci: channel info * @rxchain_info: ??? 
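/*
 * Illustrative sketch (one reading of the control-position diagram above,
 * not a helper taken from the driver): encode the control channel position
 * from its distance to the center frequency in 20 MHz slots (0 = the slot
 * adjacent to center) and whether it lies above or below.  Bit 2 marks
 * "above", bit 3 is the EHT extended-distance bit needed for 320 MHz, and
 * bits 1:0 carry the distance modulo 4.
 */
static u8 example_phy_ctrl_pos(unsigned int slots_from_center, bool above)
{
	u8 pos = slots_from_center & IWL_PHY_CTRL_POS_OFFS_MSK;

	if (slots_from_center >= 4)
		pos |= IWL_PHY_CTRL_POS_OFFS_EXT;
	if (above)
		pos |= IWL_PHY_CTRL_POS_ABOVE;

	/* e.g. distance index 5 below center -> 0x9, i.e. 1001 in the diagram */
	return pos;
}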
diff --git a/fw/api/phy.h b/fw/api/phy.h index b1b9c29859c1..5a3f30e5e06d 100644 --- a/fw/api/phy.h +++ b/fw/api/phy.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2021 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -29,12 +29,16 @@ enum iwl_phy_ops_subcmd_ids { TEMP_REPORTING_THRESHOLDS_CMD = 0x04, /** - * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd + * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd_v1, + * &struct iwl_geo_tx_power_profiles_cmd_v2, + * &struct iwl_geo_tx_power_profiles_cmd_v3, + * &struct iwl_geo_tx_power_profiles_cmd_v4 or + * &struct iwl_geo_tx_power_profiles_cmd_v5 */ PER_CHAIN_LIMIT_OFFSET_CMD = 0x05, /** - * @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd + * @PER_PLATFORM_ANT_GAIN_CMD: &union iwl_ppag_table_cmd */ PER_PLATFORM_ANT_GAIN_CMD = 0x07, diff --git a/fw/api/power.h b/fw/api/power.h index f92cac1da764..85d89f559f6c 100644 --- a/fw/api/power.h +++ b/fw/api/power.h @@ -537,7 +537,7 @@ union iwl_ppag_table_cmd { struct iwl_sar_offset_mapping_cmd { u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE] [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE]; - u16 reserved; + __le16 reserved; } __packed; /*SAR_OFFSET_MAPPING_TABLE_CMD_API_S*/ /** diff --git a/fw/api/rs.h b/fw/api/rs.h index 687f804c46b7..a1a272433b09 100644 --- a/fw/api/rs.h +++ b/fw/api/rs.h @@ -21,6 +21,7 @@ * @IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation * for BPSK (MCS 0) with 2 spatial * streams + * @IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK: enable support for EHT extra LTF */ enum iwl_tlc_mng_cfg_flags { IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), @@ -28,6 +29,7 @@ enum iwl_tlc_mng_cfg_flags { IWL_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK = BIT(2), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK = BIT(3), IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK = BIT(4), + IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK = BIT(6), }; /** @@ -36,14 +38,14 @@ enum iwl_tlc_mng_cfg_flags { * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel - * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value + * @IWL_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel */ enum iwl_tlc_mng_cfg_cw { IWL_TLC_MNG_CH_WIDTH_20MHZ, IWL_TLC_MNG_CH_WIDTH_40MHZ, IWL_TLC_MNG_CH_WIDTH_80MHZ, IWL_TLC_MNG_CH_WIDTH_160MHZ, - IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ, + IWL_TLC_MNG_CH_WIDTH_320MHZ, }; /** @@ -64,8 +66,7 @@ enum iwl_tlc_mng_cfg_chains { * @IWL_TLC_MNG_MODE_HT: enable HT * @IWL_TLC_MNG_MODE_VHT: enable VHT * @IWL_TLC_MNG_MODE_HE: enable HE - * @IWL_TLC_MNG_MODE_INVALID: invalid value - * @IWL_TLC_MNG_MODE_NUM: a count of possible modes + * @IWL_TLC_MNG_MODE_EHT: enable EHT */ enum iwl_tlc_mng_cfg_mode { IWL_TLC_MNG_MODE_CCK = 0, @@ -74,8 +75,7 @@ enum iwl_tlc_mng_cfg_mode { IWL_TLC_MNG_MODE_HT, IWL_TLC_MNG_MODE_VHT, IWL_TLC_MNG_MODE_HE, - IWL_TLC_MNG_MODE_INVALID, - IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID, + IWL_TLC_MNG_MODE_EHT, }; /** @@ -375,9 +375,6 @@ enum { /* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ #define RATE_VHT_MCS_RATE_CODE_MSK 0xf -#define RATE_VHT_MCS_NSS_POS 4 -#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) -#define RATE_VHT_MCS_MIMO2_MSK BIT(RATE_VHT_MCS_NSS_POS) /* * Legacy OFDM rate format for bits 7:0 @@ -451,11 +448,16 @@ enum { * 1 2xLTF+0.8us * 2 2xLTF+1.6us * 3 4xLTF+3.2us - * HE 
TRIG: + * HE-EHT TRIG: * 0 1xLTF+1.6us * 1 2xLTF+1.6us * 2 4xLTF+3.2us * 3 (does not occur) + * EHT MU: + * 0 2xLTF+0.8us + * 1 2xLTF+1.6us + * 2 4xLTF+0.8us + * 3 4xLTF+3.2us */ #define RATE_MCS_HE_GI_LTF_POS 20 #define RATE_MCS_HE_GI_LTF_MSK_V1 (3 << RATE_MCS_HE_GI_LTF_POS) @@ -548,12 +550,17 @@ enum { /* * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz */ -#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_320 (4 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_20_VAL 0 +#define RATE_MCS_CHAN_WIDTH_20 (RATE_MCS_CHAN_WIDTH_20_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_40_VAL 1 +#define RATE_MCS_CHAN_WIDTH_40 (RATE_MCS_CHAN_WIDTH_40_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_80_VAL 2 +#define RATE_MCS_CHAN_WIDTH_80 (RATE_MCS_CHAN_WIDTH_80_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_160_VAL 3 +#define RATE_MCS_CHAN_WIDTH_160 (RATE_MCS_CHAN_WIDTH_160_VAL << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_320_VAL 4 +#define RATE_MCS_CHAN_WIDTH_320 (RATE_MCS_CHAN_WIDTH_320_VAL << RATE_MCS_CHAN_WIDTH_POS) /* Bit 15-14: Antenna selection: * Bit 14: Ant A active diff --git a/fw/api/rx.h b/fw/api/rx.h index 1989b270862b..25e2e23dce3d 100644 --- a/fw/api/rx.h +++ b/fw/api/rx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -273,7 +273,7 @@ enum iwl_rx_mpdu_mac_info { }; /* TSF overload low dword */ -enum iwl_rx_phy_data0 { +enum iwl_rx_phy_he_data0 { /* info type: HE any */ IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001, IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002, @@ -289,6 +289,25 @@ enum iwl_rx_phy_data0 { IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000, }; +/* TSF overload low dword */ +enum iwl_rx_phy_eht_data0 { + /* info type: EHT any */ + IWL_RX_PHY_DATA0_EHT_VALIDATE = BIT(0), + IWL_RX_PHY_DATA0_EHT_UPLINK = BIT(1), + IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK = 0x000000fc, + IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK = 0x00000f00, + IWL_RX_PHY_DATA0_EHT_PS160 = BIT(12), + IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK = 0x000fe000, + IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM = BIT(20), + IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK = 0x00600000, + IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG = BIT(23), + IWL_RX_PHY_DATA0_EHT_BW320_SLOT = BIT(24), + IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK = BIT(25), + IWL_RX_PHY_DATA0_EHT_PHY_VER = 0x1c000000, + /* 2 bits reserved */ + IWL_RX_PHY_DATA0_EHT_DELIM_EOF = BIT(31), +}; + enum iwl_rx_phy_info_type { IWL_RX_PHY_INFO_TYPE_NONE = 0, IWL_RX_PHY_INFO_TYPE_CCK = 1, @@ -301,19 +320,26 @@ enum iwl_rx_phy_info_type { IWL_RX_PHY_INFO_TYPE_HE_TB = 8, IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9, IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10, + IWL_RX_PHY_INFO_TYPE_EHT_MU = 11, + IWL_RX_PHY_INFO_TYPE_EHT_TB = 12, + IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT = 13, + IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT = 14, }; /* TSF overload high dword */ -enum iwl_rx_phy_data1 { +enum iwl_rx_phy_common_data1 { /* * check this first - if TSF overload is set, * see &enum 
iwl_rx_phy_info_type */ IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000, - /* info type: HT/VHT/HE any */ + /* info type: HT/VHT/HE/EHT any */ IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000, +}; +/* TSF overload high dword For HE rates*/ +enum iwl_rx_phy_he_data1 { /* info type: HE MU/MU-EXT */ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001, IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e, @@ -329,8 +355,24 @@ enum iwl_rx_phy_data1 { IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e, }; +/* TSF overload high dword For EHT-MU/TB rates*/ +enum iwl_rx_phy_eht_data1 { + /* info type: EHT-MU */ + IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2 = 0x0000001f, + /* info type: EHT-TB */ + IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE = BIT(0), + IWL_RX_PHY_DATA1_EHT_TB_LOW_SS = 0x0000001e, + + /* info type: EHT any */ + /* number of EHT-LTF symbols 0 - 1 EHT-LTF, 1 - 2 EHT-LTFs, 2 - 4 EHT-LTFs, + * 3 - 6 EHT-LTFs, 4 - 8 EHT-LTFs */ + IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM = 0x000000e0, + IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0 = 0x00000100, + IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7 = 0x0000fe00, +}; + /* goes into Metadata DW 7 */ -enum iwl_rx_phy_data2 { +enum iwl_rx_phy_he_data2 { /* info type: HE MU-EXT */ /* the a1/a2/... is what the PHY/firmware calls the values */ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */ @@ -346,7 +388,7 @@ enum iwl_rx_phy_data2 { }; /* goes into Metadata DW 8 */ -enum iwl_rx_phy_data3 { +enum iwl_rx_phy_he_data3 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */ @@ -355,7 +397,7 @@ enum iwl_rx_phy_data3 { }; /* goes into Metadata DW 4 high 16 bits */ -enum iwl_rx_phy_data4 { +enum iwl_rx_phy_he_he_data4 { /* info type: HE MU-EXT */ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002, @@ -366,6 +408,50 @@ enum iwl_rx_phy_data4 { IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600, }; +/* goes into Metadata DW 7 */ +enum iwl_rx_phy_eht_data2 { + /* info type: EHT-MU-EXT */ + /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */ + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff, + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00, + IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_B1 = 0x07fc0000, + + /* info type: EHT-TB-EXT */ + IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff, +}; + +/* goes into Metadata DW 8 */ +enum iwl_rx_phy_eht_data3 { + /* info type: EHT-MU-EXT */ + /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */ + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x000001ff, + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C1 = 0x0003fe00, + IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C2 = 0x07fc0000, +}; + +/* goes into Metadata DW 4 */ +enum iwl_rx_phy_eht_data4 { + /* info type: EHT-MU-EXT */ + /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */ + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D1 = 0x000001ff, + IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D2 = 0x0003fe00, + IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x000c0000, +}; + +/* goes into Metadata DW 16 */ +enum iwl_rx_phy_data5 { + /* info type: EHT any */ + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP = 0x00000003, + /* info type: EHT-TB */ + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1 = 0x0000003c, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2 = 0x000003c0, + /* info type: EHT-MU */ + IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE = 0x0000007c, + IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR = 0x0003ff80, + IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA = 0x001c0000, + IWL_RX_PHY_DATA5_EHT_MU_SPATIAL_CONF_USR_FIELD = 0x0fe00000, +}; + /** * struct iwl_rx_mpdu_desc_v1 - RX MPDU 
descriptor */ @@ -440,7 +526,9 @@ struct iwl_rx_mpdu_desc_v1 { /** * @phy_data1: valid only if * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, - * see &enum iwl_rx_phy_data1. + * see &enum iwl_rx_phy_common_data1 or + * &enum iwl_rx_phy_he_data1 or + * &enum iwl_rx_phy_eht_data1. */ __le32 phy_data1; }; @@ -540,11 +628,18 @@ struct iwl_rx_mpdu_desc_v3 { __le32 phy_data1; }; }; - /* DW16 & DW17 */ + /* DW16 */ + /** + * @phy_data5: valid only if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, + * see &enum iwl_rx_phy_data5. + */ + __le32 phy_data5; + /* DW17 */ /** * @reserved: reserved */ - __le32 reserved[2]; + __le32 reserved[1]; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, RX_MPDU_RES_START_API_S_VER_5 */ @@ -578,22 +673,31 @@ struct iwl_rx_mpdu_desc { * @mac_phy_idx: MAC/PHY index */ u8 mac_phy_idx; - /* DW4 - carries csum data only when rpa_en == 1 */ - /** - * @raw_csum: raw checksum (alledgedly unreliable) - */ - __le16 raw_csum; - + /* DW4 */ union { + struct { + /* carries csum data only when rpa_en == 1 */ + /** + * @raw_csum: raw checksum (alledgedly unreliable) + */ + __le16 raw_csum; + + union { + /** + * @l3l4_flags: &enum iwl_rx_l3l4_flags + */ + __le16 l3l4_flags; + + /** + * @phy_data4: depends on info type, see phy_data1 + */ + __le16 phy_data4; + }; + }; /** - * @l3l4_flags: &enum iwl_rx_l3l4_flags - */ - __le16 l3l4_flags; - - /** - * @phy_data4: depends on info type, see phy_data1 + * @phy_eht_data4: depends on info type, see phy_data1 */ - __le16 phy_data4; + __le32 phy_eht_data4; }; /* DW5 */ /** @@ -630,7 +734,7 @@ struct iwl_rx_mpdu_desc { #define RX_NO_DATA_INFO_TYPE_RX_ERR 1 #define RX_NO_DATA_INFO_TYPE_NDP 2 #define RX_NO_DATA_INFO_TYPE_MU_UNMATCHED 3 -#define RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED 4 +#define RX_NO_DATA_INFO_TYPE_TB_UNMATCHED 4 #define RX_NO_DATA_INFO_ERR_POS 8 #define RX_NO_DATA_INFO_ERR_MSK (0xff << RX_NO_DATA_INFO_ERR_POS) @@ -639,12 +743,43 @@ struct iwl_rx_mpdu_desc { #define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2 #define RX_NO_DATA_INFO_ERR_NO_DELIM 3 #define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4 +#define RX_NO_DATA_INFO_LOW_ENERGY 5 #define RX_NO_DATA_FRAME_TIME_POS 0 #define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS) #define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000 #define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000 +#define RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK 0x00f00000 + +/* content of OFDM_RX_VECTOR_USIG_A1_OUT */ +enum iwl_rx_usig_a1 { + IWL_RX_USIG_A1_ENHANCED_WIFI_VER_ID = 0x00000007, + IWL_RX_USIG_A1_BANDWIDTH = 0x00000038, + IWL_RX_USIG_A1_UL_FLAG = 0x00000040, + IWL_RX_USIG_A1_BSS_COLOR = 0x00001f80, + IWL_RX_USIG_A1_TXOP_DURATION = 0x000fe000, + IWL_RX_USIG_A1_DISREGARD = 0x01f00000, + IWL_RX_USIG_A1_VALIDATE = 0x02000000, + IWL_RX_USIG_A1_EHT_BW320_SLOT = 0x04000000, + IWL_RX_USIG_A1_EHT_TYPE = 0x18000000, + IWL_RX_USIG_A1_RDY = 0x80000000, +}; + +/* content of OFDM_RX_VECTOR_USIG_A2_EHT_OUT */ +enum iwl_rx_usig_a2_eht { + IWL_RX_USIG_A2_EHT_PPDU_TYPE = 0x00000003, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2 = 0x00000004, + IWL_RX_USIG_A2_EHT_PUNC_CHANNEL = 0x000000f8, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8 = 0x00000100, + IWL_RX_USIG_A2_EHT_SIG_MCS = 0x00000600, + IWL_RX_USIG_A2_EHT_SIG_SYM_NUM = 0x0000f800, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1 = 0x000f0000, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2 = 0x00f00000, + IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD = 0x1f000000, + IWL_RX_USIG_A2_EHT_CRC_OK = 0x40000000, + IWL_RX_USIG_A2_EHT_RDY = 0x80000000, +}; /** * struct iwl_rx_no_data - RX no data descriptor @@ -654,7 +789,8 @@ 
struct iwl_rx_mpdu_desc { * @on_air_rise_time: GP2 during on air rise * @fr_time: frame time * @rate: rate/mcs of frame - * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type + * @phy_info: &enum iwl_rx_phy_he_data0 or &enum iwl_rx_phy_eht_data0 + * based on &enum iwl_rx_phy_info_type * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT @@ -668,7 +804,34 @@ struct iwl_rx_no_data { __le32 phy_info[2]; __le32 rx_vec[2]; } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1, - TX_NO_DATA_NTFY_API_S_VER_2 */ + RX_NO_DATA_NTFY_API_S_VER_2 */ + +/** + * struct iwl_rx_no_data_ver_3 - RX no data descriptor + * @info: 7:0 frame type, 15:8 RX error type + * @rssi: 7:0 energy chain-A, + * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel + * @on_air_rise_time: GP2 during on air rise + * @fr_time: frame time + * @rate: rate/mcs of frame + * @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type + * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. + * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT + * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT + * for EHT: OFDM_RX_VECTOR_USIG_A1_OUT, OFDM_RX_VECTOR_USIG_A2_EHT_OUT, + * OFDM_RX_VECTOR_EHT_OUT, OFDM_RX_VECTOR_EHT_USER_FIELD_OUT + */ +struct iwl_rx_no_data_ver_3 { + __le32 info; + __le32 rssi; + __le32 on_air_rise_time; + __le32 fr_time; + __le32 rate; + __le32 phy_info[2]; + __le32 rx_vec[4]; +} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1, + RX_NO_DATA_NTFY_API_S_VER_2 + RX_NO_DATA_NTFY_API_S_VER_3 */ struct iwl_frame_release { u8 baid; diff --git a/fw/api/scan.h b/fw/api/scan.h index 5413087ae909..93078f8cc08c 100644 --- a/fw/api/scan.h +++ b/fw/api/scan.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -9,6 +9,16 @@ /* Scan Commands, Responses, Notifications */ +/** + * enum iwl_scan_subcmd_ids - scan commands + */ +enum iwl_scan_subcmd_ids { + /** + * @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info + */ + OFFLOAD_MATCH_INFO_NOTIF = 0xFC, +}; + /* Max number of IEs for direct SSID scans in a command */ #define PROBE_OPTION_MAX 20 @@ -699,10 +709,13 @@ enum iwl_umac_scan_general_flags_v2 { * should be aware of a P2P GO operation on the 2GHz band. * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling * should be aware of a P2P GO operation on the 5GHz or 6GHz band. + * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT: don't toggle between + * valid antennas, and use the same antenna as in previous scan */ enum iwl_umac_scan_general_params_flags2 { IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0), IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1), + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT = BIT(2), }; /** @@ -714,8 +727,10 @@ enum iwl_umac_scan_general_params_flags2 { * @iter_interval: interval between two scan iterations on one channel. 
*/ struct iwl_scan_channel_cfg_umac { +#define IWL_CHAN_CFG_FLAGS_BAND_POS 30 __le32 flags; - /* Both versions are of the same size, so use a union without adjusting + + /* All versions are of the same size, so use a union without adjusting * the command size later */ union { @@ -733,6 +748,12 @@ struct iwl_scan_channel_cfg_umac { * SCAN_CHANNEL_CONFIG_API_S_VER_3 * SCAN_CHANNEL_CONFIG_API_S_VER_4 */ + struct { + u8 channel_num; + u8 psd_20; + u8 iter_count; + u8 iter_interval; + } v5; /* SCAN_CHANNEL_CONFIG_API_S_VER_5 */ }; } __packed; @@ -969,7 +990,7 @@ struct iwl_scan_channel_params_v4 { SCAN_CHANNEL_PARAMS_API_S_VER_5 */ /** - * struct iwl_scan_channel_params_v6 + * struct iwl_scan_channel_params_v7 * @flags: channel flags &enum iwl_scan_channel_flags * @count: num of channels in scan request * @n_aps_override: override the number of APs the FW uses to calculate dwell @@ -979,7 +1000,7 @@ struct iwl_scan_channel_params_v4 { * @channel_config: array of explicit channel configurations * for 2.4Ghz and 5.2Ghz bands */ -struct iwl_scan_channel_params_v6 { +struct iwl_scan_channel_params_v7 { u8 flags; u8 count; u8 n_aps_override[2]; @@ -990,7 +1011,8 @@ struct iwl_scan_channel_params_v6 { * struct iwl_scan_general_params_v11 * @flags: &enum iwl_umac_scan_general_flags_v2 * @reserved: reserved for future - * @scan_start_mac_id: report the scan start TSF time according to this mac TSF + * @scan_start_mac_or_link_id: report the scan start TSF time according to this + * mac (up to verion 11) or link (starting with version 12) TSF * @active_dwell: dwell time for active scan per LMAC * @adwell_default_2g: adaptive dwell default number of APs * for 2.4GHz channel @@ -1013,7 +1035,7 @@ struct iwl_scan_channel_params_v6 { struct iwl_scan_general_params_v11 { __le16 flags; u8 reserved; - u8 scan_start_mac_id; + u8 scan_start_mac_or_link_id; u8 active_dwell[SCAN_TWO_LMACS]; u8 adwell_default_2g; u8 adwell_default_5g; @@ -1025,7 +1047,7 @@ struct iwl_scan_general_params_v11 { __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; -} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */ +} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_12, *_VER_11 and *_VER_10 */ /** * struct iwl_scan_periodic_parms_v1 @@ -1054,18 +1076,18 @@ struct iwl_scan_req_params_v12 { } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */ /** - * struct iwl_scan_req_params_v15 + * struct iwl_scan_req_params_v16 * @general_params: &struct iwl_scan_general_params_v11 - * @channel_params: &struct iwl_scan_channel_params_v6 + * @channel_params: &struct iwl_scan_channel_params_v7 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v4 */ -struct iwl_scan_req_params_v15 { +struct iwl_scan_req_params_v17 { struct iwl_scan_general_params_v11 general_params; - struct iwl_scan_channel_params_v6 channel_params; + struct iwl_scan_channel_params_v7 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v4 probe_params; -} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */ +} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_17 - 14 */ /** * struct iwl_scan_req_umac_v12 @@ -1080,16 +1102,16 @@ struct iwl_scan_req_umac_v12 { } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */ /** - * struct iwl_scan_req_umac_v15 + * struct iwl_scan_req_umac_v16 * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters */ -struct 
iwl_scan_req_umac_v15 { +struct iwl_scan_req_umac_v17 { __le32 uid; __le32 ooc_priority; - struct iwl_scan_req_params_v15 scan_params; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */ + struct iwl_scan_req_params_v17 scan_params; +} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_17 - 14 */ /** * struct iwl_umac_scan_abort @@ -1165,7 +1187,7 @@ struct iwl_scan_offload_profiles_query_v1 { u8 resume_while_scanning; u8 self_recovery; __le16 reserved; - struct iwl_scan_offload_profile_match_v1 matches[0]; + struct iwl_scan_offload_profile_match_v1 matches[]; } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ /** @@ -1188,7 +1210,7 @@ struct iwl_scan_offload_profile_match { } __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */ /** - * struct iwl_scan_offload_profiles_query - match results query response + * struct iwl_scan_offload_match_info - match results information * @matched_profiles: bitmap of matched profiles, referencing the * matches passed in the scan offload request * @last_scan_age: age of the last offloaded scan @@ -1200,7 +1222,7 @@ struct iwl_scan_offload_profile_match { * @reserved: reserved * @matches: array of match information, one for each match */ -struct iwl_scan_offload_profiles_query { +struct iwl_scan_offload_match_info { __le32 matched_profiles; __le32 last_scan_age; __le32 n_scans_done; @@ -1209,8 +1231,10 @@ struct iwl_scan_offload_profiles_query { u8 resume_while_scanning; u8 self_recovery; __le16 reserved; - struct iwl_scan_offload_profile_match matches[0]; -} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */ + struct iwl_scan_offload_profile_match matches[]; +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 and + * SCAN_OFFLOAD_MATCH_INFO_NOTIFICATION_S_VER_1 + */ /** * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration diff --git a/fw/api/sta.h b/fw/api/sta.h index 5edbe27c0922..d62fed543276 100644 --- a/fw/api/sta.h +++ b/fw/api/sta.h @@ -477,7 +477,7 @@ struct iwl_mvm_wep_key_cmd { u8 decryption_type; u8 flags; u8 reserved; - struct iwl_mvm_wep_key wep_key[0]; + struct iwl_mvm_wep_key wep_key[]; } __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ /** diff --git a/fw/api/tdls.h b/fw/api/tdls.h index 14d35000abed..893438aadab0 100644 --- a/fw/api/tdls.h +++ b/fw/api/tdls.h @@ -132,7 +132,7 @@ struct iwl_tdls_config_cmd { __le32 pti_req_data_offset; struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; + u8 pti_req_template[]; } __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ /** diff --git a/fw/api/time-event.h b/fw/api/time-event.h index 904cd78a9fa0..7cc706731d70 100644 --- a/fw/api/time-event.h +++ b/fw/api/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -292,7 +292,7 @@ struct iwl_hs20_roc_req_tail { * ( HOT_SPOT_CMD 0x53 ) * * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* + * @action: action to perform, see &enum iwl_ctxt_action * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the * event_unique_id should be the id of the time event assigned by ucode. * Otherwise ignore the event_unique_id. 
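/*
 * Illustrative sketch (an assumption, not part of this diff): with the
 * zero-length arrays above (wep_key[0], pti_req_template[0], matches[0])
 * converted to C99 flexible array members, variable-length commands are
 * naturally sized with struct_size() instead of open-coded arithmetic.
 * The allocation helper and the key count are made up for the example.
 */
static struct iwl_mvm_wep_key_cmd *example_alloc_wep_cmd(int num_keys)
{
	struct iwl_mvm_wep_key_cmd *cmd;

	/* sizeof(*cmd) + num_keys * sizeof(cmd->wep_key[0]), overflow-checked */
	cmd = kzalloc(struct_size(cmd, wep_key, num_keys), GFP_KERNEL);

	return cmd;
}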
@@ -377,7 +377,8 @@ enum iwl_mvm_session_prot_conf_id { * struct iwl_mvm_session_prot_cmd - configure a session protection * @id_and_color: the id and color of the mac for which this session protection * is sent - * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE + * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE, + * see &enum iwl_ctxt_action * @conf_id: see &enum iwl_mvm_session_prot_conf_id * @duration_tu: the duration of the whole protection in TUs. * @repetition_count: not used diff --git a/fw/api/tx.h b/fw/api/tx.h index ecc6706f66ed..842360b1e995 100644 --- a/fw/api/tx.h +++ b/fw/api/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ @@ -177,17 +177,6 @@ enum iwl_tx_offload_assist_flags_pos { #define IWL_TX_CMD_OFFLD_MH_MASK 0x1f #define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f -enum iwl_tx_offload_assist_bz { - IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff, - IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800, - IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000, - IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000, - IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000, - IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000, - IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000, - IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000, -}; - /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW @@ -800,7 +789,7 @@ enum iwl_mac_beacon_flags { * is &enum iwl_mac_beacon_flags. * @short_ssid: Short SSID * @reserved: reserved - * @template_id: currently equal to the mac context id of the coresponding mac. + * @link_id: the firmware id of the link that will use this beacon * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @ecsa_offset: offset to the ECSA IE if present @@ -812,15 +801,17 @@ struct iwl_mac_beacon_cmd { __le16 flags; __le32 short_ssid; __le32 reserved; - __le32 template_id; + __le32 link_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10, - BEACON_TEMPLATE_CMD_API_S_VER_11, - BEACON_TEMPLATE_CMD_API_S_VER_12 */ + * BEACON_TEMPLATE_CMD_API_S_VER_11, + * BEACON_TEMPLATE_CMD_API_S_VER_12, + * BEACON_TEMPLATE_CMD_API_S_VER_13 + */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -1038,7 +1038,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { prph_val = iwl_read_prph(fwrt->trans, addr + i); - if (prph_val == 0x5a5a5a5a) + if (iwl_trans_is_hw_error_value(prph_val)) return -EBUSY; *val++ = cpu_to_le32(prph_val); } @@ -1388,13 +1388,13 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, if (!data) return; + memset(data, 0, sizeof(*data)); + /* make sure only one bit is set in only one fid */ if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1, "fid1=%x, fid2=%x\n", fid1, fid2)) return; - memset(data, 0, sizeof(*data)); - if (fid1) { fifo_idx = ffs(fid1) - 1; 
if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n", @@ -1562,7 +1562,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ? DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); - if (prph_data == 0x5a5a5a5a) { + if (iwl_trans_is_hw_error_value(prph_data)) { iwl_trans_release_nic_access(fwrt->trans); return -EBUSY; } @@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id, } static void * -iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, +iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id, struct iwl_fw_ini_monitor_dump *data, const struct iwl_fw_mon_regs *addrs) { - struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; - u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - if (!iwl_trans_grab_nic_access(fwrt->trans)) { IWL_ERR(fwrt, "Failed to get monitor header\n"); return NULL; @@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->dram_alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_dram_regs); } @@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id); - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump, &fwrt->trans->cfg->mon_smem_regs); } @@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; - return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + return iwl_dump_ini_mon_fill_header(fwrt, + /* no offset calculation later */ + IWL_FW_INI_ALLOCATION_ID_DBGC1, + mon_dump, &fwrt->trans->cfg->mon_dbgi_regs); } @@ -2031,7 +2034,6 @@ static u32 iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { - u32 size = 0; u32 ranges = 0; u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; @@ -2041,17 +2043,16 @@ iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt, IWL_DEBUG_INFO(fwrt, "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", imr_enable, imr_size, sram_size); - return size; + return 0; } - size = imr_size; ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data); - if (!size && !ranges) { - IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges); + if (!ranges) { + IWL_ERR(fwrt, "WRT: ranges :=%d\n", ranges); return 0; } - size += sizeof(struct iwl_fw_ini_error_dump) + + imr_size += sizeof(struct iwl_fw_ini_error_dump) + ranges * sizeof(struct iwl_fw_ini_error_dump_range); - return size; + return imr_size; } /** @@ -2320,6 +2321,36 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt, return entry->size; } +static u32 iwl_dump_ini_file_name_info(struct iwl_fw_runtime *fwrt, + struct list_head *list) +{ + struct iwl_fw_ini_dump_entry *entry; + struct iwl_dump_file_name_info *tlv; + u32 len = 
strnlen(fwrt->trans->dbg.dump_file_name_ext, + IWL_FW_INI_MAX_NAME); + + if (!fwrt->trans->dbg.dump_file_name_ext_valid) + return 0; + + entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + len); + if (!entry) + return 0; + + entry->size = sizeof(*tlv) + len; + + tlv = (void *)entry->data; + tlv->type = cpu_to_le32(IWL_INI_DUMP_NAME_TYPE); + tlv->len = cpu_to_le32(len); + memcpy(tlv->data, fwrt->trans->dbg.dump_file_name_ext, len); + + /* add the dump file name extension tlv to the list */ + list_add_tail(&entry->list, list); + + fwrt->trans->dbg.dump_file_name_ext_valid = false; + + return entry->size; +} + static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { [IWL_FW_INI_REGION_INVALID] = {}, [IWL_FW_INI_REGION_INTERNAL_BUFFER] = { @@ -2495,8 +2526,10 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]); - if (size) + if (size) { + size += iwl_dump_ini_file_name_info(fwrt, list); size += iwl_dump_ini_info(fwrt, trigger, list); + } return size; } @@ -3121,6 +3154,51 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans, return 0; } +int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) +{ + struct iwl_mvm_marker marker = { + .dw_len = sizeof(struct iwl_mvm_marker) / 4, + .marker_id = MARKER_ID_SYNC_CLOCK, + }; + struct iwl_host_cmd hcmd = { + .flags = CMD_ASYNC, + .id = WIDE_ID(LONG_GROUP, MARKER_CMD), + .dataflags = {}, + }; + struct iwl_mvm_marker_rsp *resp; + int cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, + WIDE_ID(LONG_GROUP, MARKER_CMD), + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + if (cmd_ver == 1) { + /* the real timestamp is taken from the ftrace clock + * this is for finding the match between fw and kernel logs + */ + marker.timestamp = cpu_to_le64(fwrt->timestamp.seq++); + } else if (cmd_ver == 2) { + marker.timestamp = cpu_to_le64(ktime_get_boottime_ns()); + } else { + IWL_DEBUG_INFO(fwrt, + "Invalid version of Marker CMD. 
Ver = %d\n", + cmd_ver); + return -EINVAL; + } + + hcmd.data[0] = ▮ + hcmd.len[0] = sizeof(marker); + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + + if (cmd_ver > 1 && hcmd.resp_pkt) { + resp = (void *)hcmd.resp_pkt->data; + IWL_DEBUG_INFO(fwrt, "FW GP2 time: %u\n", + le32_to_cpu(resp->gp2)); + } + + return ret; +} + void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params, bool stop) @@ -3131,12 +3209,15 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, return; if (fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) + IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP)) { + if (stop) + iwl_fw_send_timestamp_marker_cmd(fwrt); ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop); - else if (stop) + } else if (stop) { iwl_fw_dbg_stop_recording(fwrt->trans, params); - else + } else { ret = iwl_fw_dbg_restart_recording(fwrt->trans, params); + } #ifdef CONFIG_IWLWIFI_DEBUGFS if (!ret) { if (stop) @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -227,6 +227,8 @@ static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt) flush_delayed_work(&fwrt->dump.wks[i].wk); } +int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt); + #ifdef CONFIG_IWLWIFI_DEBUGFS static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) { @@ -327,4 +329,18 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, u32 timepoint, u32 timepoint_data); + +#define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \ + IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__) + +#define IWL_FW_CHECK(_obj, _cond, _fmt, ...) 
\ + ({ \ + bool __cond = (_cond); \ + \ + if (unlikely(__cond)) \ + IWL_FW_CHECK_FAILED(_obj, _fmt, __VA_ARGS__); \ + \ + unlikely(__cond); \ + }) + #endif /* __iwl_fw_dbg_h__ */ diff --git a/fw/debugfs.c b/fw/debugfs.c index 43e997283db0..3cdbc6ac7ae5 100644 --- a/fw/debugfs.c +++ b/fw/debugfs.c @@ -123,28 +123,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) -static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) -{ - struct iwl_mvm_marker marker = { - .dw_len = sizeof(struct iwl_mvm_marker) / 4, - .marker_id = MARKER_ID_SYNC_CLOCK, - - /* the real timestamp is taken from the ftrace clock - * this is for finding the match between fw and kernel logs - */ - .timestamp = cpu_to_le64(fwrt->timestamp.seq++), - }; - - struct iwl_host_cmd hcmd = { - .id = MARKER_CMD, - .flags = CMD_ASYNC, - .data[0] = &marker, - .len[0] = sizeof(marker), - }; - - return iwl_trans_send_cmd(fwrt->trans, &hcmd); -} - static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, char *buf, size_t count) { @@ -317,8 +295,10 @@ static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq, const struct iwl_fw *fw = priv->fwrt->fw; *pos = ++state->pos; - if (*pos >= fw->ucode_capa.n_cmd_versions) + if (*pos >= fw->ucode_capa.n_cmd_versions) { + kfree(state); return NULL; + } return state; } @@ -352,9 +332,18 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v) const struct iwl_fw *fw = priv->fwrt->fw; const struct iwl_fw_cmd_version *ver; u32 cmd_id; - - if (!state->pos) + int has_capa; + + if (!state->pos) { + seq_puts(seq, "fw_capa:\n"); + has_capa = fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT) ? 1 : 0; + seq_printf(seq, + " %d: %d\n", + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT, + has_capa); seq_puts(seq, "fw_api_ver:\n"); + } ver = &fw->ucode_capa.cmd_versions[state->pos]; diff --git a/fw/dump.c b/fw/dump.c index b90f1e9ce691..5876f917e536 100644 --- a/fw/dump.c +++ b/fw/dump.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -14,6 +14,13 @@ #include "iwl-csr.h" #include "pnvm.h" +#define FW_ASSERT_LMAC_FATAL 0x70 +#define FW_ASSERT_LMAC2_FATAL 0x72 +#define FW_ASSERT_UMAC_FATAL 0x71 +#define UMAC_RT_NMI_LMAC2_FATAL 0x72 +#define RT_NMI_INTERRUPT_OTHER_LMAC_FATAL 0x73 +#define FW_ASSERT_NMI_UNKNOWN 0x84 + /* * Note: This structure is read from the device with IO accesses, * and the reading already does the endian conversion. 
As it is @@ -96,6 +103,17 @@ struct iwl_umac_error_event_table { #define ERROR_START_OFFSET (1 * sizeof(u32)) #define ERROR_ELEM_SIZE (7 * sizeof(u32)) +static bool iwl_fwrt_if_errorid_other_cpu(u32 err_id) +{ + err_id &= 0xFF; + + if ((err_id >= FW_ASSERT_LMAC_FATAL && + err_id <= RT_NMI_INTERRUPT_OTHER_LMAC_FATAL) || + err_id == FW_ASSERT_NMI_UNKNOWN) + return true; + return false; +} + static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt) { struct iwl_trans *trans = fwrt->trans; @@ -113,6 +131,13 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt) if (table.valid) fwrt->dump.umac_err_id = table.error_id; + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.umac_err_id) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.umac_err_id); + } + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n", @@ -157,7 +182,8 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu base = fwrt->fw->inst_errlog_ptr; } - if (base < 0x400000) { + if ((fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && !base) || + (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && base < 0x400000)) { IWL_ERR(fwrt, "Not valid error log pointer 0x%08X for %s uCode\n", base, @@ -168,7 +194,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu /* check if there is a HW error */ val = iwl_trans_read_mem32(trans, base); - if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) { + if (iwl_trans_is_hw_error_value(val)) { int err; IWL_ERR(trans, "HW error, resetting before reading\n"); @@ -188,6 +214,13 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu if (table.valid) fwrt->dump.lmac_err_id[lmac_num] = table.error_id; + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.lmac_err_id[lmac_num]) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.lmac_err_id[lmac_num]); + } + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n", @@ -273,6 +306,16 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx) iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + if (table.valid) + fwrt->dump.tcm_err_id[idx] = table.error_id; + + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.tcm_err_id[idx]) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.tcm_err_id[idx]); + } + IWL_ERR(fwrt, "TCM%d status:\n", idx + 1); IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2); @@ -336,6 +379,16 @@ static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx) iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + if (table.valid) + fwrt->dump.rcm_err_id[idx] = table.error_id; + + if (!iwl_fwrt_if_errorid_other_cpu(fwrt->dump.rcm_err_id[idx]) && + !fwrt->trans->dbg.dump_file_name_ext_valid) { + fwrt->trans->dbg.dump_file_name_ext_valid = true; + 
snprintf(fwrt->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "0x%x", fwrt->dump.rcm_err_id[idx]); + } + IWL_ERR(fwrt, "RCM%d status:\n", idx + 1); IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id); IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2); @@ -376,7 +429,7 @@ static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt) return; } - error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS); + error = iwl_read_umac_prph(trans, error); IWL_ERR(trans, "IML/ROM dump:\n"); @@ -414,6 +467,10 @@ static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt) FSEQ_REG(CNVR_AUX_MISC_CHIP), FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM), FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR), + FSEQ_REG(FSEQ_PREV_CNVIO_INIT_VERSION), + FSEQ_REG(FSEQ_WIFI_FSEQ_VERSION), + FSEQ_REG(FSEQ_BT_FSEQ_VERSION), + FSEQ_REG(FSEQ_CLASS_TP_VERSION), }; if (!iwl_trans_grab_nic_access(trans)) @@ -431,6 +488,9 @@ static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt) void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) { + struct iwl_pc_data *pc_data; + u8 count; + if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) { IWL_ERR(fwrt, "DEVICE_ENABLED bit is not set. Aborting dump.\n"); @@ -443,10 +503,25 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt) iwl_fwrt_dump_umac_error_log(fwrt); iwl_fwrt_dump_tcm_error_log(fwrt, 0); iwl_fwrt_dump_rcm_error_log(fwrt, 0); - iwl_fwrt_dump_tcm_error_log(fwrt, 1); - iwl_fwrt_dump_rcm_error_log(fwrt, 1); + if (fwrt->trans->dbg.tcm_error_event_table[1]) + iwl_fwrt_dump_tcm_error_log(fwrt, 1); + if (fwrt->trans->dbg.rcm_error_event_table[1]) + iwl_fwrt_dump_rcm_error_log(fwrt, 1); iwl_fwrt_dump_iml_error_log(fwrt); iwl_fwrt_dump_fseq_regs(fwrt); + if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + pc_data = fwrt->trans->dbg.pc_data; + + if (!iwl_trans_grab_nic_access(fwrt->trans)) + return; + for (count = 0; count < fwrt->trans->dbg.num_pc; + count++, pc_data++) + IWL_ERR(fwrt, "%s: 0x%x\n", + pc_data->pc_name, + iwl_read_prph_no_grab(fwrt->trans, + pc_data->pc_address)); + iwl_trans_release_nic_access(fwrt->trans); + } if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH); diff --git a/fw/error-dump.h b/fw/error-dump.h index 079fa0023bd8..f5e08988dc7b 100644 --- a/fw/error-dump.h +++ b/fw/error-dump.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2014, 2018-2021 Intel Corporation + * Copyright (C) 2014, 2018-2022 Intel Corporation * Copyright (C) 2014-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -76,6 +76,18 @@ struct iwl_fw_error_dump_data { } __packed; /** + * struct iwl_dump_file_name_info - data for dump file name addition + * @type: region type with reserved bits + * @len: the length of file name string to be added to dump file + * @data: the string need to be added to dump file + */ +struct iwl_dump_file_name_info { + __le32 type; + __le32 len; + __u8 data[]; +} __packed; + +/** * struct iwl_fw_error_dump_file - the layout of the header of the file * @barker: must be %IWL_FW_ERROR_DUMP_BARKER * @file_len: the length of all the file starting from %barker @@ -84,7 +96,7 @@ struct iwl_fw_error_dump_data { struct iwl_fw_error_dump_file { __le32 barker; __le32 file_len; - u8 data[0]; + u8 data[]; } __packed; /** @@ -231,6 +243,9 @@ struct iwl_fw_error_dump_mem { /* Use bit 31 as dump info type to avoid colliding with region 
types */ #define IWL_INI_DUMP_INFO_TYPE BIT(31) +/* Use bit 31 and bit 24 as dump name type to avoid colliding with region types */ +#define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24)) + /** * struct iwl_fw_error_dump_data - data for one type * @type: &enum iwl_fw_ini_region_type diff --git a/fw/file.h b/fw/file.h index 5679a78758be..b36e9613a52c 100644 --- a/fw/file.h +++ b/fw/file.h @@ -101,8 +101,10 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_SEC_TABLE_ADDR = 66, IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67, + IWL_UCODE_TLV_CURRENT_PC = 68, IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0, + IWL_UCODE_TLV_FW_NUM_BEACONS = IWL_UCODE_TLV_CONST_BASE + 2, IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0, IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1, @@ -145,7 +147,7 @@ struct iwl_tlv_ucode_header { * Note that each TLV is padded to a length * that is a multiple of 4 for alignment. */ - u8 data[0]; + u8 data[]; }; /* @@ -321,6 +323,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * is supported. * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used) + * @IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG: supports fragmented PNVM image * @IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT: the firmware supports setting * stabilization latency for SoCs. * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification @@ -396,6 +399,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, /* set 1 */ + IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG = (__force iwl_ucode_tlv_capa_t)32, IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT = (__force iwl_ucode_tlv_capa_t)37, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, @@ -455,6 +459,15 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100, IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104, IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105, + IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM = (__force iwl_ucode_tlv_capa_t)108, + IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT = (__force iwl_ucode_tlv_capa_t)109, + IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT = (__force iwl_ucode_tlv_capa_t)110, + IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT = (__force iwl_ucode_tlv_capa_t)111, + IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT = (__force iwl_ucode_tlv_capa_t)112, + IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT = (__force iwl_ucode_tlv_capa_t)113, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114, + IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116, #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ @@ -603,7 +616,7 @@ struct iwl_fw_dbg_dest_tlv_v1 { __le32 wrap_count; u8 base_shift; u8 end_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; + struct iwl_fw_dbg_reg_op reg_ops[]; } __packed; /* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */ @@ -623,14 +636,14 @@ struct iwl_fw_dbg_dest_tlv { __le32 wrap_count; u8 base_shift; u8 size_shift; - struct iwl_fw_dbg_reg_op reg_ops[0]; + struct iwl_fw_dbg_reg_op reg_ops[]; } __packed; struct iwl_fw_dbg_conf_hcmd { u8 id; u8 reserved; __le16 len; - u8 data[0]; + u8 data[]; } __packed; /** @@ -705,7 +718,7 @@ struct iwl_fw_dbg_trigger_tlv { u8 flags; u8 reserved[5]; - u8 
data[0]; + u8 data[]; } __packed; #define FW_DBG_START_FROM_ALIVE 0 @@ -51,6 +51,7 @@ struct iwl_ucode_capabilities { u32 error_log_addr; u32 error_log_size; u32 num_stations; + u32 num_beacons; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; @@ -182,10 +183,10 @@ struct iwl_dump_exclude { * @enhance_sensitivity_table: device can do enhanced sensitivity. * @init_evtlog_ptr: event log offset for init ucode. * @init_evtlog_size: event log size for init ucode. - * @init_errlog_ptr: error log offfset for init ucode. + * @init_errlog_ptr: error log offset for init ucode. * @inst_evtlog_ptr: event log offset for runtime ucode. * @inst_evtlog_size: event log size for runtime ucode. - * @inst_errlog_ptr: error log offfset for runtime ucode. + * @inst_errlog_ptr: error log offset for runtime ucode. * @type: firmware type (&enum iwl_fw_type) * @human_readable: human readable version * we get the ALIVE from the uCode diff --git a/fw/pnvm.c b/fw/pnvm.c index 7d4aa398729a..650e4bde9c17 100644 --- a/fw/pnvm.c +++ b/fw/pnvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2020-2021 Intel Corporation + * Copyright(c) 2020-2023 Intel Corporation */ #include "iwl-drv.h" @@ -31,23 +31,23 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait, } static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, - size_t len) + size_t len, + struct iwl_pnvm_image *pnvm_data) { - struct iwl_ucode_tlv *tlv; + const struct iwl_ucode_tlv *tlv; u32 sha1 = 0; u16 mac_type = 0, rf_id = 0; - u8 *pnvm_data = NULL, *tmp; bool hw_match = false; - u32 size = 0; - int ret; IWL_DEBUG_FW(trans, "Handling PNVM section\n"); + memset(pnvm_data, 0, sizeof(*pnvm_data)); + while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; len -= sizeof(*tlv); - tlv = (void *)data; + tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); @@ -55,8 +55,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); - ret = -EINVAL; - goto out; + return -EINVAL; } data += sizeof(*tlv); @@ -70,11 +69,12 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, break; } - sha1 = le32_to_cpup((__le32 *)data); + sha1 = le32_to_cpup((const __le32 *)data); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n", sha1); + pnvm_data->version = sha1; break; case IWL_UCODE_TLV_HW_TYPE: if (tlv_len < 2 * sizeof(__le16)) { @@ -87,8 +87,8 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, if (hw_match) break; - mac_type = le16_to_cpup((__le16 *)data); - rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16))); + mac_type = le16_to_cpup((const __le16 *)data); + rf_id = le16_to_cpup((const __le16 *)(data + sizeof(__le16))); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n", @@ -99,7 +99,7 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, hw_match = true; break; case IWL_UCODE_TLV_SEC_RT: { - struct iwl_pnvm_section *section = (void *)data; + const struct iwl_pnvm_section *section = (const void *)data; u32 data_len = tlv_len - sizeof(*section); IWL_DEBUG_FW(trans, @@ -107,31 +107,31 @@ static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data, tlv_len); /* TODO: remove, this is a deprecated separator */ - if (le32_to_cpup((__le32 *)data) == 0xddddeeee) { + if 
(le32_to_cpup((const __le32 *)data) == 0xddddeeee) { IWL_DEBUG_FW(trans, "Ignoring separator.\n"); break; } - IWL_DEBUG_FW(trans, "Adding data (size %d)\n", - data_len); - - tmp = krealloc(pnvm_data, size + data_len, GFP_KERNEL); - if (!tmp) { + if (pnvm_data->n_chunks == IPC_DRAM_MAP_ENTRY_NUM_MAX) { IWL_DEBUG_FW(trans, - "Couldn't allocate (more) pnvm_data\n"); - - ret = -ENOMEM; - goto out; + "too many payloads to allocate in DRAM.\n"); + return -EINVAL; } - pnvm_data = tmp; - - memcpy(pnvm_data + size, section->data, data_len); + IWL_DEBUG_FW(trans, "Adding data (size %d)\n", + data_len); - size += data_len; + pnvm_data->chunks[pnvm_data->n_chunks].data = section->data; + pnvm_data->chunks[pnvm_data->n_chunks].len = data_len; + pnvm_data->n_chunks++; break; } + case IWL_UCODE_TLV_MEM_DESC: + if (iwl_uefi_handle_tlv_mem_desc(trans, data, tlv_len, + pnvm_data)) + return -EINVAL; + break; case IWL_UCODE_TLV_PNVM_SKU: IWL_DEBUG_FW(trans, "New PNVM section started, stop parsing.\n"); @@ -152,28 +152,22 @@ done: "HW mismatch, skipping PNVM section (need mac_type 0x%x rf_id 0x%x)\n", CSR_HW_REV_TYPE(trans->hw_rev), CSR_HW_RFID_TYPE(trans->hw_rf_id)); - ret = -ENOENT; - goto out; + return -ENOENT; } - if (!size) { + if (!pnvm_data->n_chunks) { IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n"); - ret = -ENOENT; - goto out; + return -ENOENT; } - IWL_INFO(trans, "loaded PNVM version %08x\n", sha1); - - ret = iwl_trans_set_pnvm(trans, pnvm_data, size); -out: - kfree(pnvm_data); - return ret; + return 0; } static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, - size_t len) + size_t len, + struct iwl_pnvm_image *pnvm_data) { - struct iwl_ucode_tlv *tlv; + const struct iwl_ucode_tlv *tlv; IWL_DEBUG_FW(trans, "Parsing PNVM file\n"); @@ -181,7 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, u32 tlv_len, tlv_type; len -= sizeof(*tlv); - tlv = (void *)data; + tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); @@ -193,8 +187,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { - struct iwl_sku_id *sku_id = - (void *)(data + sizeof(*tlv)); + const struct iwl_sku_id *sku_id = + (const void *)(data + sizeof(*tlv)); IWL_DEBUG_FW(trans, "Got IWL_UCODE_TLV_PNVM_SKU len %d\n", @@ -212,7 +206,8 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { int ret; - ret = iwl_pnvm_handle_section(trans, data, len); + ret = iwl_pnvm_handle_section(trans, data, len, + pnvm_data); if (!ret) return 0; } else { @@ -255,93 +250,136 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len) return 0; } -int iwl_pnvm_load(struct iwl_trans *trans, - struct iwl_notif_wait_data *notif_wait) +static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len) { - u8 *data; - size_t len; struct pnvm_sku_package *package; - struct iwl_notification_wait pnvm_wait; - static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, - PNVM_INIT_COMPLETE_NTFY) }; - int ret; - - /* if the SKU_ID is empty, there's nothing to do */ - if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2]) - return 0; - - /* - * If we already loaded (or tried to load) it before, we just - * need to set it again. 
- */ - if (trans->pnvm_loaded) { - ret = iwl_trans_set_pnvm(trans, NULL, 0); - if (ret) - return ret; - goto skip_parse; - } + u8 *image = NULL; /* First attempt to get the PNVM from BIOS */ - package = iwl_uefi_get_pnvm(trans, &len); + package = iwl_uefi_get_pnvm(trans_p, len); if (!IS_ERR_OR_NULL(package)) { - if (len >= sizeof(*package)) { + if (*len >= sizeof(*package)) { /* we need only the data */ - len -= sizeof(*package); - data = kmemdup(package->data, len, GFP_KERNEL); - } else { - data = NULL; + *len -= sizeof(*package); + image = kmemdup(package->data, *len, GFP_KERNEL); } - /* free package regardless of whether kmemdup succeeded */ kfree(package); - - if (data) - goto parse; + if (image) + return image; } /* If it's not available, try from the filesystem */ - ret = iwl_pnvm_get_from_fs(trans, &data, &len); + if (iwl_pnvm_get_from_fs(trans_p, &image, len)) + return NULL; + return image; +} + +static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + struct iwl_pnvm_image *pnvm_data = NULL; + u8 *data = NULL; + size_t length; + int ret; + + /* failed to get/parse the image in the past, no use trying again */ + if (trans->fail_to_parse_pnvm_image) + return; + + if (trans->pnvm_loaded) + goto set; + + data = iwl_get_pnvm_image(trans, &length); + if (!data) { + trans->fail_to_parse_pnvm_image = true; + return; + } + + pnvm_data = kzalloc(sizeof(*pnvm_data), GFP_KERNEL); + if (!pnvm_data) + goto free; + + ret = iwl_pnvm_parse(trans, data, length, pnvm_data); if (ret) { - /* - * Pretend we've loaded it - at least we've tried and - * couldn't load it at all, so there's no point in - * trying again over and over. - */ - trans->pnvm_loaded = true; - - goto skip_parse; + trans->fail_to_parse_pnvm_image = true; + goto free; } -parse: - iwl_pnvm_parse(trans, data, len); + ret = iwl_trans_load_pnvm(trans, pnvm_data, capa); + if (ret) + goto free; + IWL_INFO(trans, "loaded PNVM version %08x\n", pnvm_data->version); +set: + iwl_trans_set_pnvm(trans, capa); +free: kfree(data); + kfree(pnvm_data); +} -skip_parse: - data = NULL; - /* now try to get the reduce power table, if not loaded yet */ - if (!trans->reduce_power_loaded) { - data = iwl_uefi_get_reduced_power(trans, &len); - if (IS_ERR_OR_NULL(data)) { - /* - * Pretend we've loaded it - at least we've tried and - * couldn't load it at all, so there's no point in - * trying again over and over. 
- */ - trans->reduce_power_loaded = true; - - goto skip_reduce_power; - } +static void +iwl_pnvm_load_reduce_power_to_trans(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + struct iwl_pnvm_image *pnvm_data = NULL; + u8 *data = NULL; + size_t length; + int ret; + + if (trans->failed_to_load_reduce_power_image) + return; + + if (trans->reduce_power_loaded) + goto set; + + data = iwl_uefi_get_reduced_power(trans, &length); + if (IS_ERR(data)) { + trans->failed_to_load_reduce_power_image = true; + return; } - ret = iwl_trans_set_reduce_power(trans, data, len); - if (ret) + pnvm_data = kzalloc(sizeof(*pnvm_data), GFP_KERNEL); + if (!pnvm_data) + goto free; + + ret = iwl_uefi_reduce_power_parse(trans, data, length, pnvm_data); + if (ret) { + trans->failed_to_load_reduce_power_image = true; + goto free; + } + + ret = iwl_trans_load_reduce_power(trans, pnvm_data, capa); + if (ret) { IWL_DEBUG_FW(trans, - "Failed to set reduce power table %d\n", + "Failed to load reduce power table %d\n", ret); + trans->failed_to_load_reduce_power_image = true; + goto free; + } + +set: + iwl_trans_set_reduce_power(trans, capa); +free: kfree(data); + kfree(pnvm_data); +} + +int iwl_pnvm_load(struct iwl_trans *trans, + struct iwl_notif_wait_data *notif_wait, + const struct iwl_ucode_capabilities *capa) +{ + struct iwl_notification_wait pnvm_wait; + static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, + PNVM_INIT_COMPLETE_NTFY) }; + + /* if the SKU_ID is empty, there's nothing to do */ + if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2]) + return 0; + + iwl_pnvm_load_pnvm_to_trans(trans, capa); + iwl_pnvm_load_reduce_power_to_trans(trans, capa); -skip_reduce_power: iwl_init_notification_wait(notif_wait, &pnvm_wait, ntf_cmds, ARRAY_SIZE(ntf_cmds), iwl_pnvm_complete_fn, trans); diff --git a/fw/pnvm.h b/fw/pnvm.h index 203c367dd4de..1bac3466154c 100644 --- a/fw/pnvm.h +++ b/fw/pnvm.h @@ -1,13 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/****************************************************************************** - * - * Copyright(c) 2020-2021 Intel Corporation - * - *****************************************************************************/ - +/* + * Copyright(c) 2020-2023 Intel Corporation + */ #ifndef __IWL_PNVM_H__ #define __IWL_PNVM_H__ +#include "iwl-drv.h" #include "fw/notif-wait.h" #define MVM_UCODE_PNVM_TIMEOUT (HZ / 4) @@ -15,24 +13,17 @@ #define MAX_PNVM_NAME 64 int iwl_pnvm_load(struct iwl_trans *trans, - struct iwl_notif_wait_data *notif_wait); + struct iwl_notif_wait_data *notif_wait, + const struct iwl_ucode_capabilities *capa); static inline void iwl_pnvm_get_fs_name(struct iwl_trans *trans, u8 *pnvm_name, size_t max_len) { - int pre_len; - - /* - * The prefix unfortunately includes a hyphen at the end, so - * don't add the dot here... - */ - snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre); + char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; - /* ...but replace the hyphen with the dot here. 
*/ - pre_len = strlen(trans->cfg->fw_name_pre); - if (pre_len < max_len && pre_len > 0) - pnvm_name[pre_len - 1] = '.'; + snprintf(pnvm_name, max_len, "%s.pnvm", + iwl_drv_get_fwname_pre(trans, _fw_name_pre)); } #endif /* __IWL_PNVM_H__ */ @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2021 Intel Corporation + * Copyright (C) 2021-2022 Intel Corporation */ #include <net/mac80211.h> @@ -126,7 +126,7 @@ u32 iwl_new_rate_from_v1(u32 rate_v1) rate_v1 & RATE_MCS_HE_MSK_V1) { rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK; - rate_v2 |= rate_v1 & RATE_VHT_MCS_MIMO2_MSK; + rate_v2 |= rate_v1 & RATE_MCS_NSS_MSK; if (rate_v1 & RATE_MCS_HE_MSK_V1) { u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1; @@ -218,6 +218,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) type = "HT"; else if (format == RATE_MCS_HE_MSK) type = "HE"; + else if (format == RATE_MCS_EHT_MSK) + type = "EHT"; else type = "Unknown"; /* shouldn't happen */ diff --git a/fw/runtime.h b/fw/runtime.h index d3cb1ae68a96..702586945533 100644 --- a/fw/runtime.h +++ b/fw/runtime.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __iwl_fw_runtime_h__ #define __iwl_fw_runtime_h__ @@ -24,6 +24,8 @@ struct iwl_fw_runtime_ops { }; #define MAX_NUM_LMAC 2 +#define MAX_NUM_TCM 2 +#define MAX_NUM_RCM 2 struct iwl_fwrt_shared_mem_cfg { int num_lmacs; int num_txfifo_entries; @@ -129,6 +131,8 @@ struct iwl_fw_runtime { unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM]; u32 *d3_debug_data; u32 lmac_err_id[MAX_NUM_LMAC]; + u32 tcm_err_id[MAX_NUM_TCM]; + u32 rcm_err_id[MAX_NUM_RCM]; u32 umac_err_id; struct iwl_txf_iter_data txf_iter_data; @@ -142,12 +146,14 @@ struct iwl_fw_runtime { u32 umac_minor; } fw_ver; } dump; -#ifdef CONFIG_IWLWIFI_DEBUGFS struct { +#ifdef CONFIG_IWLWIFI_DEBUGFS struct delayed_work wk; u32 delay; +#endif u64 seq; } timestamp; +#ifdef CONFIG_IWLWIFI_DEBUGFS bool tpc_enabled; #endif /* CONFIG_IWLWIFI_DEBUGFS */ #ifdef CONFIG_ACPI @@ -161,6 +167,7 @@ struct iwl_fw_runtime { struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS]; u32 ppag_flags; u32 ppag_ver; + bool ppag_table_valid; struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; u8 reduced_power_flags; diff --git a/fw/uefi.c b/fw/uefi.c index 23b1d689ba7b..9877988db0d2 100644 --- a/fw/uefi.c +++ b/fw/uefi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2021 Intel Corporation + * Copyright(c) 2021-2023 Intel Corporation */ #include "iwl-drv.h" @@ -17,63 +17,109 @@ 0xb2, 0xec, 0xf5, 0xa3, \ 0x59, 0x4f, 0x4a, 0xea) -void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) +struct iwl_uefi_pnvm_mem_desc { + __le32 addr; + __le32 size; + const u8 data[]; +} __packed; + +static void *iwl_uefi_get_variable(efi_char16_t *name, efi_guid_t *guid, + unsigned long *data_size) { - struct efivar_entry *pnvm_efivar; + efi_status_t status; void *data; - unsigned long package_size; - int err; - *len = 0; + if (!data_size) + return ERR_PTR(-EINVAL); - pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL); - if (!pnvm_efivar) - return ERR_PTR(-ENOMEM); + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return ERR_PTR(-ENODEV); - memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME, - sizeof(IWL_UEFI_OEM_PNVM_NAME)); - pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; + 
/* first call with NULL data to get the exact entry size */ + *data_size = 0; + status = efi.get_variable(name, guid, NULL, data_size, NULL); + if (status != EFI_BUFFER_TOO_SMALL || !*data_size) + return ERR_PTR(-EIO); - /* - * TODO: we hardcode a maximum length here, because reading - * from the UEFI is not working. To implement this properly, - * we have to call efivar_entry_size(). - */ - package_size = IWL_HARDCODED_PNVM_SIZE; + data = kmalloc(*data_size, GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); - data = kmalloc(package_size, GFP_KERNEL); - if (!data) { - data = ERR_PTR(-ENOMEM); - goto out; + status = efi.get_variable(name, guid, NULL, data_size, data); + if (status != EFI_SUCCESS) { + kfree(data); + return ERR_PTR(-ENOENT); } - err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data); - if (err) { + return data; +} + +void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) +{ + unsigned long package_size; + void *data; + + *len = 0; + + data = iwl_uefi_get_variable(IWL_UEFI_OEM_PNVM_NAME, &IWL_EFI_VAR_GUID, + &package_size); + if (IS_ERR(data)) { IWL_DEBUG_FW(trans, - "PNVM UEFI variable not found %d (len %lu)\n", - err, package_size); - kfree(data); - data = ERR_PTR(err); - goto out; + "PNVM UEFI variable not found 0x%lx (len %lu)\n", + PTR_ERR(data), package_size); + return data; } IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %lu\n", package_size); *len = package_size; -out: - kfree(pnvm_efivar); - return data; } -static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, - const u8 *data, size_t len) +int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, + u32 tlv_len, struct iwl_pnvm_image *pnvm_data) +{ + const struct iwl_uefi_pnvm_mem_desc *desc = (const void *)data; + u32 data_len; + + if (tlv_len < sizeof(*desc)) { + IWL_DEBUG_FW(trans, "TLV len (%d) is too small\n", tlv_len); + return -EINVAL; + } + + data_len = tlv_len - sizeof(*desc); + + IWL_DEBUG_FW(trans, + "Handle IWL_UCODE_TLV_MEM_DESC, len %d data_len %d\n", + tlv_len, data_len); + + if (le32_to_cpu(desc->size) != data_len) { + IWL_DEBUG_FW(trans, "invalid mem desc size %d\n", desc->size); + return -EINVAL; + } + + if (pnvm_data->n_chunks == IPC_DRAM_MAP_ENTRY_NUM_MAX) { + IWL_DEBUG_FW(trans, "too many payloads to allocate in DRAM.\n"); + return -EINVAL; + } + + IWL_DEBUG_FW(trans, "Adding data (size %d)\n", data_len); + + pnvm_data->chunks[pnvm_data->n_chunks].data = desc->data; + pnvm_data->chunks[pnvm_data->n_chunks].len = data_len; + pnvm_data->n_chunks++; + + return 0; +} + +static int iwl_uefi_reduce_power_section(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data) { const struct iwl_ucode_tlv *tlv; - u8 *reduce_power_data = NULL, *tmp; - u32 size = 0; IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n"); + memset(pnvm_data, 0, sizeof(*pnvm_data)); while (len >= sizeof(*tlv)) { u32 tlv_len, tlv_type; @@ -87,39 +133,17 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); - kfree(reduce_power_data); - reduce_power_data = ERR_PTR(-EINVAL); - goto out; + return -EINVAL; } data += sizeof(*tlv); switch (tlv_type) { - case IWL_UCODE_TLV_MEM_DESC: { - IWL_DEBUG_FW(trans, - "Got IWL_UCODE_TLV_MEM_DESC len %d\n", - tlv_len); - - IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len); - - tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL); - if (!tmp) { - IWL_DEBUG_FW(trans, - "Couldn't allocate (more) 
reduce_power_data\n"); - - kfree(reduce_power_data); - reduce_power_data = ERR_PTR(-ENOMEM); - goto out; - } - - reduce_power_data = tmp; - - memcpy(reduce_power_data + size, data, tlv_len); - - size += tlv_len; - + case IWL_UCODE_TLV_MEM_DESC: + if (iwl_uefi_handle_tlv_mem_desc(trans, data, tlv_len, + pnvm_data)) + return -EINVAL; break; - } case IWL_UCODE_TLV_PNVM_SKU: IWL_DEBUG_FW(trans, "New REDUCE_POWER section started, stop parsing.\n"); @@ -135,27 +159,18 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans, } done: - if (!size) { + if (!pnvm_data->n_chunks) { IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n"); - /* Better safe than sorry, but 'reduce_power_data' should - * always be NULL if !size. - */ - kfree(reduce_power_data); - reduce_power_data = ERR_PTR(-ENOENT); - goto out; + return -ENOENT; } - - IWL_INFO(trans, "loaded REDUCE_POWER\n"); - -out: - return reduce_power_data; + return 0; } -static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans, - const u8 *data, size_t len) +int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data) { const struct iwl_ucode_tlv *tlv; - void *sec_data; IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n"); @@ -171,7 +186,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans, if (len < tlv_len) { IWL_ERR(trans, "invalid TLV len: %zd/%u\n", len, tlv_len); - return ERR_PTR(-EINVAL); + return -EINVAL; } if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) { @@ -192,11 +207,11 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans, if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) && trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) && trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) { - sec_data = iwl_uefi_reduce_power_section(trans, - data, - len); - if (!IS_ERR(sec_data)) - return sec_data; + int ret = iwl_uefi_reduce_power_section(trans, + data, len, + pnvm_data); + if (!ret) + return 0; } else { IWL_DEBUG_FW(trans, "SKU ID didn't match!\n"); } @@ -206,68 +221,103 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans, } } - return ERR_PTR(-ENOENT); + return -ENOENT; } -void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) +u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) { - struct efivar_entry *reduce_power_efivar; struct pnvm_sku_package *package; - void *data = NULL; unsigned long package_size; - int err; + u8 *data; - *len = 0; + package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME, + &IWL_EFI_VAR_GUID, &package_size); - reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL); - if (!reduce_power_efivar) - return ERR_PTR(-ENOMEM); - - memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME, - sizeof(IWL_UEFI_REDUCED_POWER_NAME)); - reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; - - /* - * TODO: we hardcode a maximum length here, because reading - * from the UEFI is not working. To implement this properly, - * we have to call efivar_entry_size(). 
- */ - package_size = IWL_HARDCODED_REDUCE_POWER_SIZE; - - package = kmalloc(package_size, GFP_KERNEL); - if (!package) { - package = ERR_PTR(-ENOMEM); - goto out; + if (IS_ERR(package)) { + IWL_DEBUG_FW(trans, + "Reduced Power UEFI variable not found 0x%lx (len %lu)\n", + PTR_ERR(package), package_size); + return ERR_CAST(package); } - err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package); - if (err) { + if (package_size < sizeof(*package)) { IWL_DEBUG_FW(trans, - "Reduced Power UEFI variable not found %d (len %lu)\n", - err, package_size); + "Invalid Reduced Power UEFI variable len (%lu)\n", + package_size); kfree(package); - data = ERR_PTR(err); - goto out; + return ERR_PTR(-EINVAL); } IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n", package_size); - *len = package_size; IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n", package->rev, package->total_size, package->n_skus); - data = iwl_uefi_reduce_power_parse(trans, package->data, - *len - sizeof(*package)); + *len = package_size - sizeof(*package); + data = kmemdup(package->data, *len, GFP_KERNEL); + if (!data) { + kfree(package); + return ERR_PTR(-ENOMEM); + } kfree(package); -out: - kfree(reduce_power_efivar); - return data; } +static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_data, + struct iwl_trans *trans) +{ + if (common_step_data->revision != 1) + return -EINVAL; + + trans->mbx_addr_0_step = (u32)common_step_data->revision | + (u32)common_step_data->cnvi_eq_channel << 8 | + (u32)common_step_data->cnvr_eq_channel << 16 | + (u32)common_step_data->radio1 << 24; + trans->mbx_addr_1_step = (u32)common_step_data->radio2; + return 0; +} + +void iwl_uefi_get_step_table(struct iwl_trans *trans) +{ + struct uefi_cnv_common_step_data *data; + unsigned long package_size; + int ret; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return; + + data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID, + &package_size); + + if (IS_ERR(data)) { + IWL_DEBUG_FW(trans, + "STEP UEFI variable not found 0x%lx\n", + PTR_ERR(data)); + return; + } + + if (package_size < sizeof(*data)) { + IWL_DEBUG_FW(trans, + "Invalid STEP table UEFI variable len (%lu)\n", + package_size); + kfree(data); + return; + } + + IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n", + package_size); + + ret = iwl_uefi_step_parse(data, trans); + if (ret < 0) + IWL_DEBUG_FW(trans, "Cannot read STEP tables. rev is invalid\n"); + + kfree(data); +} +IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table); + #ifdef CONFIG_ACPI static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data, struct iwl_fw_runtime *fwrt) @@ -304,39 +354,28 @@ static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data, void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt) { - struct efivar_entry *sgom_efivar; struct uefi_cnv_wlan_sgom_data *data; unsigned long package_size; - int err, ret; + int ret; if (!fwrt->geo_enabled) return; - sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL); - if (!sgom_efivar) + data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID, + &package_size); + if (IS_ERR(data)) { + IWL_DEBUG_FW(trans, + "SGOM UEFI variable not found 0x%lx\n", + PTR_ERR(data)); return; - - memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME, - sizeof(IWL_UEFI_SGOM_NAME)); - sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID; - - /* TODO: we hardcode a maximum length here, because reading - * from the UEFI is not working. 
To implement this properly, - * we have to call efivar_entry_size(). - */ - package_size = IWL_HARDCODED_SGOM_SIZE; - - data = kmalloc(package_size, GFP_KERNEL); - if (!data) { - data = ERR_PTR(-ENOMEM); - goto out; } - err = efivar_entry_get(sgom_efivar, NULL, &package_size, data); - if (err) { + if (package_size < sizeof(*data)) { IWL_DEBUG_FW(trans, - "SGOM UEFI variable not found %d\n", err); - goto out_free; + "Invalid SGOM table UEFI variable len (%lu)\n", + package_size); + kfree(data); + return; } IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n", @@ -346,11 +385,7 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans, if (ret < 0) IWL_DEBUG_FW(trans, "Cannot read SGOM tables. rev is invalid\n"); -out_free: kfree(data); - -out: - kfree(sgom_efivar); } IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table); #endif /* CONFIG_ACPI */ diff --git a/fw/uefi.h b/fw/uefi.h index 09d2a971b3a0..1369cc4855c3 100644 --- a/fw/uefi.h +++ b/fw/uefi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2021 Intel Corporation + * Copyright(c) 2021-2023 Intel Corporation */ #ifndef __iwl_fw_uefi__ #define __iwl_fw_uefi__ @@ -8,16 +8,9 @@ #define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm" #define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower" #define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping" +#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP" -/* - * TODO: we have these hardcoded values that the caller must pass, - * because reading from the UEFI is not working. To implement this - * properly, we have to change iwl_pnvm_get_from_uefi() to call - * efivar_entry_size() and return the value to the caller instead. - */ -#define IWL_HARDCODED_PNVM_SIZE 4096 -#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768 -#define IWL_HARDCODED_SGOM_SIZE 339 +#define IWL_SGOM_MAP_SIZE 339 struct pnvm_sku_package { u8 rev; @@ -29,7 +22,16 @@ struct pnvm_sku_package { struct uefi_cnv_wlan_sgom_data { u8 revision; - u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1]; + u8 offset_map[IWL_SGOM_MAP_SIZE - 1]; +} __packed; + +struct uefi_cnv_common_step_data { + u8 revision; + u8 step_mode; + u8 cnvi_eq_channel; + u8 cnvr_eq_channel; + u8 radio1; + u8 radio2; } __packed; /* @@ -39,19 +41,43 @@ struct uefi_cnv_wlan_sgom_data { */ #ifdef CONFIG_EFI void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len); -void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len); +u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len); +int iwl_uefi_reduce_power_parse(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data); +void iwl_uefi_get_step_table(struct iwl_trans *trans); +int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data, + u32 tlv_len, struct iwl_pnvm_image *pnvm_data); #else /* CONFIG_EFI */ -static inline -void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) +static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len) { return ERR_PTR(-EOPNOTSUPP); } -static inline -void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) +static inline int +iwl_uefi_reduce_power_parse(struct iwl_trans *trans, + const u8 *data, size_t len, + struct iwl_pnvm_image *pnvm_data) +{ + return -EOPNOTSUPP; +} + +static inline u8 * +iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len) { return ERR_PTR(-EOPNOTSUPP); } + +static inline void iwl_uefi_get_step_table(struct iwl_trans *trans) +{ +} + +static inline int +iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, 
const u8 *data, + u32 tlv_len, struct iwl_pnvm_image *pnvm_data) +{ + return 0; +} #endif /* CONFIG_EFI */ #if defined(CONFIG_EFI) && defined(CONFIG_ACPI) diff --git a/iwl-config.h b/iwl-config.h index f5b556a103e8..241a9e3f2a1a 100644 --- a/iwl-config.h +++ b/iwl-config.h @@ -2,6 +2,7 @@ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2023 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -34,6 +35,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_AX210, IWL_DEVICE_FAMILY_BZ, + IWL_DEVICE_FAMILY_SC, }; /* @@ -254,7 +256,6 @@ enum iwl_cfg_trans_ltr_delay { * @xtal_latency: power up latency to get the xtal stabilized * @extra_phy_cfg_flags: extra configuration flags to pass to the PHY * @rf_id: need to read rf_id to determine the firmware image - * @use_tfh: use TFH * @gen2: 22000 and on transport operation * @mq_rx_supported: multi-queue rx support * @integrated: discrete or integrated @@ -269,7 +270,6 @@ struct iwl_cfg_trans_params { u32 xtal_latency; u32 extra_phy_cfg_flags; u32 rf_id:1, - use_tfh:1, gen2:1, mq_rx_supported:1, integrated:1, @@ -307,7 +307,9 @@ struct iwl_fw_mon_regs { * @name: Official name of the device * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The - * filename is constructed as fw_name_pre<api>.ucode. + * filename is constructed as <fw_name_pre>-<api>.ucode. + * @fw_name_mac: MAC name for this config, the remaining pieces of the + * name will be generated dynamically * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. * @max_inst_size: The maximal length of the fw inst section (only DVM) @@ -361,6 +363,7 @@ struct iwl_cfg { /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; + const char *fw_name_mac; /* params likely to change within a device family */ const struct iwl_ht_params *ht_params; const struct iwl_eeprom_params *eeprom_params; @@ -388,7 +391,6 @@ struct iwl_cfg { high_temp:1, mac_addr_from_csr:10, lp_xtal_workaround:1, - disable_dummy_notification:1, apmg_not_supported:1, vht_mu_mimo_supported:1, cdb:1, @@ -416,17 +418,15 @@ struct iwl_cfg { #define IWL_CFG_ANY (~0) #define IWL_CFG_MAC_TYPE_PU 0x31 -#define IWL_CFG_MAC_TYPE_PNJ 0x32 #define IWL_CFG_MAC_TYPE_TH 0x32 #define IWL_CFG_MAC_TYPE_QU 0x33 #define IWL_CFG_MAC_TYPE_QUZ 0x35 -#define IWL_CFG_MAC_TYPE_QNJ 0x36 #define IWL_CFG_MAC_TYPE_SO 0x37 -#define IWL_CFG_MAC_TYPE_SNJ 0x42 #define IWL_CFG_MAC_TYPE_SOF 0x43 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_MAC_TYPE_BZ 0x46 #define IWL_CFG_MAC_TYPE_GL 0x47 +#define IWL_CFG_MAC_TYPE_SC 0x48 #define IWL_CFG_RF_TYPE_TH 0x105 #define IWL_CFG_RF_TYPE_TH1 0x108 @@ -438,6 +438,7 @@ struct iwl_cfg { #define IWL_CFG_RF_TYPE_MR 0x110 #define IWL_CFG_RF_TYPE_MS 0x111 #define IWL_CFG_RF_TYPE_FM 0x112 +#define IWL_CFG_RF_TYPE_WH 0x113 #define IWL_CFG_RF_ID_TH 0x1 #define IWL_CFG_RF_ID_TH1 0x1 @@ -469,6 +470,7 @@ struct iwl_dev_info { u16 mac_type; u16 rf_type; u8 mac_step; + u8 rf_step; u8 rf_id; u8 no_160; u8 cores; @@ -485,17 +487,16 @@ extern const struct iwl_cfg_trans_params iwl9000_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg; -extern 
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg; -extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg; extern const struct iwl_cfg_trans_params iwl_so_long_latency_imr_trans_cfg; extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg; extern const struct iwl_cfg_trans_params iwl_bz_trans_cfg; +extern const struct iwl_cfg_trans_params iwl_sc_trans_cfg; extern const char iwl9162_name[]; extern const char iwl9260_name[]; extern const char iwl9260_1_name[]; @@ -534,6 +535,7 @@ extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; extern const char iwl_bz_name[]; +extern const char iwl_sc_name[]; #if IS_ENABLED(CONFIG_IWLDVM) extern const struct iwl_cfg iwl5300_agn_cfg; extern const struct iwl_cfg iwl5100_agn_cfg; @@ -579,6 +581,7 @@ extern const struct iwl_cfg iwl105_bgn_d_cfg; extern const struct iwl_cfg iwl135_bgn_cfg; #endif /* CONFIG_IWLDVM */ #if IS_ENABLED(CONFIG_IWLMVM) +extern const struct iwl_ht_params iwl_22000_ht_params; extern const struct iwl_cfg iwl7260_2ac_cfg; extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp; extern const struct iwl_cfg iwl7260_2n_cfg; @@ -603,7 +606,6 @@ extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9560_qu_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_qu_c0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_quz_a0_jf_b0_cfg; -extern const struct iwl_cfg iwl9560_qnj_b0_jf_b0_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwl_qu_b0_hr1_b0; extern const struct iwl_cfg iwl_qu_c0_hr1_b0; @@ -622,40 +624,23 @@ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; extern const struct iwl_cfg killer1650x_2ax_cfg; extern const struct iwl_cfg killer1650w_2ax_cfg; -extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg; extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0; extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long; extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; -extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0; -extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0; -extern const struct iwl_cfg iwl_cfg_snj_hr_b0; -extern const struct iwl_cfg iwl_cfg_snj_a0_jf_b0; -extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0; -extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0; -extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0; -extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0; + +extern const struct iwl_cfg iwl_cfg_ma; + extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; -extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0; -extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0; -extern const struct iwl_cfg 
iwl_cfg_bz_a0_gf4_a0; -extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0; -extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0; -extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0; + +extern const struct iwl_cfg iwl_cfg_bz; +extern const struct iwl_cfg iwl_cfg_gl; + +extern const struct iwl_cfg iwl_cfg_sc; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/iwl-context-info-gen3.h b/iwl-context-info-gen3.h index b84884034c74..96bf353469b8 100644 --- a/iwl-context-info-gen3.h +++ b/iwl-context-info-gen3.h @@ -13,6 +13,8 @@ #define CSR_IML_SIZE_ADDR 0x128 #define CSR_IML_RESP_ADDR 0x12c +#define UNFRAGMENTED_PNVM_PAYLOADS_NUMBER 2 + /* Set bit for enabling automatic function boot */ #define CSR_AUTO_FUNC_BOOT_ENA BIT(1) /* Set bit for initiating function boot */ @@ -96,9 +98,9 @@ struct iwl_prph_scratch_control { } __packed; /* PERIPH_SCRATCH_CONTROL_S */ /* - * struct iwl_prph_scratch_pnvm_cfg - ror config + * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch * @pnvm_base_addr: PNVM start address - * @pnvm_size: PNVM size in DWs + * @pnvm_size: the size of the PNVM image in bytes * @reserved: reserved */ struct iwl_prph_scratch_pnvm_cfg { @@ -107,6 +109,14 @@ struct iwl_prph_scratch_pnvm_cfg { __le32 reserved; } __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */ +/** + * struct iwl_prph_scrath_mem_desc_addr_array + * @mem_descs: array of dram addresses. + * Each address is the beggining of a pnvm payload. + */ +struct iwl_prph_scrath_mem_desc_addr_array { + __le64 mem_descs[IPC_DRAM_MAP_ENTRY_NUM_MAX]; +} __packed; /* PERIPH_SCRATCH_MEM_DESC_ADDR_ARRAY_S_VER_1 */ /* * struct iwl_prph_scratch_hwm_cfg - hwm config * @hwm_base_addr: hwm start address @@ -132,7 +142,7 @@ struct iwl_prph_scratch_rbd_cfg { /* * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table * @base_addr: reduce power table address - * @size: table size in dwords + * @size: the size of the entire power table image */ struct iwl_prph_scratch_uefi_cfg { __le64 base_addr; @@ -141,12 +151,27 @@ struct iwl_prph_scratch_uefi_cfg { } __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */ /* + * struct iwl_prph_scratch_step_cfg - prph scratch step configuration + * @mbx_addr_0: [0:7] revision, + * [8:15] cnvi_to_cnvr length, + * [16:23] cnvr_to_cnvi channel length, + * [24:31] radio1 reserved + * @mbx_addr_1: [0:7] radio2 reserved + */ + +struct iwl_prph_scratch_step_cfg { + __le32 mbx_addr_0; + __le32 mbx_addr_1; +} __packed; + +/* * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config * @version: version information of context info and HW * @control: control flags of FH configurations * @pnvm_cfg: ror configuration * @hwm_cfg: hwm configuration * @rbd_cfg: default RX queue configuration + * @step_cfg: step configuration */ struct iwl_prph_scratch_ctrl_cfg { struct iwl_prph_scratch_version version; @@ -155,6 +180,7 @@ struct iwl_prph_scratch_ctrl_cfg { struct iwl_prph_scratch_hwm_cfg hwm_cfg; struct iwl_prph_scratch_rbd_cfg rbd_cfg; struct iwl_prph_scratch_uefi_cfg reduce_power_cfg; + struct iwl_prph_scratch_step_cfg step_cfg; } __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ /* @@ -165,7 +191,7 @@ struct iwl_prph_scratch_ctrl_cfg { */ struct iwl_prph_scratch { struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; - __le32 reserved[12]; 
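For orientation, the DRAM descriptor array added in this hunk is what the chunked PNVM/reduce-power image built by the new fw/pnvm.c and fw/uefi.c parsers ultimately feeds: each parsed chunk is placed in DRAM and its bus address is recorded in mem_descs[]. The sketch below only illustrates that mapping; example_fill_mem_descs() and dma_map_chunk() are hypothetical stand-ins, and the real work is done by the transport's load_pnvm/load_reduce_power implementation, which is outside this excerpt.

/*
 * Illustrative sketch only -- not driver code.  dma_map_chunk() is a
 * hypothetical helper standing in for the transport's DMA allocation;
 * struct iwl_pnvm_image, struct iwl_prph_scrath_mem_desc_addr_array and
 * IPC_DRAM_MAP_ENTRY_NUM_MAX come from the hunks above.
 */
static void example_fill_mem_descs(struct iwl_prph_scrath_mem_desc_addr_array *arr,
				   const struct iwl_pnvm_image *img)
{
	int i;

	for (i = 0; i < img->n_chunks && i < IPC_DRAM_MAP_ENTRY_NUM_MAX; i++) {
		/* copy chunk i into DRAM and obtain its bus address */
		dma_addr_t phys = dma_map_chunk(img->chunks[i].data,
						img->chunks[i].len);

		/* firmware reads these little-endian 64-bit addresses */
		arr->mem_descs[i] = cpu_to_le64(phys);
	}
}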
+ __le32 reserved[10]; struct iwl_context_info_dram dram; } __packed; /* PERIPH_SCRATCH_S */ @@ -261,9 +287,18 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive); -int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, - const void *data, u32 len); -int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, - const void *data, u32 len); - +int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa); +void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); +int +iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa); +void +iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); +int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans, + u32 mbx_addr_0_step, u32 mbx_addr_1_step); #endif /* __iwl_context_info_file_gen3_h__ */ diff --git a/iwl-context-info.h b/iwl-context-info.h index 4354d5acac9f..1a1321db137c 100644 --- a/iwl-context-info.h +++ b/iwl-context-info.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2020, 2022 Intel Corporation */ #ifndef __iwl_context_info_file_h__ #define __iwl_context_info_file_h__ @@ -177,6 +177,9 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); int iwl_pcie_init_fw_sec(struct iwl_trans *trans, const struct fw_img *fw, struct iwl_context_info_dram *ctxt_dram); +void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys); int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, const void *data, u32 len, struct iwl_dram_data *dram); diff --git a/iwl-csr.h b/iwl-csr.h index 3e1f011e93aa..587368a0ad4a 100644 --- a/iwl-csr.h +++ b/iwl-csr.h @@ -102,6 +102,8 @@ #define CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff #define CSR_LTR_LONG_VAL_AD_SCALE_USEC 2 +#define CSR_LTR_LAST_MSG (CSR_BASE + 0x0DC) + /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) @@ -309,6 +311,8 @@ enum { SILICON_A_STEP = 0, SILICON_B_STEP, SILICON_C_STEP, + SILICON_D_STEP, + SILICON_E_STEP, SILICON_Z_STEP = 0xf, }; @@ -348,6 +352,7 @@ enum { #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) #define CSR_HW_RF_ID_TYPE_GF (0x0010D000) #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000) +#define CSR_HW_RF_ID_TYPE_MS (0x00111000) /* HW_RF CHIP STEP */ #define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF) diff --git a/iwl-dbg-tlv.c b/iwl-dbg-tlv.c index 866a33f49915..ef5baee6c9c5 100644 --- a/iwl-dbg-tlv.c +++ b/iwl-dbg-tlv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include <linux/firmware.h> #include "iwl-drv.h" @@ -138,6 +138,12 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans, alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) goto err; + if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH && + alloc->req_size == 0) { + IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n"); + return -EINVAL; + } + trans->dbg.fw_mon_cfg[alloc_id] = *alloc; return 0; @@ 
-350,9 +356,9 @@ void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv, ret = dbg_tlv_alloc[tlv_idx](trans, tlv); if (ret) { - IWL_ERR(trans, - "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n", - type, ret, ext); + IWL_WARN(trans, + "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n", + type, ret, ext); goto out_err; } @@ -371,7 +377,7 @@ void iwl_dbg_tlv_del_timers(struct iwl_trans *trans) struct iwl_dbg_tlv_timer_node *node, *tmp; list_for_each_entry_safe(node, tmp, timer_list, list) { - del_timer(&node->timer); + timer_shutdown_sync(&node->timer); list_del(&node->list); kfree(node); } @@ -580,8 +586,14 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt, fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id]; fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id]; - if (fw_mon->num_frags || - fw_mon_cfg->buf_location != + if (fw_mon->num_frags) { + for (i = 0; i < fw_mon->num_frags; i++) + memset(fw_mon->frags[i].block, 0, + fw_mon->frags[i].size); + return 0; + } + + if (fw_mon_cfg->buf_location != cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH)) return 0; @@ -590,6 +602,9 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt, if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) return -EIO; num_frags = 1; + } else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && + alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) { + return -EIO; } remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size), @@ -729,7 +744,8 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt, if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) != IWL_FW_INI_LOCATION_DRAM_PATH) { - IWL_DEBUG_FW(fwrt, "DRAM_PATH is not supported alloc_id %u\n", alloc_id); + IWL_DEBUG_FW(fwrt, "WRT: alloc_id %u location is not in DRAM_PATH\n", + alloc_id); return -1; } @@ -785,25 +801,27 @@ static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt) IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT)) return; - dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD); - dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD); + memset(dram_info, 0, sizeof(*dram_info)); for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1; - i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) { + i < IWL_FW_INI_ALLOCATION_NUM; i++) { + if (fwrt->trans->dbg.fw_mon_cfg[i].buf_location == + IWL_FW_INI_LOCATION_INVALID) + continue; + ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info); if (!ret) dram_alloc = true; else - IWL_WARN(fwrt, + IWL_INFO(fwrt, "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n", i, ret); } - if (dram_alloc) - IWL_DEBUG_FW(fwrt, "block data after %08x\n", - dram_info->first_word); - else - memset(frags->block, 0, sizeof(*dram_info)); + if (dram_alloc) { + dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD); + dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD); + } } static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt, @@ -1215,11 +1233,12 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, } fwrt->trans->dbg.restart_required = FALSE; - IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n", - tp, dump_data.trig->reset_fw); - IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n", - fwrt->trans->dbg.restart_required, - fwrt->trans->dbg.last_tp_resetfw); + IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n", + tp, dump_data.trig->reset_fw); + IWL_DEBUG_FW(fwrt, + "WRT: restart_required %d, last_tp_resetfw %d\n", + fwrt->trans->dbg.restart_required, + fwrt->trans->dbg.last_tp_resetfw); if 
(fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { @@ -1232,18 +1251,19 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { - IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n"); + IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n"); fwrt->trans->dbg.restart_required = TRUE; } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { - IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n"); + IWL_DEBUG_FW(fwrt, + "WRT: stop only and no reload firmware\n"); fwrt->trans->dbg.restart_required = FALSE; fwrt->trans->dbg.last_tp_resetfw = le32_to_cpu(dump_data.trig->reset_fw); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_NOTHING) { - IWL_DEBUG_INFO(fwrt, - "WRT: nothing need to be done after debug collection\n"); + IWL_DEBUG_FW(fwrt, + "WRT: nothing need to be done after debug collection\n"); } else { IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", le32_to_cpu(dump_data.trig->reset_fw)); @@ -1258,18 +1278,23 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) int ret, i; u32 failed_alloc = 0; - if (*ini_dest != IWL_FW_INI_LOCATION_INVALID) - return; - - IWL_DEBUG_FW(fwrt, - "WRT: Generating active triggers list, domain 0x%x\n", - fwrt->trans->dbg.domains_bitmap); + if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) { + IWL_DEBUG_FW(fwrt, + "WRT: Generating active triggers list, domain 0x%x\n", + fwrt->trans->dbg.domains_bitmap); - for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) { - struct iwl_dbg_tlv_time_point_data *tp = - &fwrt->trans->dbg.time_point[i]; + for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) { + struct iwl_dbg_tlv_time_point_data *tp = + &fwrt->trans->dbg.time_point[i]; - iwl_dbg_tlv_gen_active_trig_list(fwrt, tp); + iwl_dbg_tlv_gen_active_trig_list(fwrt, tp); + } + } else if (*ini_dest != IWL_FW_INI_LOCATION_DRAM_PATH) { + /* For DRAM, go through the loop below to clear all the buffers + * properly on restart, otherwise garbage may be left there and + * leak into new debug dumps. + */ + return; } *ini_dest = IWL_FW_INI_LOCATION_INVALID; @@ -1324,7 +1349,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) "WRT: removing allocation id %d from region id %d\n", le32_to_cpu(reg->dram_alloc_id), i); - failed_alloc &= ~le32_to_cpu(reg->dram_alloc_id); + failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id)); fwrt->trans->dbg.unsupported_region_msk |= BIT(i); kfree(*active_reg); diff --git a/iwl-debug.c b/iwl-debug.c index ae4c2a3d63d5..3a3c13a41fc6 100644 --- a/iwl-debug.c +++ b/iwl-debug.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2011, 2021 Intel Corporation + * Copyright (C) 2005-2011, 2021-2022 Intel Corporation */ #include <linux/device.h> #include <linux/interrupt.h> @@ -57,6 +57,7 @@ void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...) 
default: break; } + vaf.va = &args; trace_iwlwifi_err(&vaf); va_end(args); } diff --git a/iwl-drv.c b/iwl-drv.c index a2203f661321..3d87d26845e7 100644 --- a/iwl-drv.c +++ b/iwl-drv.c @@ -127,6 +127,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) kfree(drv->fw.iml); kfree(drv->fw.ucode_capa.cmd_versions); kfree(drv->fw.phy_integration_ver); + kfree(drv->trans->dbg.pc_data); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -157,13 +158,71 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, return 0; } +static inline char iwl_drv_get_step(int step) +{ + if (step == SILICON_Z_STEP) + return 'z'; + return 'a' + step; +} + +const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) +{ + char mac_step, rf_step; + const char *rf, *cdb; + + if (trans->cfg->fw_name_pre) + return trans->cfg->fw_name_pre; + + if (WARN_ON(!trans->cfg->fw_name_mac)) + return "unconfigured"; + + mac_step = iwl_drv_get_step(trans->hw_rev_step); + + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { + case IWL_CFG_RF_TYPE_HR1: + case IWL_CFG_RF_TYPE_HR2: + rf = "hr"; + break; + case IWL_CFG_RF_TYPE_GF: + rf = "gf"; + break; + case IWL_CFG_RF_TYPE_MR: + rf = "mr"; + break; + case IWL_CFG_RF_TYPE_MS: + rf = "ms"; + break; + case IWL_CFG_RF_TYPE_FM: + rf = "fm"; + break; + case IWL_CFG_RF_TYPE_WH: + rf = "wh"; + break; + default: + return "unknown-rf"; + } + + cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? "4" : ""; + + rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id)); + + scnprintf(buf, FW_NAME_PRE_BUFSIZE, + "iwlwifi-%s-%c0-%s%s-%c0", + trans->cfg->fw_name_mac, mac_step, + rf, cdb, rf_step); + + return buf; +} +IWL_EXPORT_SYMBOL(iwl_drv_get_fwname_pre); + static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context); static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const struct iwl_cfg *cfg = drv->trans->cfg; - char tag[8]; + char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; + const char *fw_name_pre; if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && (drv->trans->hw_rev_step != SILICON_B_STEP && @@ -174,25 +233,24 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) return -EINVAL; } - if (first) { + fw_name_pre = iwl_drv_get_fwname_pre(drv->trans, _fw_name_pre); + + if (first) drv->fw_index = cfg->ucode_api_max; - sprintf(tag, "%d", drv->fw_index); - } else { + else drv->fw_index--; - sprintf(tag, "%d", drv->fw_index); - } if (drv->fw_index < cfg->ucode_api_min) { IWL_ERR(drv, "no suitable firmware found!\n"); if (cfg->ucode_api_min == cfg->ucode_api_max) { - IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, + IWL_ERR(drv, "%s-%d is required\n", fw_name_pre, cfg->ucode_api_max); } else { - IWL_ERR(drv, "minimum version required: %s%d\n", - cfg->fw_name_pre, cfg->ucode_api_min); - IWL_ERR(drv, "maximum version supported: %s%d\n", - cfg->fw_name_pre, cfg->ucode_api_max); + IWL_ERR(drv, "minimum version required: %s-%d\n", + fw_name_pre, cfg->ucode_api_min); + IWL_ERR(drv, "maximum version supported: %s-%d\n", + fw_name_pre, cfg->ucode_api_max); } IWL_ERR(drv, @@ -200,8 +258,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) return -ENOENT; } - snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", - cfg->fw_name_pre, tag); + snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s-%d.ucode", + fw_name_pre, drv->fw_index); IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); @@ -898,7 +956,7 @@ static int 
iwl_parse_tlv_firmware(struct iwl_drv *drv, drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus = true; } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) { - IWL_ERR(drv, "Driver support upto 2 CPUs\n"); + IWL_ERR(drv, "Driver support up to 2 CPUs\n"); return -EINVAL; } break; @@ -1158,6 +1216,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, capa->num_stations = le32_to_cpup((const __le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_NUM_BEACONS: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + capa->num_beacons = + le32_to_cpup((const __le32 *)tlv_data); + break; case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: { const struct iwl_umac_debug_addrs *dbg_ptrs = (const void *)tlv_data; @@ -1236,6 +1300,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, iwl_drv_set_dump_exclude(drv, tlv_type, tlv_data, tlv_len); break; + case IWL_UCODE_TLV_CURRENT_PC: + if (tlv_len < sizeof(struct iwl_pc_data)) + goto invalid_tlv_len; + drv->trans->dbg.num_pc = + tlv_len / sizeof(struct iwl_pc_data); + drv->trans->dbg.pc_data = + kmemdup(tlv_data, tlv_len, GFP_KERNEL); + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -1410,6 +1482,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX; + fw->ucode_capa.num_beacons = 1; /* dump all fw memory areas by default */ fw->dbg.dump_mask = 0xffffffff; @@ -1971,3 +2044,6 @@ MODULE_PARM_DESC(remove_when_gone, module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, S_IRUGO); MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)"); + +module_param_named(disable_11be, iwlwifi_mod_params.disable_11be, bool, 0444); +MODULE_PARM_DESC(disable_11be, "Disable EHT capabilities (default: false)"); diff --git a/iwl-drv.h b/iwl-drv.h index 80073f973334..6c19989e4ab7 100644 --- a/iwl-drv.h +++ b/iwl-drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2020-2021 Intel Corporation + * Copyright (C) 2005-2014, 2020-2021, 2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #ifndef __iwl_drv_h__ @@ -92,4 +92,8 @@ void iwl_drv_stop(struct iwl_drv *drv); /* max retry for init flow */ #define IWL_MAX_INIT_RETRY 2 +#define FW_NAME_PRE_BUFSIZE 64 +struct iwl_trans; +const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf); + #endif /* __iwl_drv_h__ */ diff --git a/iwl-eeprom-parse.h b/iwl-eeprom-parse.h index 0efffb6eeb1e..0e8ca761d24b 100644 --- a/iwl-eeprom-parse.h +++ b/iwl-eeprom-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018 Intel Corporation + * Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation * Copyright (C) 2015 Intel Mobile Communications GmbH */ #ifndef __iwl_eeprom_parse_h__ @@ -31,6 +31,7 @@ struct iwl_nvm_data { bool sku_cap_amt_enable; bool sku_cap_ipan_enable; bool sku_cap_mimo_disabled; + bool sku_cap_11be_enable; u16 radio_cfg_type; u8 radio_cfg_step; @@ -46,13 +47,12 @@ struct iwl_nvm_data { struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; /* - * iftype data for low (2.4 GHz) and high (5 and 6 GHz) bands, - * we can use the same for 5 and 6 GHz bands because they have - * the same data + * iftype data for low (2.4 GHz) high (5 GHz) and uhb (6 GHz) bands */ struct { struct ieee80211_sband_iftype_data low[2]; struct 
ieee80211_sband_iftype_data high[2]; + struct ieee80211_sband_iftype_data uhb[2]; } iftd; struct ieee80211_channel channels[]; @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fh_h__ @@ -71,7 +71,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, unsigned int chnl) { - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { WARN_ON_ONCE(chnl >= 64); return TFH_TFDQ_CBB_TABLE + 8 * chnl; } @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021 Intel Corporation + * Copyright (C) 2003-2014, 2018-2022 Intel Corporation * Copyright (C) 2015-2016 Intel Deutschland GmbH */ #include <linux/delay.h> @@ -72,6 +72,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) return value; } + /* return as if we have a HW timeout/failure */ return 0x5a5a5a5a; } IWL_EXPORT_SYMBOL(iwl_read_direct32); @@ -143,6 +144,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) return val; } + /* return as if we have a HW timeout/failure */ return 0x5a5a5a5a; } IWL_EXPORT_SYMBOL(iwl_read_prph); diff --git a/iwl-modparams.h b/iwl-modparams.h index d0b4d02bdab9..1cf26ab4f488 100644 --- a/iwl-modparams.h +++ b/iwl-modparams.h @@ -62,6 +62,7 @@ enum iwl_uapsd_disable { * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. * @enable_ini: enable new FW debug infratructure (INI TLVs) + * @disable_11be: disable EHT capabilities, default = false. */ struct iwl_mod_params { int swcrypto; diff --git a/iwl-nvm-parse.c b/iwl-nvm-parse.c index 9040da3dcce3..8c23f57f5c89 100644 --- a/iwl-nvm-parse.c +++ b/iwl-nvm-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -81,7 +81,7 @@ static const u16 iwl_nvm_channels[] = { /* 2.4 GHz */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 5 GHz */ - 36, 40, 44 , 48, 52, 56, 60, 64, + 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 161, 165 }; @@ -173,34 +173,34 @@ enum iwl_nvm_channel_flags { }; /** - * enum iwl_reg_capa_flags - global flags applied for the whole regulatory + * enum iwl_reg_capa_flags_v1 - global flags applied for the whole regulatory * domain. - * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * @REG_CAPA_V1_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the * 2.4Ghz band is allowed. - * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * @REG_CAPA_V1_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the * 5Ghz band is allowed. - * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * @REG_CAPA_V1_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed * for this regulatory domain (valid only in 5Ghz). - * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * @REG_CAPA_V1_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed * for this regulatory domain (valid only in 5Ghz). - * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. 
- * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. - * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden + * @REG_CAPA_V1_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. + * @REG_CAPA_V1_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_V1_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden * for this regulatory domain (valid only in 5Ghz). - * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. - * @REG_CAPA_11AX_DISABLED: 11ax is forbidden for this regulatory domain. + * @REG_CAPA_V1_DC_HIGH_ENABLED: DC HIGH allowed. + * @REG_CAPA_V1_11AX_DISABLED: 11ax is forbidden for this regulatory domain. */ -enum iwl_reg_capa_flags { - REG_CAPA_BF_CCD_LOW_BAND = BIT(0), - REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), - REG_CAPA_160MHZ_ALLOWED = BIT(2), - REG_CAPA_80MHZ_ALLOWED = BIT(3), - REG_CAPA_MCS_8_ALLOWED = BIT(4), - REG_CAPA_MCS_9_ALLOWED = BIT(5), - REG_CAPA_40MHZ_FORBIDDEN = BIT(7), - REG_CAPA_DC_HIGH_ENABLED = BIT(9), - REG_CAPA_11AX_DISABLED = BIT(10), -}; +enum iwl_reg_capa_flags_v1 { + REG_CAPA_V1_BF_CCD_LOW_BAND = BIT(0), + REG_CAPA_V1_BF_CCD_HIGH_BAND = BIT(1), + REG_CAPA_V1_160MHZ_ALLOWED = BIT(2), + REG_CAPA_V1_80MHZ_ALLOWED = BIT(3), + REG_CAPA_V1_MCS_8_ALLOWED = BIT(4), + REG_CAPA_V1_MCS_9_ALLOWED = BIT(5), + REG_CAPA_V1_40MHZ_FORBIDDEN = BIT(7), + REG_CAPA_V1_DC_HIGH_ENABLED = BIT(9), + REG_CAPA_V1_11AX_DISABLED = BIT(10), +}; /* GEO_CHANNEL_CAPABILITIES_API_S_VER_1 */ /** * enum iwl_reg_capa_flags_v2 - global flags applied for the whole regulatory @@ -234,7 +234,31 @@ enum iwl_reg_capa_flags_v2 { REG_CAPA_V2_WEATHER_DISABLED = BIT(7), REG_CAPA_V2_40MHZ_ALLOWED = BIT(8), REG_CAPA_V2_11AX_DISABLED = BIT(10), -}; +}; /* GEO_CHANNEL_CAPABILITIES_API_S_VER_2 */ + +/** + * enum iwl_reg_capa_flags_v4 - global flags applied for the whole regulatory + * domain. + * @REG_CAPA_V4_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_V4_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_V4_MCS_12_ALLOWED: 11ac with MCS 12 is allowed. + * @REG_CAPA_V4_MCS_13_ALLOWED: 11ac with MCS 13 is allowed. + * @REG_CAPA_V4_11BE_DISABLED: 11be is forbidden for this regulatory domain. + * @REG_CAPA_V4_11AX_DISABLED: 11ax is forbidden for this regulatory domain. + * @REG_CAPA_V4_320MHZ_ALLOWED: 11be channel with a width of 320Mhz is allowed + * for this regulatory domain (valid only in 5GHz). + */ +enum iwl_reg_capa_flags_v4 { + REG_CAPA_V4_160MHZ_ALLOWED = BIT(3), + REG_CAPA_V4_80MHZ_ALLOWED = BIT(4), + REG_CAPA_V4_MCS_12_ALLOWED = BIT(5), + REG_CAPA_V4_MCS_13_ALLOWED = BIT(6), + REG_CAPA_V4_11BE_DISABLED = BIT(8), + REG_CAPA_V4_11AX_DISABLED = BIT(13), + REG_CAPA_V4_320MHZ_ALLOWED = BIT(16), +}; /* GEO_CHANNEL_CAPABILITIES_API_S_VER_4 */ /* * API v2 for reg_capa_flags is relevant from version 6 and onwards of the @@ -242,23 +266,33 @@ enum iwl_reg_capa_flags_v2 { */ #define REG_CAPA_V2_RESP_VER 6 +/* API v4 for reg_capa_flags is relevant from version 8 and onwards of the + * MCC update command response. + */ +#define REG_CAPA_V4_RESP_VER 8 + /** * struct iwl_reg_capa - struct for global regulatory capabilities, Used for * handling the different APIs of reg_capa_flags. * * @allow_40mhz: 11n channel with a width of 40Mhz is allowed - * for this regulatory domain (valid only in 5Ghz). + * for this regulatory domain. * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed - * for this regulatory domain (valid only in 5Ghz). 
+ * for this regulatory domain (valid only in 5 and 6 Ghz). * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed - * for this regulatory domain (valid only in 5Ghz). + * for this regulatory domain (valid only in 5 and 6 Ghz). + * @allow_320mhz: 11be channel with a width of 320Mhz is allowed + * for this regulatory domain (valid only in 6 Ghz). * @disable_11ax: 11ax is forbidden for this regulatory domain. + * @disable_11be: 11be is forbidden for this regulatory domain. */ struct iwl_reg_capa { - u16 allow_40mhz; - u16 allow_80mhz; - u16 allow_160mhz; - u16 disable_11ax; + bool allow_40mhz; + bool allow_80mhz; + bool allow_160mhz; + bool allow_320mhz; + bool disable_11ax; + bool disable_11be; }; static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, @@ -464,6 +498,9 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, IEEE80211_VHT_MAX_AMPDU_1024K << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + if (!trans->cfg->ht_params->stbc) + vht_cap->cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK; + if (data->vht160_supported) vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ | IEEE80211_VHT_CAP_SHORT_GI_160; @@ -479,7 +516,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, num_tx_ants = 1; } - if (num_tx_ants > 1) + if (trans->cfg->ht_params->stbc && num_tx_ants > 1) vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; else vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; @@ -546,7 +583,7 @@ static const u8 iwl_vendor_caps[] = { 0x00 }; -static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { +static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { @@ -571,10 +608,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS | IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | @@ -631,6 +664,77 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | + IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | + IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, + .phy_cap_info[1] = + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK, + .phy_cap_info[3] = + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, + + .phy_cap_info[4] = + IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | + IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI, + .phy_cap_info[5] = + 
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | + IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT, + .phy_cap_info[6] = + IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | + IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP, + .phy_cap_info[8] = + IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA | + IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA, + }, + + /* For all MCS and bandwidth, set 2 NSS for both Tx and + * Rx - note we don't set the only_20mhz, but due to this + * being a union, it gets set correctly anyway. + */ + .eht_mcs_nss_supp = { + .bw._80 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._160 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._320 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + }, + + /* + * PPE thresholds for NSS = 2, and RU index bitmap set + * to 0xc. + */ + .eht_ppe_thres = {0xc1, 0x0e, 0xe0 } + }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), @@ -644,9 +748,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = @@ -687,6 +788,49 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { */ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, + .eht_cap = { + .has_eht = true, + .eht_cap_elem = { + .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_OM_CONTROL | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2, + .phy_cap_info[0] = + IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | + IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI, + .phy_cap_info[5] = + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT, + }, + + /* For all MCS and bandwidth, set 2 NSS for both Tx and + * Rx - note we don't set the only_20mhz, but due to this + * being a union, it gets set correctly anyway. + */ + .eht_mcs_nss_supp = { + .bw._80 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._160 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + .bw._320 = { + .rx_tx_mcs9_max_nss = 0x22, + .rx_tx_mcs11_max_nss = 0x22, + .rx_tx_mcs13_max_nss = 0x22, + }, + }, + + /* + * PPE thresholds for NSS = 2, and RU index bitmap set + * to 0xc. 
+ */ + .eht_ppe_thres = {0xc1, 0x0e, 0xe0 } + }, }, }; @@ -738,26 +882,59 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans, static void iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, + struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, struct ieee80211_sband_iftype_data *iftype_data, u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw) { bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP); + bool no_320; + + no_320 = !trans->trans_cfg->integrated && + trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB; + + if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be) + iftype_data->eht_cap.has_eht = false; /* Advertise an A-MPDU exponent extension based on * operating band */ - if (sband->band != NL80211_BAND_2GHZ) + if (sband->band == NL80211_BAND_6GHZ && iftype_data->eht_cap.has_eht) + iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2; + else if (sband->band != NL80211_BAND_2GHZ) iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1; else iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3; - if (is_ap && iwlwifi_mod_params.nvm_file) + switch (sband->band) { + case NL80211_BAND_2GHZ: iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] |= + u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454, + IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); + break; + case NL80211_BAND_6GHZ: + if (!no_320) { + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= + IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= + IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK; + } + fallthrough; + case NL80211_BAND_5GHZ: + iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; + break; + default: + WARN_ON(1); + break; + } if ((tx_chains & rx_chains) == ANT_AB) { iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= @@ -765,21 +942,50 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2; - if (!is_ap) + if (!is_ap) { iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_MAX_NC_2; - } else if (!is_ap) { - /* If not 2x2, we need to indicate 1x1 in the - * Midamble RX Max NSTS - but not for AP mode - */ - iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &= - ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; - iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= - ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; - iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= - IEEE80211_HE_PHY_CAP7_MAX_NC_1; + + if (iftype_data->eht_cap.has_eht) { + /* + * Set the number of sounding dimensions for each + * bandwidth to 1 to indicate the maximal supported + * value of TXVECTOR parameter NUM_STS of 2 + */ + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] |= 0x49; + + /* + * Set the MAX NC to 1 to indicate sounding feedback of + * 2 supported by the beamfomee. 
+ */ + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10; + } + } + } else { + if (iftype_data->eht_cap.has_eht) { + struct ieee80211_eht_mcs_nss_supp *mcs_nss = + &iftype_data->eht_cap.eht_mcs_nss_supp; + + memset(mcs_nss, 0x11, sizeof(*mcs_nss)); + } + + if (!is_ap) { + /* If not 2x2, we need to indicate 1x1 in the + * Midamble RX Max NSTS - but not for AP mode + */ + iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &= + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; + iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |= + IEEE80211_HE_PHY_CAP7_MAX_NC_1; + } } + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap) + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |= + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO; + switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_MR: @@ -792,6 +998,31 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, break; } + if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + iftype_data->eht_cap.has_eht) { + iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &= + ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | + IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &= + ~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | + IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | + IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | + IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &= + ~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | + IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[5] &= + ~IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[6] &= + ~(IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | + IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[5] |= + IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF; + } + if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT)) iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BCAST_TWT; @@ -801,6 +1032,13 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->vendor_elems.data = iwl_vendor_caps; iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps); } + + if (!trans->cfg->ht_params->stbc) { + iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; + iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &= + ~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; + } } static void iwl_init_he_hw_capab(struct iwl_trans *trans, @@ -816,29 +1054,32 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, if (WARN_ON(sband->iftype_data)) return; - BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_capa)); - BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_capa)); + BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa)); + BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa)); + BUILD_BUG_ON(sizeof(data->iftd.uhb) != sizeof(iwl_he_eht_capa)); switch (sband->band) { case NL80211_BAND_2GHZ: iftype_data = data->iftd.low; break; case NL80211_BAND_5GHZ: - case NL80211_BAND_6GHZ: iftype_data = data->iftd.high; break; + case NL80211_BAND_6GHZ: + iftype_data = data->iftd.uhb; + break; default: WARN_ON(1); return; } - 
memcpy(iftype_data, iwl_he_capa, sizeof(iwl_he_capa)); + memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa)); sband->iftype_data = iftype_data; - sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); + sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa); for (i = 0; i < sband->n_iftype_data; i++) - iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i], + iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i], tx_chains, rx_chains, fw); iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains); @@ -1333,27 +1574,41 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, if (!reg_capa.allow_160mhz) flags |= NL80211_RRF_NO_160MHZ; + + if (!reg_capa.allow_320mhz) + flags |= NL80211_RRF_NO_320MHZ; } + if (reg_capa.disable_11ax) flags |= NL80211_RRF_NO_HE; + if (reg_capa.disable_11be) + flags |= NL80211_RRF_NO_EHT; + return flags; } -static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver) +static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver) { - struct iwl_reg_capa reg_capa; - - if (resp_ver >= REG_CAPA_V2_RESP_VER) { + struct iwl_reg_capa reg_capa = {}; + + if (resp_ver >= REG_CAPA_V4_RESP_VER) { + reg_capa.allow_40mhz = true; + reg_capa.allow_80mhz = flags & REG_CAPA_V4_80MHZ_ALLOWED; + reg_capa.allow_160mhz = flags & REG_CAPA_V4_160MHZ_ALLOWED; + reg_capa.allow_320mhz = flags & REG_CAPA_V4_320MHZ_ALLOWED; + reg_capa.disable_11ax = flags & REG_CAPA_V4_11AX_DISABLED; + reg_capa.disable_11be = flags & REG_CAPA_V4_11BE_DISABLED; + } else if (resp_ver >= REG_CAPA_V2_RESP_VER) { reg_capa.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED; reg_capa.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED; reg_capa.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED; reg_capa.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED; } else { - reg_capa.allow_40mhz = !(flags & REG_CAPA_40MHZ_FORBIDDEN); - reg_capa.allow_80mhz = flags & REG_CAPA_80MHZ_ALLOWED; - reg_capa.allow_160mhz = flags & REG_CAPA_160MHZ_ALLOWED; - reg_capa.disable_11ax = flags & REG_CAPA_11AX_DISABLED; + reg_capa.allow_40mhz = !(flags & REG_CAPA_V1_40MHZ_FORBIDDEN); + reg_capa.allow_80mhz = flags & REG_CAPA_V1_80MHZ_ALLOWED; + reg_capa.allow_160mhz = flags & REG_CAPA_V1_160MHZ_ALLOWED; + reg_capa.disable_11ax = flags & REG_CAPA_V1_11AX_DISABLED; } return reg_capa; } @@ -1361,7 +1616,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver) struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u16 cap, u8 resp_ver) + u16 geo_info, u32 cap, u8 resp_ver) { int ch_idx; u16 ch_flags; @@ -1783,6 +2038,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); nvm->sku_cap_mimo_disabled = !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); + if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) + nvm->sku_cap_11be_enable = true; /* Initialize PHY sku data */ nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); diff --git a/iwl-nvm-parse.h b/iwl-nvm-parse.h index e01f7751cf11..c79f72d54482 100644 --- a/iwl-nvm-parse.h +++ b/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2021 Intel Corporation + * Copyright (C) 2005-2015, 2018-2022 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * 
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u16 cap, u8 resp_ver); + u16 geo_info, u32 cap, u8 resp_ver); /** * struct iwl_nvm_section - describes an NVM section in memory. diff --git a/iwl-prph.h b/iwl-prph.h index a22788a68168..6dd381ff0f9e 100644 --- a/iwl-prph.h +++ b/iwl-prph.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -350,6 +350,11 @@ #define WFPM_OTP_CFG1_ADDR 0x00a03098 #define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) #define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) +#define WFPM_OTP_BZ_BNJ_JACKET_BIT 5 +#define WFPM_OTP_BZ_BNJ_CDB_BIT 4 +#define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT) +#define WFPM_OTP_CFG1_IS_CDB(_val) (((_val) & 0x00000010) >> WFPM_OTP_BZ_BNJ_CDB_BIT) + #define WFPM_GP2 0xA030B4 @@ -368,6 +373,7 @@ enum { #define CNVR_AUX_MISC_CHIP 0xA2B800 #define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 #define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938 +#define CNVI_SCU_SEQ_DATA_DW9 0xA27488 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 @@ -377,6 +383,7 @@ enum { #define PREG_PRPH_WPROT_22000 0xA04D00 #define SB_MODIFY_CFG_FLAG 0xA03088 +#define SB_CFG_RESIDES_IN_OTP_MASK 0x10 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 @@ -389,6 +396,8 @@ enum { #define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c #define WFPM_ARC1_PD_NOTIFICATION 0xa03044 #define HPM_SECONDARY_DEVICE_STATE 0xa03404 +#define WFPM_MAC_OTP_CFG7_ADDR 0xa03338 +#define WFPM_MAC_OTP_CFG7_DATA 0xa0333c /* For UMAG_GEN_HW_STATUS reg check */ @@ -441,6 +450,8 @@ enum { #define REG_CRF_ID_TYPE_GF_TC 0xF08 #define REG_CRF_ID_TYPE_MR 0x810 #define REG_CRF_ID_TYPE_FM 0x910 +#define REG_CRF_ID_TYPE_FMI 0x930 +#define REG_CRF_ID_TYPE_FMR 0x900 #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) @@ -475,6 +486,10 @@ enum { #define FSEQ_ALIVE_TOKEN 0xA340F0 #define FSEQ_CNVI_ID 0xA3408C #define FSEQ_CNVR_ID 0xA34090 +#define FSEQ_PREV_CNVIO_INIT_VERSION 0xA34084 +#define FSEQ_WIFI_FSEQ_VERSION 0xA34040 +#define FSEQ_BT_FSEQ_VERSION 0xA34044 +#define FSEQ_CLASS_TP_VERSION 0xA34078 #define IWL_D3_SLEEP_STATUS_SUSPEND 0xD3 #define IWL_D3_SLEEP_STATUS_RESUME 0xD0 @@ -498,4 +513,7 @@ enum { #define REG_OTP_MINOR 0xA0333C +#define WFPM_LMAC2_PD_NOTIFICATION 0xA033CC +#define WFPM_LMAC2_PD_RE_READ BIT(31) + #endif /* __iwl_prph_h__ */ diff --git a/iwl-trans.c b/iwl-trans.c index b1af9359cea5..4bd759432d44 100644 --- a/iwl-trans.c +++ b/iwl-trans.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2021, 2023 Intel Corporation */ #include <linux/kernel.h> #include <linux/bsearch.h> @@ -42,7 +42,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { trans->txqs.tfd.addr_size = 64; trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); @@ -101,7 +101,7 @@ int iwl_trans_init(struct iwl_trans *trans) /* Some things must not change even if the config does */ WARN_ON(trans->txqs.tfd.addr_size != - 
(trans->trans_cfg->use_tfh ? 64 : 36)); + (trans->trans_cfg->gen2 ? 64 : 36)); snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); diff --git a/iwl-trans.h b/iwl-trans.h index d659ccd065f7..d02943d0ea62 100644 --- a/iwl-trans.h +++ b/iwl-trans.h @@ -459,6 +459,24 @@ struct iwl_trans_rxq_dma_data { u64 ur_bd_cb; }; +/* maximal number of DRAM MAP entries supported by FW */ +#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64 + +/** + * struct iwl_pnvm_image - contains info about the parsed pnvm image + * @chunks: array of pointers to pnvm payloads and their sizes + * @n_chunks: the number of the pnvm payloads. + * @version: the version of the loaded PNVM image + */ +struct iwl_pnvm_image { + struct { + const void *data; + u32 len; + } chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX]; + u32 n_chunks; + u32 version; +}; + /** * struct iwl_trans_ops - transport specific operations * @@ -541,8 +559,11 @@ struct iwl_trans_rxq_dma_data { * Note that the transport must fill in the proper file headers. * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup * of the trans debugfs + * @load_pnvm: save the pnvm data in DRAM * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the * context info. + * @load_reduce_power: copy reduce power table to the corresponding DRAM memory + * @set_reduce_power: set reduce power table addresses in the sratch buffer * @interrupts: disable/enable interrupts to transport */ struct iwl_trans_ops { @@ -614,9 +635,17 @@ struct iwl_trans_ops { void *sanitize_ctx); void (*debugfs_cleanup)(struct iwl_trans *trans); void (*sync_nmi)(struct iwl_trans *trans); - int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len); - int (*set_reduce_power)(struct iwl_trans *trans, - const void *data, u32 len); + int (*load_pnvm)(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa); + void (*set_pnvm)(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); + int (*load_reduce_power)(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa); + void (*set_reduce_power)(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); + void (*interrupts)(struct iwl_trans *trans, bool enable); int (*imr_dma_data)(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, @@ -705,6 +734,19 @@ struct iwl_dram_data { }; /** + * @drams: array of several DRAM areas that contains the pnvm and power + * reduction table payloads. + * @n_regions: number of DRAM regions that were allocated + * @prph_scratch_mem_desc: points to a structure allocated in dram, + * designed to show FW where all the payloads are. 
+ */ +struct iwl_dram_regions { + struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX]; + struct iwl_dram_data prph_scratch_mem_desc; + u8 n_regions; +}; + +/** * struct iwl_fw_mon - fw monitor per allocation id * @num_frags: number of fragments * @frags: an array of DRAM buffer fragments @@ -748,6 +790,18 @@ struct iwl_imr_data { __le64 imr_base_addr; }; +#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES 32 + +/** + * struct iwl_pc_data - program counter details + * @pc_name: cpu name + * @pc_address: cpu program counter + */ +struct iwl_pc_data { + u8 pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES]; + u32 pc_address; +}; + /** * struct iwl_trans_debug - transport debug related data * @@ -775,6 +829,10 @@ struct iwl_imr_data { * @periodic_trig_list: periodic triggers list * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON * @ucode_preset: preset based on ucode + * @dump_file_name_ext: dump file name extension + * @dump_file_name_ext_valid: dump file name extension if valid or not + * @num_pc: number of program counter for cpu + * @pc_data: details of the program counter */ struct iwl_trans_debug { u8 n_dest_reg; @@ -813,6 +871,10 @@ struct iwl_trans_debug { bool restart_required; u32 last_tp_resetfw; struct iwl_imr_data imr_data; + u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME]; + bool dump_file_name_ext_valid; + u32 num_pc; + struct iwl_pc_data *pc_data; }; struct iwl_dma_ptr { @@ -976,12 +1038,16 @@ struct iwl_trans_txqs { * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. * @hw_rf_id a u32 with the device RF ID + * @hw_crf_id a u32 with the device CRF ID + * @hw_wfpm_id a u32 with the device wfpm ID * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. * @hw_rev_step: The mac step of the HW * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled + * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed + * @failed_to_load_reduce_power_image: set to true if pnvm loading failed * @wide_cmd_header: true when ucode supports wide command header format * @wait_command_queue: wait queue for sync commands * @num_rx_queues: number of RX queues allocated by the transport; @@ -999,6 +1065,10 @@ struct iwl_trans_txqs { * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. * @iwl_trans_txqs: transport tx queues data. 
+ * @mbx_addr_0_step: step address data 0 + * @mbx_addr_1_step: step address data 1 + * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), + * only valid for discrete (not integrated) NICs */ struct iwl_trans { bool csme_own; @@ -1015,6 +1085,9 @@ struct iwl_trans { u32 hw_rev; u32 hw_rev_step; u32 hw_rf_id; + u32 hw_crf_id; + u32 hw_cnv_id; + u32 hw_wfpm_id; u32 hw_id; char hw_id_str[52]; u32 sku_id[3]; @@ -1024,7 +1097,9 @@ struct iwl_trans { bool pm_support; bool ltr_enabled; u8 pnvm_loaded:1; + u8 fail_to_parse_pnvm_image:1; u8 reduce_power_loaded:1; + u8 failed_to_load_reduce_power_image:1; const struct iwl_hcmd_arr *command_groups; int command_groups_size; @@ -1053,6 +1128,10 @@ struct iwl_trans { const char *name; struct iwl_trans_txqs txqs; + u32 mbx_addr_0_step; + u32 mbx_addr_1_step; + + u8 pcie_link_speed; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ @@ -1131,7 +1210,7 @@ static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, { might_sleep(); if (!trans->ops->d3_suspend) - return 0; + return -EOPNOTSUPP; return trans->ops->d3_suspend(trans, test, reset); } @@ -1142,7 +1221,7 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans, { might_sleep(); if (!trans->ops->d3_resume) - return 0; + return -EOPNOTSUPP; return trans->ops->d3_resume(trans, status, test, reset); } @@ -1486,33 +1565,34 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, u32 sw_err_bit); -static inline int iwl_trans_set_pnvm(struct iwl_trans *trans, - const void *data, u32 len) +static inline int iwl_trans_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + const struct iwl_ucode_capabilities *capa) { - if (trans->ops->set_pnvm) { - int ret = trans->ops->set_pnvm(trans, data, len); - - if (ret) - return ret; - } - - trans->pnvm_loaded = true; - - return 0; + return trans->ops->load_pnvm(trans, pnvm_data, capa); } -static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans, - const void *data, u32 len) +static inline void iwl_trans_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) { - if (trans->ops->set_reduce_power) { - int ret = trans->ops->set_reduce_power(trans, data, len); + if (trans->ops->set_pnvm) + trans->ops->set_pnvm(trans, capa); +} - if (ret) - return ret; - } +static inline int iwl_trans_load_reduce_power + (struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) +{ + return trans->ops->load_reduce_power(trans, payloads, capa); +} - trans->reduce_power_loaded = true; - return 0; +static inline void +iwl_trans_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + if (trans->ops->set_reduce_power) + trans->ops->set_reduce_power(trans, capa); } static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) @@ -1537,10 +1617,16 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); +static inline bool iwl_trans_is_hw_error_value(u32 val) +{ + return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50); +} + /***************************************************** * driver (transport) register/unregister functions ******************************************************/ int __must_check iwl_pci_register_driver(void); void 
iwl_pci_unregister_driver(void); +void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan); #endif /* __iwl_trans_h__ */ diff --git a/mvm/binding.c b/mvm/binding.c index 0aac306304cb..458b97930059 100644 --- a/mvm/binding.c +++ b/mvm/binding.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2012-2014, 2020 Intel Corporation * Copyright (C) 2016 Intel Deutschland GmbH + * Copyright (C) 2022 Intel Corporation */ #include <net/mac80211.h> #include "fw-api.h" @@ -31,7 +32,7 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) { size = sizeof(cmd); - cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw, + cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm, phyctxt->channel->band)); } else { size = IWL_BINDING_CMD_SIZE_V1; @@ -75,7 +76,7 @@ static void iwl_mvm_iface_iterator(void *_data, u8 *mac, if (vif == data->ignore_vif) return; - if (mvmvif->phy_ctxt != data->phyctxt) + if (mvmvif->deflink.phy_ctxt != data->phyctxt) return; if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING)) @@ -132,7 +133,7 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + if (WARN_ON_ONCE(!mvmvif->deflink.phy_ctxt)) return -EINVAL; /* @@ -142,7 +143,8 @@ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (iwl_mvm_sf_update(mvm, vif, false)) return -EINVAL; - return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true); + return iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt, + true); } int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -150,10 +152,11 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) + if (WARN_ON_ONCE(!mvmvif->deflink.phy_ctxt)) return -EINVAL; - ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false); + ret = iwl_mvm_binding_update(mvm, vif, mvmvif->deflink.phy_ctxt, + false); if (!ret) if (iwl_mvm_sf_update(mvm, vif, true)) @@ -161,3 +164,11 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } + +u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band) +{ + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) || + band == NL80211_BAND_2GHZ) + return IWL_LMAC_24G_INDEX; + return IWL_LMAC_5G_INDEX; +} diff --git a/mvm/coex.c b/mvm/coex.c index 9b194cb8d65e..5a5b1128e75c 100644 --- a/mvm/coex.c +++ b/mvm/coex.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2013-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014, 2018-2020, 2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH */ #include <linux/ieee80211.h> @@ -106,7 +106,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); + chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); if (!chanctx_conf || chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) { @@ -194,7 +194,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, if (mvmsta->bt_reduced_txpower == enable) return 0; - value = mvmsta->sta_id; + value = mvmsta->deflink.sta_id; if (enable) value |= BT_REDUCED_TX_POWER_BIT; @@ -257,33 +257,35 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct 
iwl_mvm *mvm, swap(data->primary, data->secondary); } -/* must be called under rcu_read_lock */ -static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_bt_iterator_data *data, + unsigned int link_id) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_bt_iterator_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - struct ieee80211_chanctx_conf *chanctx_conf; /* default smps_mode is AUTOMATIC - only used for client modes */ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 bt_activity_grading, min_ag_for_static_smps; + struct ieee80211_chanctx_conf *chanctx_conf; + struct iwl_mvm_vif_link_info *link_info; + struct ieee80211_bss_conf *link_conf; int ave_rssi; lockdep_assert_held(&mvm->mutex); - switch (vif->type) { - case NL80211_IFTYPE_STATION: - break; - case NL80211_IFTYPE_AP: - if (!mvmvif->ap_ibss_active) - return; - break; - default: + link_info = mvmvif->link[link_id]; + if (!link_info) return; - } - chanctx_conf = rcu_dereference(vif->chanctx_conf); + link_conf = rcu_dereference(vif->link_conf[link_id]); + /* This can happen due to races: if we receive the notification + * and have the mutex held, while mac80211 is stuck on our mutex + * in the middle of removing the link. + */ + if (!link_conf) + return; + + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); /* If channel context is invalid or not on 2.4GHz .. */ if ((!chanctx_conf || @@ -291,9 +293,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (vif->type == NL80211_IFTYPE_STATION) { /* ... relax constraints and disable rssi events */ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + smps_mode, link_id); + iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); + /* FIXME: should this be per link? */ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); } return; @@ -311,20 +314,21 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, smps_mode = IEEE80211_SMPS_DYNAMIC; /* relax SMPS constraints for next association */ - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (mvmvif->phy_ctxt && - (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) + if (link_info->phy_ctxt && + (mvm->last_bt_notif.rrc_status & BIT(link_info->phy_ctxt->id))) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, - "mac %d: bt_activity_grading %d smps_req %d\n", - mvmvif->id, bt_activity_grading, smps_mode); + "mac %d link %d: bt_activity_grading %d smps_req %d\n", + mvmvif->id, link_info->fw_link_id, + bt_activity_grading, smps_mode); if (vif->type == NL80211_IFTYPE_STATION) iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, - smps_mode); + smps_mode, link_id); /* low latency is always primary */ if (iwl_mvm_vif_low_latency(mvmvif)) { @@ -353,6 +357,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, data->secondary = chanctx_conf; } + /* FIXME: TCM load per interface? or need something per link? 
*/ if (data->primary == chanctx_conf) data->primary_load = mvm->tcm.result.load[mvmvif->id]; else if (data->secondary == chanctx_conf) @@ -370,6 +375,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* if secondary is not NULL, it might be a GO */ data->secondary = chanctx_conf; + /* FIXME: TCM load per interface? or need something per link? */ if (data->primary == chanctx_conf) data->primary_load = mvm->tcm.result.load[mvmvif->id]; else if (data->secondary == chanctx_conf) @@ -382,9 +388,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, * we are not associated */ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || - mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || + mvm->cfg->bt_shared_single_ant || !vif->cfg.assoc || le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) { - iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false); + iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false); + /* FIXME: should this be per link? */ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0); return; } @@ -396,10 +403,12 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!ave_rssi) ave_rssi = -100; if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true)) + if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, + true)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) { - if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false)) + if (iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, + false)) IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n"); } @@ -407,6 +416,32 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi); } +/* must be called under rcu_read_lock */ +static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_bt_iterator_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + break; + case NL80211_IFTYPE_AP: + if (!mvmvif->ap_ibss_active) + return; + break; + default: + return; + } + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) + iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id); +} + static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) { struct iwl_bt_iterator_data data = { @@ -521,7 +556,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ - if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) + if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) return; /* No BT - reports should be disabled */ @@ -537,10 +572,13 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant || iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT) - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, + ret = iwl_mvm_bt_coex_reduced_txp(mvm, + mvmvif->deflink.ap_sta_id, false); else - ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true); + ret = iwl_mvm_bt_coex_reduced_txp(mvm, + mvmvif->deflink.ap_sta_id, + true); if (ret) IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n"); @@ -554,7 +592,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, { 
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; + struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt; enum iwl_bt_coex_lut_type lut_type; if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) @@ -578,7 +616,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; + struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->deflink.phy_ctxt; enum iwl_bt_coex_lut_type lut_type; if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) diff --git a/mvm/constants.h b/mvm/constants.h index c604f9f39b24..243eccc68cb0 100644 --- a/mvm/constants.h +++ b/mvm/constants.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2013-2015 Intel Mobile Communications GmbH - * Copyright (C) 2013-2014, 2018-2021 Intel Corporation + * Copyright (C) 2013-2014, 2018-2023 Intel Corporation * Copyright (C) 2015 Intel Deutschland GmbH */ #ifndef __MVM_CONSTANTS_H @@ -109,10 +109,6 @@ #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20 #define IWL_MVM_USE_NSSN_SYNC 0 -#define IWL_MVM_PHY_FILTER_CHAIN_A 0 -#define IWL_MVM_PHY_FILTER_CHAIN_B 0 -#define IWL_MVM_PHY_FILTER_CHAIN_C 0 -#define IWL_MVM_PHY_FILTER_CHAIN_D 0 #define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false #define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40 /* 20016 pSec is 6 meter RTT, meaning 3 meter range */ @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -470,7 +470,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) data.rsc->mcast_key_id_map[i] = IWL_MCAST_KEY_MAP_INVALID; - data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id); ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_rsc_v5_data, @@ -493,7 +493,8 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, if (ver == 4) { size = sizeof(*data.rsc_tsc); - data.rsc_tsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + data.rsc_tsc->sta_id = + cpu_to_le32(mvmvif->deflink.ap_sta_id); } else { /* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */ size = sizeof(data.rsc_tsc->params); @@ -563,6 +564,7 @@ static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw, } for (i = 0; i < IWL_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); /* wrapping isn't allowed, AP must rekey */ if (seq.tkip.iv32 > cur_rx_iv32) cur_rx_iv32 = seq.tkip.iv32; @@ -599,6 +601,11 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, switch (key->cipher) { default: return; + case WLAN_CIPHER_SUITE_TKIP: + if (!sta) + data->kek_kck_cmd->gtk_cipher = + cpu_to_le32(STA_KEY_FLG_TKIP); + return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); @@ -610,13 +617,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, if (!sta) data->kek_kck_cmd->gtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); - break; + return; case WLAN_CIPHER_SUITE_GCMP: case 
WLAN_CIPHER_SUITE_GCMP_256: if (!sta) data->kek_kck_cmd->gtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); - break; + return; } } @@ -686,7 +693,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm, pattern_cmd->n_patterns = wowlan->n_patterns; if (ver >= 3) - pattern_cmd->sta_id = mvmvif->ap_sta_id; + pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id; for (i = 0; i < wowlan->n_patterns; i++) { int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); @@ -727,11 +734,11 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EINVAL; /* add back the PHY */ - if (WARN_ON(!mvmvif->phy_ctxt)) + if (WARN_ON(!mvmvif->deflink.phy_ctxt)) return -EINVAL; rcu_read_lock(); - ctx = rcu_dereference(vif->chanctx_conf); + ctx = rcu_dereference(vif->bss_conf.chanctx_conf); if (WARN_ON(!ctx)) { rcu_read_unlock(); return -EINVAL; @@ -741,7 +748,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, chains_dynamic = ctx->rx_chains_dynamic; rcu_read_unlock(); - ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef, + ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef, chains_static, chains_dynamic); if (ret) return ret; @@ -749,7 +756,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* add back the MAC */ mvmvif->uploaded = false; - if (WARN_ON(!vif->bss_conf.assoc)) + if (WARN_ON(!vif->cfg.assoc)) return -EINVAL; ret = iwl_mvm_mac_ctxt_add(mvm, vif); @@ -758,12 +765,12 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* add back binding - XXX refactor? */ binding_cmd.id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id, + mvmvif->deflink.phy_ctxt->color)); binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); binding_cmd.phy = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id, + mvmvif->deflink.phy_ctxt->color)); binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); for (i = 1; i < MAX_MACS_IN_BINDING; i++) @@ -786,7 +793,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0); if (ret) return ret; - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); + rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id], + ap_sta); ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) @@ -795,8 +803,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* and some quota */ quota = iwl_mvm_quota_cmd_get_quota(mvm, "a_cmd, 0); quota->id_and_color = - cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id, - mvmvif->phy_ctxt->color)); + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->deflink.phy_ctxt->id, + mvmvif->deflink.phy_ctxt->color)); quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA); quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA); @@ -915,7 +923,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */ wowlan_config_cmd->is_11n_connection = - ap_sta->ht_cap.ht_supported; + ap_sta->deflink.ht_cap.ht_supported; wowlan_config_cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; @@ -1022,7 +1030,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, if (ver == 2) { size = sizeof(tkip_data.tkip); 
tkip_data.tkip.sta_id = - cpu_to_le32(mvmvif->ap_sta_id); + cpu_to_le32(mvmvif->deflink.ap_sta_id); } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) { size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1); } else { @@ -1071,7 +1079,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len); kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr; kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm); - kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id); + kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id); if (cmd_ver == 4) { cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4); @@ -1264,7 +1272,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { + if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; @@ -1280,10 +1288,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, } else { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; - wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; + wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id; ap_sta = rcu_dereference_protected( - mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id], lockdep_is_held(&mvm->mutex)); if (IS_ERR_OR_NULL(ap_sta)) { ret = -EINVAL; @@ -1372,6 +1380,14 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) return __iwl_mvm_suspend(hw, wowlan, false); } +struct iwl_multicast_key_data { + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + u8 id; + u8 ipn[6]; +}; + /* converted data from the different status responses */ struct iwl_wowlan_status_data { u64 replay_ctr; @@ -1390,7 +1406,8 @@ struct iwl_wowlan_status_data { u8 key[WOWLAN_KEY_MAX_SIZE]; u8 len; u8 flags; - } gtk; + u8 id; + } gtk[WOWLAN_GTK_KEYS_NUM]; struct { /* @@ -1420,14 +1437,9 @@ struct iwl_wowlan_status_data { } tkip, aes; } ptk; - struct { - u64 ipn; - u8 key[WOWLAN_KEY_MAX_SIZE]; - u8 len; - u8 flags; - } igtk; + struct iwl_multicast_key_data igtk; - u8 wake_packet[]; + u8 *wake_packet; }; static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, @@ -1480,7 +1492,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) wakeup.tcp_match = true; - if (status->wake_packet_bufsize) { + if (status->wake_packet) { int pktsize = status->wake_packet_bufsize; int pktlen = status->wake_packet_length; const u8 *pktdata = status->wake_packet; @@ -1750,7 +1762,7 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, s8 new_key_id = -1; if (status->num_of_gtk_rekeys) - new_key_id = status->gtk.flags & + new_key_id = status->gtk[0].flags & IWL_WOWLAN_GTK_IDX_MASK; /* Don't install a new key's value to an old key */ @@ -1769,20 +1781,18 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, struct iwl_mvm_d3_gtk_iter_data { struct iwl_mvm *mvm; struct iwl_wowlan_status_data *status; - void *last_gtk; - u32 cipher; - bool find_phase, unhandled_cipher; + u32 gtk_cipher, igtk_cipher; + bool unhandled_cipher, igtk_support; int num_keys; }; -static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key, - void *_data) +static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta 
*sta, + struct ieee80211_key_conf *key, + void *_data) { struct iwl_mvm_d3_gtk_iter_data *data = _data; - struct iwl_wowlan_status_data *status = data->status; if (data->unhandled_cipher) return; @@ -1797,51 +1807,230 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_TKIP: /* we support these */ + data->gtk_cipher = key->cipher; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + /* we support these */ + if (data->igtk_support && + (key->keyidx == 4 || key->keyidx == 5)) { + data->igtk_cipher = key->cipher; + } else { + data->unhandled_cipher = true; + return; + } break; default: - /* everything else (even CMAC for MFP) - disconnect from AP */ + /* everything else - disconnect from AP */ data->unhandled_cipher = true; return; } data->num_keys++; +} - /* - * pairwise key - update sequence counters only; - * note that this assumes no TDLS sessions are active - */ - if (sta) { - if (data->find_phase) - return; +static void +iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key, + struct ieee80211_key_seq *seq, u32 cipher) +{ + switch (cipher) { + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + BUILD_BUG_ON(sizeof(seq->aes_gmac.pn) != sizeof(key->ipn)); + memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn)); + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn)); + memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn)); + break; + } +} + +static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm_d3_gtk_iter_data *data = _data; + struct iwl_wowlan_status_data *status = data->status; + s8 keyidx; - switch (key->cipher) { - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_GCMP: - case WLAN_CIPHER_SUITE_GCMP_256: + if (data->unhandled_cipher) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* ignore WEP completely, nothing to do */ + return; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (sta) { atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn); iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key); - break; - case WLAN_CIPHER_SUITE_TKIP: + return; + } + fallthrough; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn); iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq); - break; + return; } + keyidx = key->keyidx; + /* The current key is always sent by the FW, even if it wasn't + * rekeyed during D3. 
+ * We remove an existing key if it has the same index as + * a new key + */ + if (status->num_of_gtk_rekeys && + ((status->gtk[0].len && keyidx == status->gtk[0].id) || + (status->gtk[1].len && keyidx == status->gtk[1].id))) { + ieee80211_remove_key(key); + } else { + iwl_mvm_set_key_rx_seq(key, data->status, false); + } + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (key->keyidx == 4 || key->keyidx == 5) { + /* remove rekeyed key */ + if (status->num_of_gtk_rekeys) { + ieee80211_remove_key(key); + } else { + struct ieee80211_key_seq seq; - /* that's it for this key */ - return; + iwl_mvm_d3_set_igtk_bigtk_ipn(&status->igtk, + &seq, + key->cipher); + ieee80211_set_key_rx_seq(key, 0, &seq); + } + } } +} - if (data->find_phase) { - data->last_gtk = key; - data->cipher = key->cipher; - return; +static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status, + struct ieee80211_vif *vif, + struct iwl_mvm *mvm, u32 gtk_cipher) +{ + int i; + struct ieee80211_key_conf *key; + struct { + struct ieee80211_key_conf conf; + u8 key[32]; + } conf = { + .conf.cipher = gtk_cipher, + }; + + BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); + BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk[0].key)); + + switch (gtk_cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + conf.conf.keylen = WLAN_KEY_LEN_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; + break; + case WLAN_CIPHER_SUITE_TKIP: + conf.conf.keylen = WLAN_KEY_LEN_TKIP; + break; + default: + WARN_ON(1); + } + + for (i = 0; i < ARRAY_SIZE(status->gtk); i++) { + if (!status->gtk[i].len) + continue; + + conf.conf.keyidx = status->gtk[i].id; + IWL_DEBUG_WOWLAN(mvm, + "Received from FW GTK cipher %d, key index %d\n", + conf.conf.cipher, conf.conf.keyidx); + memcpy(conf.conf.key, status->gtk[i].key, + sizeof(status->gtk[i].key)); + + key = ieee80211_gtk_rekey_add(vif, &conf.conf); + if (IS_ERR(key)) + return false; + iwl_mvm_set_key_rx_seq_idx(key, status, i); + } + + return true; +} + +static bool +iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status, + struct ieee80211_vif *vif, u32 cipher, + struct iwl_multicast_key_data *key_data) +{ + struct ieee80211_key_conf *key_config; + struct { + struct ieee80211_key_conf conf; + u8 key[WOWLAN_KEY_MAX_SIZE]; + } conf = { + .conf.cipher = cipher, + .conf.keyidx = key_data->id, + }; + struct ieee80211_key_seq seq; + + if (!key_data->len) + return true; + + iwl_mvm_d3_set_igtk_bigtk_ipn(key_data, &seq, conf.conf.cipher); + + switch (cipher) { + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC; + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256; + break; + default: + WARN_ON(1); } + BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key)); + memcpy(conf.conf.key, key_data->key, conf.conf.keylen); + + key_config = ieee80211_gtk_rekey_add(vif, &conf.conf); + if (IS_ERR(key_config)) + return false; + ieee80211_set_key_rx_seq(key_config, 0, &seq); + return true; +} + +static int 
iwl_mvm_lookup_wowlan_status_ver(struct iwl_mvm *mvm) +{ + u8 notif_ver; - if (data->status->num_of_gtk_rekeys) - ieee80211_remove_key(key); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) + return 6; - if (data->last_gtk == key) - iwl_mvm_set_key_rx_seq(key, data->status, false); + /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, + WOWLAN_GET_STATUSES, 0); + if (!notif_ver) + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + WOWLAN_GET_STATUSES, 7); + + return notif_ver; } static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, @@ -1863,71 +2052,41 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, if (status->wakeup_reasons & disconnection_reasons) return false; + if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 || + iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + 0)) + gtkdata.igtk_support = true; + /* find last GTK that we used initially, if any */ - gtkdata.find_phase = true; ieee80211_iter_keys(mvm->hw, vif, - iwl_mvm_d3_update_keys, >kdata); + iwl_mvm_d3_find_last_keys, >kdata); /* not trying to keep connections with MFP/unhandled ciphers */ if (gtkdata.unhandled_cipher) return false; if (!gtkdata.num_keys) goto out; - if (!gtkdata.last_gtk) - return false; /* * invalidate all other GTKs that might still exist and update * the one that we used */ - gtkdata.find_phase = false; ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_d3_update_keys, >kdata); - IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", - status->num_of_gtk_rekeys); if (status->num_of_gtk_rekeys) { - struct ieee80211_key_conf *key; - struct { - struct ieee80211_key_conf conf; - u8 key[32]; - } conf = { - .conf.cipher = gtkdata.cipher, - .conf.keyidx = - status->gtk.flags & IWL_WOWLAN_GTK_IDX_MASK, - }; - __be64 replay_ctr; - - IWL_DEBUG_WOWLAN(mvm, - "Received from FW GTK cipher %d, key index %d\n", - conf.conf.cipher, conf.conf.keyidx); + __be64 replay_ctr = cpu_to_be64(status->replay_ctr); - BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); - BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); - BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk.key)); + IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n", + status->num_of_gtk_rekeys); - memcpy(conf.conf.key, status->gtk.key, sizeof(status->gtk.key)); - - switch (gtkdata.cipher) { - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_GCMP: - conf.conf.keylen = WLAN_KEY_LEN_CCMP; - break; - case WLAN_CIPHER_SUITE_GCMP_256: - conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; - break; - case WLAN_CIPHER_SUITE_TKIP: - conf.conf.keylen = WLAN_KEY_LEN_TKIP; - break; - } - - key = ieee80211_gtk_rekey_add(vif, &conf.conf); - if (IS_ERR(key)) + if (!iwl_mvm_gtk_rekey(status, vif, mvm, gtkdata.gtk_cipher)) return false; - iwl_mvm_set_key_rx_seq(key, status, true); - replay_ctr = cpu_to_be64(status->replay_ctr); + if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif, + gtkdata.igtk_cipher, + &status->igtk)) + return false; ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, (void *)&replay_ctr, GFP_KERNEL); @@ -1944,6 +2103,110 @@ out: return true; } +static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v2 *data) +{ + BUILD_BUG_ON(sizeof(status->gtk[0].key) < sizeof(data->key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + 
sizeof(data->tkip_mic_key) > + sizeof(status->gtk[0].key)); + + status->gtk[0].len = data->key_len; + status->gtk[0].flags = data->key_flags; + + memcpy(status->gtk[0].key, data->key, sizeof(data->key)); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (status->gtk[0].len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(status->gtk[0].key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + data->tkip_mic_key, sizeof(data->tkip_mic_key)); +} + +static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v3 *data) +{ + int data_idx, status_idx = 0; + + BUILD_BUG_ON(sizeof(status->gtk[0].key) < sizeof(data[0].key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(data[0].tkip_mic_key) > + sizeof(status->gtk[0].key)); + BUILD_BUG_ON(ARRAY_SIZE(status->gtk) < WOWLAN_GTK_KEYS_NUM); + for (data_idx = 0; data_idx < ARRAY_SIZE(status->gtk); data_idx++) { + if (!(data[data_idx].key_len)) + continue; + status->gtk[status_idx].len = data[data_idx].key_len; + status->gtk[status_idx].flags = data[data_idx].key_flags; + status->gtk[status_idx].id = status->gtk[status_idx].flags & + IWL_WOWLAN_GTK_IDX_MASK; + + memcpy(status->gtk[status_idx].key, data[data_idx].key, + sizeof(data[data_idx].key)); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (status->gtk[status_idx].len == + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(status->gtk[status_idx].key + + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + data[data_idx].tkip_mic_key, + sizeof(data[data_idx].tkip_mic_key)); + status_idx++; + } +} + +static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_igtk_status *data) +{ + BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); + + if (!data->key_len) + return; + + status->igtk.len = data->key_len; + status->igtk.flags = data->key_flags; + status->igtk.id = u32_get_bits(data->key_flags, + IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) + + WOWLAN_IGTK_MIN_INDEX; + + memcpy(status->igtk.key, data->key, sizeof(data->key)); + memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn)); +} + +static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, + struct iwl_wowlan_info_notif *data, + struct iwl_wowlan_status_data *status, + u32 len) +{ + u32 i; + + if (!data) { + IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); + status = NULL; + return; + } + + if (len < sizeof(*data)) { + IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); + status = NULL; + return; + } + + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, data->gtk); + iwl_mvm_convert_igtk(status, &data->igtk[0]); + + status->replay_ctr = le64_to_cpu(data->replay_ctr); + status->pattern_number = le16_to_cpu(data->pattern_number); + for (i = 0; i < IWL_MAX_TID_COUNT; i++) + status->qos_seq_ctr[i] = + le16_to_cpu(data->qos_seq_ctr[i]); + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); + status->num_of_gtk_rekeys = + le32_to_cpu(data->num_of_gtk_rekeys); + status->received_beacons = le32_to_cpu(data->received_beacons); + status->tid_tear_down = data->tid_tear_down; +} + /* Occasionally, templates would be nice. This is one of those times ... 
*/ #define iwl_mvm_parse_wowlan_status_common(_ver) \ static struct iwl_wowlan_status_data * \ @@ -1956,18 +2219,18 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ \ if (len < sizeof(*data)) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ - return ERR_PTR(-EIO); \ + return NULL; \ } \ \ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ if (len != sizeof(*data) + data_size) { \ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ - return ERR_PTR(-EIO); \ + return NULL; \ } \ \ - status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ + status = kzalloc(sizeof(*status), GFP_KERNEL); \ if (!status) \ - return ERR_PTR(-ENOMEM); \ + return NULL; \ \ /* copy all the common fields */ \ status->replay_ctr = le64_to_cpu(data->replay_ctr); \ @@ -1984,8 +2247,18 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ le32_to_cpu(data->wake_packet_length); \ status->wake_packet_bufsize = \ le32_to_cpu(data->wake_packet_bufsize); \ - memcpy(status->wake_packet, data->wake_packet, \ - status->wake_packet_bufsize); \ + if (status->wake_packet_bufsize) { \ + status->wake_packet = \ + kmemdup(data->wake_packet, \ + status->wake_packet_bufsize, \ + GFP_KERNEL); \ + if (!status->wake_packet) { \ + kfree(status); \ + return NULL; \ + } \ + } else { \ + status->wake_packet = NULL; \ + } \ \ return status; \ } @@ -1995,65 +2268,6 @@ iwl_mvm_parse_wowlan_status_common(v7) iwl_mvm_parse_wowlan_status_common(v9) iwl_mvm_parse_wowlan_status_common(v12) -static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_gtk_status_v2 *data) -{ - BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key)); - BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + - sizeof(data->tkip_mic_key) > - sizeof(status->gtk.key)); - - status->gtk.len = data->key_len; - status->gtk.flags = data->key_flags; - - memcpy(status->gtk.key, data->key, sizeof(data->key)); - - /* if it's as long as the TKIP encryption key, copy MIC key */ - if (status->gtk.len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) - memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, - data->tkip_mic_key, sizeof(data->tkip_mic_key)); -} - -static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_gtk_status_v3 *data) -{ - /* The parts we need are identical in v2 and v3 */ -#define CHECK(_f) do { \ - BUILD_BUG_ON(offsetof(struct iwl_wowlan_gtk_status_v2, _f) != \ - offsetof(struct iwl_wowlan_gtk_status_v3, _f)); \ - BUILD_BUG_ON(offsetofend(struct iwl_wowlan_gtk_status_v2, _f) !=\ - offsetofend(struct iwl_wowlan_gtk_status_v3, _f)); \ -} while (0) - - CHECK(key); - CHECK(key_len); - CHECK(key_flags); - CHECK(tkip_mic_key); -#undef CHECK - - iwl_mvm_convert_gtk_v2(status, (void *)data); -} - -static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, - struct iwl_wowlan_igtk_status *data) -{ - const u8 *ipn = data->ipn; - - BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); - - status->igtk.len = data->key_len; - status->igtk.flags = data->key_flags; - - memcpy(status->igtk.key, data->key, sizeof(data->key)); - - status->igtk.ipn = ((u64)ipn[5] << 0) | - ((u64)ipn[4] << 8) | - ((u64)ipn[3] << 16) | - ((u64)ipn[2] << 24) | - ((u64)ipn[1] << 32) | - ((u64)ipn[0] << 40); -} - static struct iwl_wowlan_status_data * iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) { @@ -2086,49 +2300,44 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) len = iwl_rx_packet_payload_len(cmd.resp_pkt); /* default to 
7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ - notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - WOWLAN_GET_STATUSES, 0); - if (!notif_ver) - notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - WOWLAN_GET_STATUSES, 7); + notif_ver = iwl_mvm_lookup_wowlan_status_ver(mvm); - if (!fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { + if (notif_ver < 7) { struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > - sizeof(status->gtk.key)); + sizeof(status->gtk[0].key)); BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + sizeof(v6->gtk.tkip_mic_key) > - sizeof(status->gtk.key)); + sizeof(status->gtk[0].key)); /* copy GTK info to the right place */ - memcpy(status->gtk.key, v6->gtk.decrypt_key, + memcpy(status->gtk[0].key, v6->gtk.decrypt_key, sizeof(v6->gtk.decrypt_key)); - memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + memcpy(status->gtk[0].key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, v6->gtk.tkip_mic_key, sizeof(v6->gtk.tkip_mic_key)); iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc); /* hardcode the key length to 16 since v6 only supports 16 */ - status->gtk.len = 16; + status->gtk[0].len = 16; /* * The key index only uses 2 bits (values 0 to 3) and * we always set bit 7 which means this is the * currently used key. */ - status->gtk.flags = v6->gtk.key_index | BIT(7); + status->gtk[0].flags = v6->gtk.key_index | BIT(7); } else if (notif_ver == 7) { struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); @@ -2141,7 +2350,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) * difference is only in a few not used (reserved) fields. */ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); @@ -2153,11 +2362,11 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data; status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len); - if (IS_ERR(status)) + if (!status) goto out_free_resp; iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc); - iwl_mvm_convert_gtk_v3(status, &v12->gtk[0]); + iwl_mvm_convert_gtk_v3(status, v12->gtk); iwl_mvm_convert_igtk(status, &v12->igtk[0]); status->tid_tear_down = v12->tid_tear_down; @@ -2165,7 +2374,7 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) IWL_ERR(mvm, "Firmware advertises unknown WoWLAN status response %d!\n", notif_ver); - status = ERR_PTR(-EIO); + status = NULL; } out_free_resp: @@ -2173,37 +2382,16 @@ out_free_resp: return status; } -static struct iwl_wowlan_status_data * -iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id) -{ - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD, - IWL_FW_CMD_VER_UNKNOWN); - __le32 station_id = cpu_to_le32(sta_id); - u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? 
sizeof(station_id) : 0; - - if (!mvm->net_detect) { - /* only for tracing for now */ - int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, - cmd_size, &station_id); - if (ret) - IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); - } - - return iwl_mvm_send_wowlan_get_status(mvm, sta_id); -} - /* releases the MVM mutex */ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct iwl_wowlan_status_data *status) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_wowlan_status_data *status; int i; bool keep; struct iwl_mvm_sta *mvm_ap_sta; - status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); - if (IS_ERR(status)) + if (!status) goto out_unlock; IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", @@ -2212,7 +2400,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, /* still at hard-coded place 0 for D3 image */ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); if (!mvm_ap_sta) - goto out_free; + goto out_unlock; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { u16 seq = status->qos_seq_ctr[i]; @@ -2235,11 +2423,8 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, keep = iwl_mvm_setup_connection_keep(mvm, vif, status); - kfree(status); return keep; -out_free: - kfree(status); out_unlock: mutex_unlock(&mvm->mutex); return false; @@ -2248,16 +2433,16 @@ out_unlock: #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ IWL_SCAN_MAX_PROFILES) -struct iwl_mvm_nd_query_results { +struct iwl_mvm_nd_results { u32 matched_profiles; u8 matches[ND_QUERY_BUF_LEN]; }; static int iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *results) + struct iwl_mvm_nd_results *results) { - struct iwl_scan_offload_profiles_query *query; + struct iwl_scan_offload_match_info *query; struct iwl_host_cmd cmd = { .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, .flags = CMD_WANT_SKB, @@ -2274,7 +2459,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { - query_len = sizeof(struct iwl_scan_offload_profiles_query); + query_len = sizeof(struct iwl_scan_offload_match_info); matches_len = sizeof(struct iwl_scan_offload_profile_match) * max_profiles; } else { @@ -2305,7 +2490,7 @@ out_free_resp: } static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *query, + struct iwl_mvm_nd_results *results, int idx) { int n_chans = 0, i; @@ -2313,13 +2498,13 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = - (struct iwl_scan_offload_profile_match *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) n_chans += hweight8(matches[idx].matching_channels[i]); } else { struct iwl_scan_offload_profile_match_v1 *matches = - (struct iwl_scan_offload_profile_match_v1 *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) n_chans += hweight8(matches[idx].matching_channels[i]); @@ -2329,7 +2514,7 @@ static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, } static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, - struct iwl_mvm_nd_query_results *query, + struct iwl_mvm_nd_results *results, struct cfg80211_wowlan_nd_match *match, int idx) { @@ -2338,7 +2523,7 @@ static void 
iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { struct iwl_scan_offload_profile_match *matches = - (struct iwl_scan_offload_profile_match *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) @@ -2346,7 +2531,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, mvm->nd_channels[i]->center_freq; } else { struct iwl_scan_offload_profile_match_v1 *matches = - (struct iwl_scan_offload_profile_match_v1 *)query->matches; + (void *)results->matches; for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) @@ -2355,25 +2540,50 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, } } +/** + * enum iwl_d3_notif - d3 notifications + * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF was received + * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF was received + * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF was received + * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF was received + * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF was received + */ +enum iwl_d3_notif { + IWL_D3_NOTIF_WOWLAN_INFO = BIT(0), + IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1), + IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2), + IWL_D3_ND_MATCH_INFO = BIT(3), + IWL_D3_NOTIF_D3_END_NOTIF = BIT(4) +}; + +/* manage d3 resume data */ +struct iwl_d3_data { + struct iwl_wowlan_status_data *status; + bool test; + u32 d3_end_flags; + u32 notif_expected; /* bitmap - see &enum iwl_d3_notif */ + u32 notif_received; /* bitmap - see &enum iwl_d3_notif */ + struct iwl_mvm_nd_results *nd_results; + bool nd_results_valid; +}; + static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct iwl_d3_data *d3_data) { struct cfg80211_wowlan_nd_info *net_detect = NULL; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; - struct iwl_wowlan_status_data *status; - struct iwl_mvm_nd_query_results query; unsigned long matched_profiles; u32 reasons = 0; int i, n_matches, ret; - status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); - if (!IS_ERR(status)) { - reasons = status->wakeup_reasons; - kfree(status); - } + if (WARN_ON(!d3_data || !d3_data->status)) + goto out; + + reasons = d3_data->status->wakeup_reasons; if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) wakeup.rfkill_release = true; @@ -2381,13 +2591,22 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) goto out; - ret = iwl_mvm_netdetect_query_results(mvm, &query); - if (ret || !query.matched_profiles) { + if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, 0)) { + IWL_INFO(mvm, "Query FW for ND results\n"); + ret = iwl_mvm_netdetect_query_results(mvm, d3_data->nd_results); + + } else { + IWL_INFO(mvm, "Notification based ND results\n"); + ret = d3_data->nd_results_valid ? 
0 : -1; + } + + if (ret || !d3_data->nd_results->matched_profiles) { wakeup_report = NULL; goto out; } - matched_profiles = query.matched_profiles; + matched_profiles = d3_data->nd_results->matched_profiles; if (mvm->n_nd_match_sets) { n_matches = hweight_long(matched_profiles); } else { @@ -2404,7 +2623,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, struct cfg80211_wowlan_nd_match *match; int idx, n_channels = 0; - n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); + n_channels = iwl_mvm_query_num_match_chans(mvm, + d3_data->nd_results, + i); match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL); @@ -2424,7 +2645,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, if (mvm->n_nd_channels < n_channels) continue; - iwl_mvm_query_set_freqs(mvm, &query, match, i); + iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i); } out_report_nd: @@ -2504,16 +2725,341 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, return false; } +/* + * This function assumes: + * 1. The mutex is already held. + * 2. The callee functions unlock the mutex. + */ +static bool +iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_d3_data *d3_data) +{ + lockdep_assert_held(&mvm->mutex); + + /* if FW uses status notification, status shouldn't be NULL here */ + if (!d3_data->status) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : + mvmvif->deflink.ap_sta_id; + + d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id); + } + + if (mvm->net_detect) { + iwl_mvm_query_netdetect_reasons(mvm, vif, d3_data); + } else { + bool keep = iwl_mvm_query_wakeup_reasons(mvm, vif, + d3_data->status); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (keep) + mvm->keep_vif = vif; +#endif + + return keep; + } + return false; +} + +#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \ + IWL_WOWLAN_WAKEUP_BY_PATTERN | \ + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\ + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\ + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\ + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD) + +static int iwl_mvm_wowlan_store_wake_pkt(struct iwl_mvm *mvm, + struct iwl_wowlan_wake_pkt_notif *notif, + struct iwl_wowlan_status_data *status, + u32 len) +{ + u32 data_size, packet_len = le32_to_cpu(notif->wake_packet_length); + + if (len < sizeof(*notif)) { + IWL_ERR(mvm, "Invalid WoWLAN wake packet notification!\n"); + return -EIO; + } + + if (WARN_ON(!status)) { + IWL_ERR(mvm, "Got wake packet notification but wowlan status data is NULL\n"); + return -EIO; + } + + if (WARN_ON(!(status->wakeup_reasons & + IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT))) { + IWL_ERR(mvm, "Got wakeup packet but wakeup reason is %x\n", + status->wakeup_reasons); + return -EIO; + } + + data_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, wake_packet); + + /* data_size got the padding from the notification, remove it. 
*/ + if (packet_len < data_size) + data_size = packet_len; + + status->wake_packet = kmemdup(notif->wake_packet, data_size, + GFP_ATOMIC); + + if (!status->wake_packet) + return -ENOMEM; + + status->wake_packet_length = packet_len; + status->wake_packet_bufsize = data_size; + + return 0; +} + +static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm, + struct iwl_d3_data *d3_data, + struct iwl_scan_offload_match_info *notif, + u32 len) +{ + struct iwl_wowlan_status_data *status = d3_data->status; + struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); + struct iwl_mvm_nd_results *results = d3_data->nd_results; + size_t i, matches_len = sizeof(struct iwl_scan_offload_profile_match) * + iwl_umac_scan_get_max_profiles(mvm->fw); + + if (IS_ERR_OR_NULL(vif)) + return; + + if (len < sizeof(struct iwl_scan_offload_match_info)) { + IWL_ERR(mvm, "Invalid scan match info notification\n"); + return; + } + + if (!mvm->net_detect) { + IWL_ERR(mvm, "Unexpected scan match info notification\n"); + return; + } + + if (!status || status->wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { + IWL_ERR(mvm, + "Ignore scan match info notification: no reason\n"); + return; + } + +#ifdef CONFIG_IWLWIFI_DEBUGFS + mvm->last_netdetect_scans = le32_to_cpu(notif->n_scans_done); +#endif + + results->matched_profiles = le32_to_cpu(notif->matched_profiles); + IWL_INFO(mvm, "number of matched profiles=%u\n", + results->matched_profiles); + + if (results->matched_profiles) { + memcpy(results->matches, notif->matches, matches_len); + d3_data->nd_results_valid = TRUE; + } + + /* no scan should be active at this point */ + mvm->scan_status = 0; + for (i = 0; i < mvm->max_scans; i++) + mvm->scan_uid_status[i] = 0; +} + +static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_d3_data *d3_data = data; + u32 len; + int ret; + int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw, + PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + IWL_FW_CMD_VER_UNKNOWN); + + + switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { + case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): { + struct iwl_wowlan_info_notif *notif; + + if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) { + /* We might get two notifications due to dual bss */ + IWL_DEBUG_WOWLAN(mvm, + "Got additional wowlan info notification\n"); + break; + } + + if (wowlan_info_ver < 2) { + struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; + + notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); + if (!notif) + return false; + + notif->tid_tear_down = notif_v1->tid_tear_down; + notif->station_id = notif_v1->station_id; + memset_after(notif, 0, station_id); + } else { + notif = (void *)pkt->data; + } + + d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO; + len = iwl_rx_packet_payload_len(pkt); + iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status, + len); + + if (wowlan_info_ver < 2) + kfree(notif); + + if (d3_data->status && + d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT) + /* We are supposed to get also wake packet notif */ + d3_data->notif_expected |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT; + + break; + } + case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): { + struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data; + + if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_WAKE_PKT) { + /* We shouldn't get two wake packet notifications */ + IWL_ERR(mvm, + 
"Got additional wowlan wake packet notification\n"); + } else { + d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT; + len = iwl_rx_packet_payload_len(pkt); + ret = iwl_mvm_wowlan_store_wake_pkt(mvm, notif, + d3_data->status, + len); + if (ret) + IWL_ERR(mvm, + "Can't parse WOWLAN_WAKE_PKT_NOTIFICATION\n"); + } + + break; + } + case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): { + struct iwl_scan_offload_match_info *notif = (void *)pkt->data; + + if (d3_data->notif_received & IWL_D3_ND_MATCH_INFO) { + IWL_ERR(mvm, + "Got additional netdetect match info\n"); + break; + } + + d3_data->notif_received |= IWL_D3_ND_MATCH_INFO; + + /* explicitly set this in the 'expected' as well */ + d3_data->notif_expected |= IWL_D3_ND_MATCH_INFO; + + len = iwl_rx_packet_payload_len(pkt); + iwl_mvm_nd_match_info_handler(mvm, d3_data, notif, len); + break; + } + case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): { + struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data; + + d3_data->d3_end_flags = __le32_to_cpu(notif->flags); + d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF; + + break; + } + default: + WARN_ON(1); + } + + return d3_data->notif_received == d3_data->notif_expected; +} + +static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test) +{ + int ret; + enum iwl_d3_status d3_status; + struct iwl_host_cmd cmd = { + .id = D0I3_END_CMD, + .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, + }; + bool reset = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !reset); + if (ret) + return ret; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(mvm, "Device was reset during suspend\n"); + return -ENOENT; + } + + /* + * We should trigger resume flow using command only for 22000 family + * AX210 and above don't need the command since they have + * the doorbell interrupt. 
+ */ + if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_22000 && + fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST)) { + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", + ret); + } + + return ret; +} + +#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5) + +static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm, + struct iwl_d3_data *d3_data) +{ + static const u16 d3_resume_notif[] = { + WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION), + WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION), + WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF), + WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) + }; + struct iwl_notification_wait wait_d3_notif; + int ret; + + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, + d3_resume_notif, ARRAY_SIZE(d3_resume_notif), + iwl_mvm_wait_d3_notif, d3_data); + + ret = iwl_mvm_resume_firmware(mvm, d3_data->test); + if (ret) { + iwl_remove_notification(&mvm->notif_wait, &wait_d3_notif); + return ret; + } + + return iwl_wait_notification(&mvm->notif_wait, &wait_d3_notif, + IWL_MVM_D3_NOTIF_TIMEOUT); +} + +static inline bool iwl_mvm_d3_resume_notif_based(struct iwl_mvm *mvm) +{ + return iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, 0) && + iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + WOWLAN_WAKE_PKT_NOTIFICATION, 0) && + iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + D3_END_NOTIFICATION, 0); +} + static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; int ret = 1; - enum iwl_d3_status d3_status; - bool keep = false; + struct iwl_mvm_nd_results results = {}; + struct iwl_d3_data d3_data = { + .test = test, + .notif_expected = + IWL_D3_NOTIF_WOWLAN_INFO | + IWL_D3_NOTIF_D3_END_NOTIF, + .nd_results_valid = false, + .nd_results = &results, + }; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); + bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm); + bool keep = false; mutex_lock(&mvm->mutex); @@ -2537,54 +3083,30 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) goto err; } - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); - if (ret) - goto err; - - if (d3_status != IWL_D3_STATUS_ALIVE) { - IWL_INFO(mvm, "Device was reset during suspend\n"); - goto err; - } - - if (d0i3_first) { - struct iwl_host_cmd cmd = { - .id = D0I3_END_CMD, - .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, - }; - int len; - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret < 0) { - IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", - ret); + if (resume_notif_based) { + d3_data.status = kzalloc(sizeof(*d3_data.status), GFP_KERNEL); + if (!d3_data.status) { + IWL_ERR(mvm, "Failed to allocate wowlan status\n"); + ret = -ENOMEM; goto err; } - switch (mvm->cmd_ver.d0i3_resp) { - case 0: - break; - case 1: - len = iwl_rx_packet_payload_len(cmd.resp_pkt); - if (len != sizeof(u32)) { - IWL_ERR(mvm, - "Error with D0I3_END_CMD response size (%d)\n", - len); - goto err; - } - if (IWL_D0I3_RESET_REQUIRE & - le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) { - iwl_write32(mvm->trans, CSR_RESET, - CSR_RESET_REG_FLAG_FORCE_NMI); - iwl_free_resp(&cmd); - } - break; - default: - WARN_ON(1); - } + + ret = iwl_mvm_d3_notif_wait(mvm, &d3_data); + if (ret) + goto err; + } else { + ret = iwl_mvm_resume_firmware(mvm, test); + if (ret < 0) + goto err; } /* after 
the successful handshake, we're out of D3 */ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + /* when reset is required we can't send these following commands */ + if (d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) + goto query_wakeup_reasons; + /* * Query the current location and source from the D3 firmware so we * can play it back when we re-intiailize the D0 firmware @@ -2598,41 +3120,36 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) /* Re-configure default SAR profile */ iwl_mvm_sar_select_profile(mvm, 1, 1); - if (mvm->net_detect) { + if (mvm->net_detect && unified_image) { /* If this is a non-unified image, we restart the FW, * so no need to stop the netdetect scan. If that * fails, continue and try to get the wake-up reasons, * but trigger a HW restart by keeping a failure code * in ret. */ - if (unified_image) - ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, - false); - - iwl_mvm_query_netdetect_reasons(mvm, vif); - /* has unlocked the mutex, so skip that */ - goto out; - } else { - keep = iwl_mvm_query_wakeup_reasons(mvm, vif); -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (keep) - mvm->keep_vif = vif; -#endif - /* has unlocked the mutex, so skip that */ - goto out_iterate; + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, + false); } +query_wakeup_reasons: + keep = iwl_mvm_choose_query_wakeup_reasons(mvm, vif, &d3_data); + /* has unlocked the mutex, so skip that */ + goto out; + err: - iwl_mvm_free_nd(mvm); mutex_unlock(&mvm->mutex); +out: + if (d3_data.status) + kfree(d3_data.status->wake_packet); + kfree(d3_data.status); + iwl_mvm_free_nd(mvm); -out_iterate: - if (!test) + if (!d3_data.test && !mvm->net_detect) ieee80211_iterate_active_interfaces_mtx(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_disconnect_iter, + keep ? 
vif : NULL); -out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); /* no need to reset the device in unified images, if successful */ @@ -2641,9 +3158,14 @@ out: if (d0i3_first) return 0; - ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); - if (!ret) + if (!iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP, + D3_END_NOTIFICATION, 0)) { + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); + if (!ret) + return 0; + } else if (!(d3_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) { return 0; + } } /* diff --git a/mvm/debugfs-vif.c b/mvm/debugfs-vif.c new file mode 100644 index 000000000000..cb4ecad6103f --- /dev/null +++ b/mvm/debugfs-vif.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#include "mvm.h" +#include "debugfs.h" + +static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_dbgfs_pm_mask param, int val) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm; + + dbgfs_pm->mask |= param; + + switch (param) { + case MVM_DEBUGFS_PM_KEEP_ALIVE: { + int dtimper = vif->bss_conf.dtim_period ?: 1; + int dtimper_msec = dtimper * vif->bss_conf.beacon_int; + + IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); + if (val * MSEC_PER_SEC < 3 * dtimper_msec) + IWL_WARN(mvm, + "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n", + val * MSEC_PER_SEC, 3 * dtimper_msec); + dbgfs_pm->keep_alive_seconds = val; + break; + } + case MVM_DEBUGFS_PM_SKIP_OVER_DTIM: + IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n", + val ? "enabled" : "disabled"); + dbgfs_pm->skip_over_dtim = val; + break; + case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS: + IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val); + dbgfs_pm->skip_dtim_periods = val; + break; + case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val); + dbgfs_pm->rx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT: + IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val); + dbgfs_pm->tx_data_timeout = val; + break; + case MVM_DEBUGFS_PM_LPRX_ENA: + IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? 
"enabled" : "disabled"); + dbgfs_pm->lprx_ena = val; + break; + case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD: + IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val); + dbgfs_pm->lprx_rssi_threshold = val; + break; + case MVM_DEBUGFS_PM_SNOOZE_ENABLE: + IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val); + dbgfs_pm->snooze_ena = val; + break; + case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING: + IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); + dbgfs_pm->uapsd_misbehaving = val; + break; + case MVM_DEBUGFS_PM_USE_PS_POLL: + IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); + dbgfs_pm->use_ps_poll = val; + break; + } +} + +static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + enum iwl_dbgfs_pm_mask param; + int val, ret; + + if (!strncmp("keep_alive=", buf, 11)) { + if (sscanf(buf + 11, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_KEEP_ALIVE; + } else if (!strncmp("skip_over_dtim=", buf, 15)) { + if (sscanf(buf + 15, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM; + } else if (!strncmp("skip_dtim_periods=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS; + } else if (!strncmp("rx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT; + } else if (!strncmp("tx_data_timeout=", buf, 16)) { + if (sscanf(buf + 16, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT; + } else if (!strncmp("lprx=", buf, 5)) { + if (sscanf(buf + 5, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_LPRX_ENA; + } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) { + if (sscanf(buf + 20, "%d", &val) != 1) + return -EINVAL; + if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val < + POWER_LPRX_RSSI_THRESHOLD_MIN) + return -EINVAL; + param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD; + } else if (!strncmp("snooze_enable=", buf, 14)) { + if (sscanf(buf + 14, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_SNOOZE_ENABLE; + } else if (!strncmp("uapsd_misbehaving=", buf, 18)) { + if (sscanf(buf + 18, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; + } else if (!strncmp("use_ps_poll=", buf, 12)) { + if (sscanf(buf + 12, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_USE_PS_POLL; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_pm(mvm, vif, param, val); + ret = iwl_mvm_power_update_mac(mvm); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + char buf[64]; + int bufsz = sizeof(buf); + int pos; + + pos = scnprintf(buf, bufsz, "bss limit = %d\n", + vif->bss_conf.txpower); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_pm_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[512]; + int bufsz = sizeof(buf); + int pos; + + pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t 
iwl_dbgfs_mac_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u8 ap_sta_id; + struct ieee80211_chanctx_conf *chanctx_conf; + char buf[512]; + int bufsz = sizeof(buf); + int pos = 0; + int i; + + mutex_lock(&mvm->mutex); + + ap_sta_id = mvmvif->deflink.ap_sta_id; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_ADHOC: + pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n"); + break; + case NL80211_IFTYPE_STATION: + pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n"); + break; + case NL80211_IFTYPE_AP: + pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n"); + break; + case NL80211_IFTYPE_P2P_CLIENT: + pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n"); + break; + case NL80211_IFTYPE_P2P_GO: + pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n"); + break; + case NL80211_IFTYPE_P2P_DEVICE: + pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n"); + break; + default: + break; + } + + pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n", + mvmvif->id, mvmvif->color); + pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", + vif->bss_conf.bssid); + pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n", + mvm->tcm.result.load[mvmvif->id]); + pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); + for (i = 0; i < ARRAY_SIZE(mvmvif->deflink.queue_params); i++) + pos += scnprintf(buf+pos, bufsz-pos, + "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n", + i, mvmvif->deflink.queue_params[i].txop, + mvmvif->deflink.queue_params[i].cw_min, + mvmvif->deflink.queue_params[i].cw_max, + mvmvif->deflink.queue_params[i].aifs, + mvmvif->deflink.queue_params[i].uapsd); + + if (vif->type == NL80211_IFTYPE_STATION && + ap_sta_id != IWL_MVM_INVALID_STA) { + struct iwl_mvm_sta *mvm_sta; + + mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); + if (mvm_sta) { + pos += scnprintf(buf+pos, bufsz-pos, + "ap_sta_id %d - reduced Tx power %d\n", + ap_sta_id, + mvm_sta->bt_reduced_txpower); + } + } + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf); + if (chanctx_conf) + pos += scnprintf(buf+pos, bufsz-pos, + "idle rx chains %d, active rx chains: %d\n", + chanctx_conf->rx_chains_static, + chanctx_conf->rx_chains_dynamic); + rcu_read_unlock(); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif, + enum iwl_dbgfs_bf_mask param, int value) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf; + + dbgfs_bf->mask |= param; + + switch (param) { + case MVM_DEBUGFS_BF_ENERGY_DELTA: + dbgfs_bf->bf_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA: + dbgfs_bf->bf_roaming_energy_delta = value; + break; + case MVM_DEBUGFS_BF_ROAMING_STATE: + dbgfs_bf->bf_roaming_state = value; + break; + case MVM_DEBUGFS_BF_TEMP_THRESHOLD: + dbgfs_bf->bf_temp_threshold = value; + break; + case MVM_DEBUGFS_BF_TEMP_FAST_FILTER: + dbgfs_bf->bf_temp_fast_filter = value; + break; + case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER: + dbgfs_bf->bf_temp_slow_filter = value; + break; + case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER: + dbgfs_bf->bf_enable_beacon_filter = value; + break; + case MVM_DEBUGFS_BF_DEBUG_FLAG: + dbgfs_bf->bf_debug_flag = value; + break; + case MVM_DEBUGFS_BF_ESCAPE_TIMER: + 
dbgfs_bf->bf_escape_timer = value; + break; + case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT: + dbgfs_bf->ba_enable_beacon_abort = value; + break; + case MVM_DEBUGFS_BA_ESCAPE_TIMER: + dbgfs_bf->ba_escape_timer = value; + break; + } +} + +static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + enum iwl_dbgfs_bf_mask param; + int value, ret = 0; + + if (!strncmp("bf_energy_delta=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ENERGY_DELTA_MIN || + value > IWL_BF_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN || + value > IWL_BF_ROAMING_ENERGY_DELTA_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA; + } else if (!strncmp("bf_roaming_state=", buf, 17)) { + if (sscanf(buf+17, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ROAMING_STATE_MIN || + value > IWL_BF_ROAMING_STATE_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ROAMING_STATE; + } else if (!strncmp("bf_temp_threshold=", buf, 18)) { + if (sscanf(buf+18, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_THRESHOLD_MIN || + value > IWL_BF_TEMP_THRESHOLD_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_THRESHOLD; + } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) { + if (sscanf(buf+20, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_FAST_FILTER_MIN || + value > IWL_BF_TEMP_FAST_FILTER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER; + } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) { + if (sscanf(buf+20, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_TEMP_SLOW_FILTER_MIN || + value > IWL_BF_TEMP_SLOW_FILTER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER; + } else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { + if (sscanf(buf+24, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER; + } else if (!strncmp("bf_debug_flag=", buf, 14)) { + if (sscanf(buf+14, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BF_DEBUG_FLAG; + } else if (!strncmp("bf_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BF_ESCAPE_TIMER_MIN || + value > IWL_BF_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BF_ESCAPE_TIMER; + } else if (!strncmp("ba_escape_timer=", buf, 16)) { + if (sscanf(buf+16, "%d", &value) != 1) + return -EINVAL; + if (value < IWL_BA_ESCAPE_TIMER_MIN || + value > IWL_BA_ESCAPE_TIMER_MAX) + return -EINVAL; + param = MVM_DEBUGFS_BA_ESCAPE_TIMER; + } else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) { + if (sscanf(buf+23, "%d", &value) != 1) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + iwl_dbgfs_update_bf(vif, param, value); + if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + else + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_bf_params_read(struct file 
*file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = + cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT), + .ba_enable_beacon_abort = + cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT), + }; + + iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd); + if (mvmvif->bf_data.bf_enabled) + cmd.bf_enable_beacon_filter = cpu_to_le32(1); + else + cmd.bf_enable_beacon_filter = 0; + + pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n", + le32_to_cpu(cmd.bf_energy_delta)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n", + le32_to_cpu(cmd.bf_roaming_energy_delta)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n", + le32_to_cpu(cmd.bf_roaming_state)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n", + le32_to_cpu(cmd.bf_temp_threshold)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n", + le32_to_cpu(cmd.bf_temp_fast_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n", + le32_to_cpu(cmd.bf_temp_slow_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n", + le32_to_cpu(cmd.bf_enable_beacon_filter)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n", + le32_to_cpu(cmd.bf_debug_flag)); + pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n", + le32_to_cpu(cmd.bf_escape_timer)); + pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n", + le32_to_cpu(cmd.ba_escape_timer)); + pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n", + le32_to_cpu(cmd.ba_enable_beacon_abort)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u32 curr_gp2; + u64 curr_os; + s64 diff; + char buf[64]; + const size_t bufsz = sizeof(buf); + int pos = 0; + + mutex_lock(&mvm->mutex); + iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL); + mutex_unlock(&mvm->mutex); + + do_div(curr_os, NSEC_PER_USEC); + diff = curr_os - curr_gp2; + pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u8 value; + int ret; + + ret = kstrtou8(buf, 0, &value); + if (ret) + return ret; + if (value > 1) + return -EINVAL; + + mutex_lock(&mvm->mutex); + iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t +iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u8 value; + int ret; + + ret = kstrtou8(buf, 0, &value); + if (ret) + return ret; + + if (value > NUM_LOW_LATENCY_FORCE) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (value == 
LOW_LATENCY_FORCE_UNSET) { + iwl_mvm_update_low_latency(mvm, vif, false, + LOW_LATENCY_DEBUGFS_FORCE); + iwl_mvm_update_low_latency(mvm, vif, false, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE); + } else { + iwl_mvm_update_low_latency(mvm, vif, + value == LOW_LATENCY_FORCE_ON, + LOW_LATENCY_DEBUGFS_FORCE); + iwl_mvm_update_low_latency(mvm, vif, true, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE); + } + mutex_unlock(&mvm->mutex); + return count; +} + +static ssize_t iwl_dbgfs_low_latency_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n" + "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n"; + + /* + * all values in format are boolean so the size of format is enough + * for holding the result string + */ + char buf[sizeof(format) + 1] = {}; + int len; + + len = scnprintf(buf, sizeof(buf) - 1, format, + !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), + !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), + !!(mvmvif->low_latency & LOW_LATENCY_VCMD), + !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE), + !!(mvmvif->low_latency & + LOW_LATENCY_DEBUGFS_FORCE_ENABLE), + !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE), + !!(mvmvif->low_latency_actual)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[20]; + int len; + + len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_ap_addr); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + bool ret; + + mutex_lock(&mvm->mutex); + ret = mac_pton(buf, mvmvif->uapsd_misbehaving_ap_addr); + mutex_unlock(&mvm->mutex); + + return ret ? 
count : -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+                                          size_t count, loff_t *ppos)
+{
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        struct iwl_mvm *mvm = mvmvif->mvm;
+        struct ieee80211_chanctx_conf *chanctx_conf;
+        struct iwl_mvm_phy_ctxt *phy_ctxt;
+        u16 value;
+        int ret;
+
+        ret = kstrtou16(buf, 0, &value);
+        if (ret)
+                return ret;
+
+        mutex_lock(&mvm->mutex);
+        rcu_read_lock();
+
+        chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+        /* make sure the channel context is assigned */
+        if (!chanctx_conf) {
+                rcu_read_unlock();
+                mutex_unlock(&mvm->mutex);
+                return -EINVAL;
+        }
+
+        phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
+        rcu_read_unlock();
+
+        mvm->dbgfs_rx_phyinfo = value;
+
+        ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+                                       chanctx_conf->rx_chains_static,
+                                       chanctx_conf->rx_chains_dynamic);
+        mutex_unlock(&mvm->mutex);
+
+        return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+                                         char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+        struct ieee80211_vif *vif = file->private_data;
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        char buf[8];
+        int len;
+
+        len = scnprintf(buf, sizeof(buf), "0x%04x\n",
+                        mvmvif->mvm->dbgfs_rx_phyinfo);
+
+        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+                                  struct ieee80211_vif *vif)
+{
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        int *ret = data;
+
+        if (mvmvif->dbgfs_quota_min)
+                *ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+                                         size_t count, loff_t *ppos)
+{
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        struct iwl_mvm *mvm = mvmvif->mvm;
+        u16 value;
+        int ret;
+
+        ret = kstrtou16(buf, 0, &value);
+        if (ret)
+                return ret;
+
+        if (value > 95)
+                return -EINVAL;
+
+        mutex_lock(&mvm->mutex);
+
+        mvmvif->dbgfs_quota_min = 0;
+        ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                     iwl_dbgfs_quota_check, &ret);
+        if (ret == 0) {
+                mvmvif->dbgfs_quota_min = value;
+                iwl_mvm_update_quotas(mvm, false, NULL);
+        }
+        mutex_unlock(&mvm->mutex);
+
+        return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+        struct ieee80211_vif *vif = file->private_data;
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        char buf[10];
+        int len;
+
+        len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+        _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do {              \
+                debugfs_create_file(#name, mode, parent, vif,          \
+                                    &iwl_dbgfs_##name##_ops);          \
+        } while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
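The *_FILE_OPS() wrappers above and below generate a struct file_operations plus the read/write glue for each per-vif debugfs entry; their definitions live in mvm/debugfs.h and are not part of this diff. The following is only a rough sketch of the shape they produce, modelled on the link-sta ops added later in this change: the entry name "example", its 32-byte buffer and the plain u32 it exposes are invented here for illustration, and the real wrappers route file->private_data to the vif-based handlers shown in this file while holding mvm->mutex.

/* Illustrative sketch only -- not part of the driver. */
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static u32 example_value;	/* stands in for per-vif driver state */

static ssize_t iwl_dbgfs_example_read(struct file *file, char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "%u\n", example_value);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t iwl_dbgfs_example_write(struct file *file,
				       const char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	char buf[32] = {};

	if (count >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;
	if (kstrtou32(buf, 0, &example_value))
		return -EINVAL;

	return count;
}

static const struct file_operations iwl_dbgfs_example_ops = {
	.read = iwl_dbgfs_example_read,
	.write = iwl_dbgfs_example_write,
	.open = simple_open,
	.llseek = generic_file_llseek,
};

/* Registration mirrors MVM_DEBUGFS_ADD_FILE_VIF(example, dir, 0600). */
static void iwl_dbgfs_example_register(struct dentry *dir, void *vif)
{
	debugfs_create_file("example", 0600, dir, vif, &iwl_dbgfs_example_ops);
}

From userspace such an entry is then reached through the per-interface iwlmvm directory that iwl_mvm_vif_dbgfs_register() creates below.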
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); + + +void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct dentry *dbgfs_dir = vif->debugfs_dir; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[100]; + + /* + * Check if debugfs directory already exist before creating it. + * This may happen when, for example, resetting hw or suspend-resume + */ + if (!dbgfs_dir || mvmvif->dbgfs_dir) + return; + + mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir); + if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) { + IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n", + dbgfs_dir); + return; + } + + if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && + ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) || + (vif->type == NL80211_IFTYPE_STATION && vif->p2p))) + MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600); + + MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); + + if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + mvmvif == mvm->bf_allowed_vif) + MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); + + /* + * Create symlink for convenience pointing to interface specific + * debugfs entries for the driver. For example, under + * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/ + * find + * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/ + */ + snprintf(buf, 100, "../../../%pd3/%pd", + dbgfs_dir, + mvmvif->dbgfs_dir); + + mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name, + mvm->debugfs_dir, buf); +} + +void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + debugfs_remove(mvmvif->dbgfs_slink); + mvmvif->dbgfs_slink = NULL; + + debugfs_remove_recursive(mvmvif->dbgfs_dir); + mvmvif->dbgfs_dir = NULL; +} diff --git a/mvm/debugfs.c b/mvm/debugfs.c index 49898fd99594..cf27f106d4d5 100644 --- a/mvm/debugfs.c +++ b/mvm/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -8,13 +8,16 @@ #include <linux/err.h> #include <linux/ieee80211.h> #include <linux/netdevice.h> +#include <linux/dmi.h> #include "mvm.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" #include "iwl-modparams.h" +#include "iwl-drv.h" #include "fw/error-dump.h" +#include "fw/api/phy-ctxt.h" static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char __user *user_buf, @@ -214,9 +217,9 @@ static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, int pos; if (!mvm->temperature_test) - pos = scnprintf(buf , sizeof(buf), "disabled\n"); + pos = scnprintf(buf, sizeof(buf), "disabled\n"); else - pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); + pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature); return simple_read_from_buffer(user_buf, count, 
ppos, buf, pos); } @@ -261,7 +264,7 @@ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, mvm->temperature = temperature; } IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", - mvm->temperature_test ? "En" : "Dis" , + mvm->temperature_test ? "En" : "Dis", mvm->temperature); /* handle the temperature change */ iwl_mvm_tt_handler(mvm); @@ -291,7 +294,7 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, if (ret) return -EIO; - pos = scnprintf(buf , sizeof(buf), "%d\n", temp); + pos = scnprintf(buf, sizeof(buf), "%d\n", temp); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -324,12 +327,12 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, - "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", + "2.4GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); pos += scnprintf(buf + pos, bufsz - pos, - "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", + "5.2GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n", mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); @@ -338,6 +341,26 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } + +static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int err, pos; + char buf[12]; + u32 value; + + err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_ENABLE_6E, + &iwl_guid, &value); + if (err) + return err; + + pos = sprintf(buf, "0x%08x\n", value); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} #endif static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, @@ -369,13 +392,14 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, +static ssize_t iwl_dbgfs_rs_data_read(struct ieee80211_link_sta *link_sta, + struct iwl_mvm_sta *mvmsta, + struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *mvm_link_sta, + char __user *user_buf, size_t count, loff_t *ppos) { - struct ieee80211_sta *sta = file->private_data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; - struct iwl_mvm *mvm = lq_sta->pers.drv; + struct iwl_lq_sta_rs_fw *lq_sta = &mvm_link_sta->lq_sta.rs_fw; static const size_t bufsz = 2048; char *buff; int desc = 0; @@ -385,8 +409,6 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, if (!buff) return -ENOMEM; - mutex_lock(&mvm->mutex); - desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", lq_sta->pers.sta_id); desc += scnprintf(buff + desc, bufsz - desc, @@ -407,18 +429,19 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, lq_sta->last_rate_n_flags); if (desc < bufsz - 1) buff[desc++] = 
'\n'; - mutex_unlock(&mvm->mutex); ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); kfree(buff); return ret; } -static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, +static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_link_sta *link_sta, + struct iwl_mvm_sta *mvmsta, + struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *mvm_link_sta, char *buf, size_t count, loff_t *ppos) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i; u16 amsdu_len; @@ -426,34 +449,39 @@ static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, return -EINVAL; /* only change from debug set <-> debug unset */ - if (amsdu_len && mvmsta->orig_amsdu_len) + if (amsdu_len && mvm_link_sta->orig_amsdu_len) return -EBUSY; if (amsdu_len) { - mvmsta->orig_amsdu_len = sta->max_amsdu_len; - sta->max_amsdu_len = amsdu_len; - for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++) - sta->max_tid_amsdu_len[i] = amsdu_len; + mvm_link_sta->orig_amsdu_len = link_sta->agg.max_amsdu_len; + link_sta->agg.max_amsdu_len = amsdu_len; + link_sta->agg.max_amsdu_len = amsdu_len; + for (i = 0; i < ARRAY_SIZE(link_sta->agg.max_tid_amsdu_len); i++) + link_sta->agg.max_tid_amsdu_len[i] = amsdu_len; } else { - sta->max_amsdu_len = mvmsta->orig_amsdu_len; - mvmsta->orig_amsdu_len = 0; + link_sta->agg.max_amsdu_len = mvm_link_sta->orig_amsdu_len; + mvm_link_sta->orig_amsdu_len = 0; } + + ieee80211_sta_recalc_aggregates(link_sta->sta); + return count; } -static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file, +static ssize_t iwl_dbgfs_amsdu_len_read(struct ieee80211_link_sta *link_sta, + struct iwl_mvm_sta *mvmsta, + struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *mvm_link_sta, char __user *user_buf, size_t count, loff_t *ppos) { - struct ieee80211_sta *sta = file->private_data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - char buf[32]; int pos; - pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len); + pos = scnprintf(buf, sizeof(buf), "current %d ", + link_sta->agg.max_amsdu_len); pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n", - mvmsta->orig_amsdu_len); + mvm_link_sta->orig_amsdu_len); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -688,6 +716,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, struct iwl_mvm *mvm = file->private_data; char *buff, *pos, *endpos; static const size_t bufsz = 1024; + char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; int ret; buff = kmalloc(bufsz, GFP_KERNEL); @@ -698,7 +727,7 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, endpos = pos + bufsz; pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", - mvm->trans->cfg->fw_name_pre); + iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre)); pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", @@ -712,6 +741,190 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, return ret; } +static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_mvm_tas_status_resp tas_rsp; + struct iwl_mvm_tas_status_resp *rsp = &tas_rsp; + static const size_t bufsz = 1024; + char *buff, *pos, *endpos; + const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = { + [TAS_DISABLED_DUE_TO_BIOS] = + "Due To BIOS", + [TAS_DISABLED_DUE_TO_SAR_6DBM] = + "Due To SAR Limit Less Than 6 dBm", + 
[TAS_DISABLED_REASON_INVALID] = + "N/A", + }; + const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = { + [TAS_DYNA_INACTIVE] = "INACTIVE", + [TAS_DYNA_INACTIVE_MVM_MODE] = + "inactive due to mvm mode", + [TAS_DYNA_INACTIVE_TRIGGER_MODE] = + "inactive due to trigger mode", + [TAS_DYNA_INACTIVE_BLOCK_LISTED] = + "inactive due to block listed", + [TAS_DYNA_INACTIVE_UHB_NON_US] = + "inactive due to uhb non US", + [TAS_DYNA_ACTIVE] = "ACTIVE", + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DEBUG_GROUP, GET_TAS_STATUS), + .flags = CMD_WANT_SKB, + .len = { 0, }, + .data = { NULL, }, + }; + int ret, i, tmp; + bool tas_enabled = false; + unsigned long dyn_status; + + if (!iwl_mvm_firmware_running(mvm)) + return -ENODEV; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, &hcmd); + mutex_unlock(&mvm->mutex); + if (ret < 0) + return ret; + + buff = kzalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + pos = buff; + endpos = pos + bufsz; + + rsp = (void *)hcmd.resp_pkt->data; + + pos += scnprintf(pos, endpos - pos, "TAS Conclusion:\n"); + for (i = 0; i < rsp->in_dual_radio + 1; i++) { + if (rsp->tas_status_mac[i].band != TAS_LMAC_BAND_INVALID && + rsp->tas_status_mac[i].dynamic_status & BIT(TAS_DYNA_ACTIVE)) { + pos += scnprintf(pos, endpos - pos, "\tON for "); + switch (rsp->tas_status_mac[i].band) { + case TAS_LMAC_BAND_HB: + pos += scnprintf(pos, endpos - pos, "HB\n"); + break; + case TAS_LMAC_BAND_LB: + pos += scnprintf(pos, endpos - pos, "LB\n"); + break; + case TAS_LMAC_BAND_UHB: + pos += scnprintf(pos, endpos - pos, "UHB\n"); + break; + case TAS_LMAC_BAND_INVALID: + pos += scnprintf(pos, endpos - pos, + "INVALID BAND\n"); + break; + default: + pos += scnprintf(pos, endpos - pos, + "Unsupported band (%d)\n", + rsp->tas_status_mac[i].band); + goto out; + } + tas_enabled = true; + } + } + if (!tas_enabled) + pos += scnprintf(pos, endpos - pos, "\tOFF\n"); + + pos += scnprintf(pos, endpos - pos, "TAS Report\n"); + pos += scnprintf(pos, endpos - pos, "TAS FW version: %d\n", + rsp->tas_fw_version); + pos += scnprintf(pos, endpos - pos, "Is UHB enabled for USA?: %s\n", + rsp->is_uhb_for_usa_enable ? "True" : "False"); + pos += scnprintf(pos, endpos - pos, "Current MCC: 0x%x\n", + le16_to_cpu(rsp->curr_mcc)); + + pos += scnprintf(pos, endpos - pos, "Block list entries:"); + for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++) + pos += scnprintf(pos, endpos - pos, " 0x%x", + le16_to_cpu(rsp->block_list[i])); + + pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n", + dmi_get_system_info(DMI_SYS_VENDOR)); + pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n", + iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO"); + pos += scnprintf(pos, endpos - pos, + "\tDo TAS Support Dual Radio?: %s\n", + rsp->in_dual_radio ? 
"TRUE" : "FALSE"); + + for (i = 0; i < rsp->in_dual_radio + 1; i++) { + if (rsp->tas_status_mac[i].static_status == 0) { + pos += scnprintf(pos, endpos - pos, + "Static status: disabled\n"); + pos += scnprintf(pos, endpos - pos, + "Static disabled reason: %s (0)\n", + tas_dis_reason[0]); + goto out; + } + + pos += scnprintf(pos, endpos - pos, "TAS status for "); + switch (rsp->tas_status_mac[i].band) { + case TAS_LMAC_BAND_HB: + pos += scnprintf(pos, endpos - pos, "High band\n"); + break; + case TAS_LMAC_BAND_LB: + pos += scnprintf(pos, endpos - pos, "Low band\n"); + break; + case TAS_LMAC_BAND_UHB: + pos += scnprintf(pos, endpos - pos, + "Ultra high band\n"); + break; + case TAS_LMAC_BAND_INVALID: + pos += scnprintf(pos, endpos - pos, + "INVALID band\n"); + break; + default: + pos += scnprintf(pos, endpos - pos, + "Unsupported band (%d)\n", + rsp->tas_status_mac[i].band); + goto out; + } + pos += scnprintf(pos, endpos - pos, "Static status: %sabled\n", + rsp->tas_status_mac[i].static_status ? + "En" : "Dis"); + pos += scnprintf(pos, endpos - pos, + "\tStatic Disabled Reason: "); + if (rsp->tas_status_mac[i].static_dis_reason < TAS_DISABLED_REASON_MAX) + pos += scnprintf(pos, endpos - pos, "%s (%d)\n", + tas_dis_reason[rsp->tas_status_mac[i].static_dis_reason], + rsp->tas_status_mac[i].static_dis_reason); + else + pos += scnprintf(pos, endpos - pos, + "unsupported value (%d)\n", + rsp->tas_status_mac[i].static_dis_reason); + + pos += scnprintf(pos, endpos - pos, "Dynamic status:\n"); + dyn_status = (rsp->tas_status_mac[i].dynamic_status); + for_each_set_bit(tmp, &dyn_status, sizeof(dyn_status)) { + if (tmp >= 0 && tmp < TAS_DYNA_STATUS_MAX) + pos += scnprintf(pos, endpos - pos, + "\t%s (%d)\n", + tas_current_status[tmp], tmp); + } + + pos += scnprintf(pos, endpos - pos, + "Is near disconnection?: %s\n", + rsp->tas_status_mac[i].near_disconnection ? + "True" : "False"); + tmp = le16_to_cpu(rsp->tas_status_mac[i].max_reg_pwr_limit); + pos += scnprintf(pos, endpos - pos, + "Max. 
regulatory pwr limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + tmp = le16_to_cpu(rsp->tas_status_mac[i].sar_limit); + pos += scnprintf(pos, endpos - pos, + "SAR limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + } + +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + iwl_free_resp(&hcmd); + return ret; +} + static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -1067,7 +1280,7 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "A"); if (mvm->scan_rx_ant & ANT_B) pos += scnprintf(buf + pos, bufsz - pos, "B"); - pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); + pos += scnprintf(buf + pos, bufsz - pos, " (%x)\n", mvm->scan_rx_ant); return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } @@ -1200,6 +1413,7 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) struct sk_buff *beacon; struct ieee80211_tx_info *info; struct iwl_mac_beacon_cmd beacon_cmd = {}; + unsigned int link_id; u8 rate; int i; @@ -1233,7 +1447,7 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) mvm->hw->extra_beacon_tailroom = len; - beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, 0); if (!beacon) goto out_err; @@ -1246,19 +1460,26 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) mvmvif = iwl_mvm_vif_from_mac80211(vif); info = IEEE80211_SKB_CB(beacon); - rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); - - beacon_cmd.flags = - cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate)); - beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); - beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + beacon_cmd.flags = + cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, + rate)); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + if (iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 12) + beacon_cmd.link_id = + cpu_to_le32(mvmvif->link[link_id]->fw_link_id); + else + beacon_cmd.link_id = cpu_to_le32((u32)mvmvif->id); - iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, - &beacon_cmd.tim_size, - beacon->data, beacon->len); + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); - iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, - sizeof(beacon_cmd)); + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); + } mutex_unlock(&mvm->mutex); dev_kfree_skb(beacon); @@ -1380,17 +1601,127 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) -#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) -#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \ - _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) +static ssize_t +_iwl_dbgfs_link_sta_wrap_write(ssize_t (*real)(struct ieee80211_link_sta *, + struct iwl_mvm_sta *, + struct iwl_mvm *, + struct iwl_mvm_link_sta *, + char *, + size_t, loff_t *), + struct file *file, + char *buf, size_t buf_size, loff_t *ppos) +{ + struct ieee80211_link_sta *link_sta = file->private_data; + struct iwl_mvm_sta 
*mvmsta = iwl_mvm_sta_from_mac80211(link_sta->sta); + struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(mvmsta->vif)->mvm; + struct iwl_mvm_link_sta *mvm_link_sta; + ssize_t ret; -#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ - debugfs_create_file(alias, mode, parent, sta, \ - &iwl_dbgfs_##name##_ops); \ - } while (0) -#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ - MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) + mutex_lock(&mvm->mutex); + + mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_sta->link_id], + lockdep_is_held(&mvm->mutex)); + if (WARN_ON(!mvm_link_sta)) { + mutex_unlock(&mvm->mutex); + return -ENODEV; + } + + ret = real(link_sta, mvmsta, mvm, mvm_link_sta, buf, buf_size, ppos); + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t +_iwl_dbgfs_link_sta_wrap_read(ssize_t (*real)(struct ieee80211_link_sta *, + struct iwl_mvm_sta *, + struct iwl_mvm *, + struct iwl_mvm_link_sta *, + char __user *, + size_t, loff_t *), + struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + struct ieee80211_link_sta *link_sta = file->private_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(link_sta->sta); + struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(mvmsta->vif)->mvm; + struct iwl_mvm_link_sta *mvm_link_sta; + ssize_t ret; + + mutex_lock(&mvm->mutex); + + mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_sta->link_id], + lockdep_is_held(&mvm->mutex)); + if (WARN_ON(!mvm_link_sta)) { + mutex_unlock(&mvm->mutex); + return -ENODEV; + } + + ret = real(link_sta, mvmsta, mvm, mvm_link_sta, user_buf, count, ppos); + + mutex_unlock(&mvm->mutex); + + return ret; +} + +#define MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, buflen) \ +static ssize_t _iwl_dbgfs_link_sta_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + char buf[buflen] = {}; \ + size_t buf_size = min(count, sizeof(buf) - 1); \ + \ + if (copy_from_user(buf, user_buf, sizeof(buf))) \ + return -EFAULT; \ + \ + return _iwl_dbgfs_link_sta_wrap_write(iwl_dbgfs_##name##_write, \ + file, \ + buf, buf_size, ppos); \ +} \ + +#define MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \ +static ssize_t _iwl_dbgfs_link_sta_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + return _iwl_dbgfs_link_sta_wrap_read(iwl_dbgfs_##name##_read, \ + file, \ + user_buf, count, ppos); \ +} \ + +#define MVM_DEBUGFS_WRITE_LINK_STA_FILE_OPS(name, bufsz) \ +MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, bufsz) \ +static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \ + .write = _iwl_dbgfs_link_sta_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(name) \ +MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \ +static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \ + .read = _iwl_dbgfs_link_sta_##name##_read, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_READ_WRITE_LINK_STA_FILE_OPS(name, bufsz) \ +MVM_DEBUGFS_LINK_STA_READ_WRAPPER(name) \ +MVM_DEBUGFS_LINK_STA_WRITE_WRAPPER(name, bufsz) \ +static const struct file_operations iwl_dbgfs_link_sta_##name##_ops = { \ + .read = _iwl_dbgfs_link_sta_##name##_read, \ + .write = _iwl_dbgfs_link_sta_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ +} + +#define MVM_DEBUGFS_ADD_LINK_STA_FILE_ALIAS(alias, name, parent, mode) \ + debugfs_create_file(alias, mode, 
parent, link_sta, \ + &iwl_dbgfs_link_sta_##name##_ops) +#define MVM_DEBUGFS_ADD_LINK_STA_FILE(name, parent, mode) \ + MVM_DEBUGFS_ADD_LINK_STA_FILE_ALIAS(#name, name, parent, mode) static ssize_t iwl_dbgfs_prph_reg_read(struct file *file, @@ -1675,7 +2006,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); -MVM_DEBUGFS_READ_FILE_OPS(rs_data); +MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); @@ -1683,6 +2014,7 @@ MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(fw_ver); MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); +MVM_DEBUGFS_READ_FILE_OPS(tas_get_status); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); @@ -1701,9 +2033,10 @@ MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); #ifdef CONFIG_ACPI MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); +MVM_DEBUGFS_READ_FILE_OPS(wifi_6e_enable); #endif -MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); +MVM_DEBUGFS_READ_WRITE_LINK_STA_FILE_OPS(amsdu_len, 16); MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); @@ -1743,6 +2076,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, if (ret < 0) return ret; + if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) { + ret = -EIO; + goto out; + } + rsp = (void *)hcmd.resp_pkt->data; if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; @@ -1819,6 +2157,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file, if (ret < 0) return ret; + if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) { + ret = -EIO; + goto out; + } + rsp = (void *)hcmd.resp_pkt->data; if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) { ret = -ENXIO; @@ -1840,17 +2183,18 @@ static const struct file_operations iwl_dbgfs_mem_ops = { .llseek = default_llseek, }; -void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct dentry *dir) +void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); if (iwl_mvm_has_tlc_offload(mvm)) { - MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); + MVM_DEBUGFS_ADD_LINK_STA_FILE(rs_data, dir, 0400); } - MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600); + + MVM_DEBUGFS_ADD_LINK_STA_FILE(amsdu_len, dir, 0600); } void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) @@ -1892,8 +2236,10 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) if (mvm->fw->phy_integration_ver) MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(tas_get_status, mvm->debugfs_dir, 0400); #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(wifi_6e_enable, mvm->debugfs_dir, 0400); #endif MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); diff --git a/mvm/ftm-initiator.c b/mvm/ftm-initiator.c index 430044bc4755..233ae81884a0 100644 --- a/mvm/ftm-initiator.c +++ b/mvm/ftm-initiator.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include 
<linux/etherdevice.h> #include <linux/math64.h> @@ -25,6 +25,10 @@ struct iwl_mvm_smooth_entry { u64 host_time; }; +enum iwl_mvm_pasn_flags { + IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0), +}; + struct iwl_mvm_ftm_pasn_entry { struct list_head list; u8 addr[ETH_ALEN]; @@ -33,6 +37,7 @@ struct iwl_mvm_ftm_pasn_entry { u8 cipher; u8 tx_pn[IEEE80211_CCMP_PN_LEN]; u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u32 flags; }; int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -67,26 +72,45 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * the TK is already configured for this station, so it * shouldn't be set again here. */ - if (vif->bss_conf.assoc && - !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) { + if (vif->cfg.assoc) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; struct ieee80211_sta *sta; + u8 sta_id; rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (!IS_ERR_OR_NULL(sta) && sta->mfp) - expected_tk_len = 0; + for_each_vif_active_link(vif, link_conf, link_id) { + if (memcmp(addr, link_conf->bssid, ETH_ALEN)) + continue; + + sta_id = mvmvif->link[link_id]->ap_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (!IS_ERR_OR_NULL(sta) && sta->mfp) + expected_tk_len = 0; + break; + } rcu_read_unlock(); } - if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) { + if (tk_len != expected_tk_len || + (hltk_len && hltk_len != sizeof(pasn->hltk))) { IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n", tk_len, hltk_len); goto out; } + if (!expected_tk_len && !hltk_len) { + IWL_ERR(mvm, "TK and HLTK not set\n"); + goto out; + } + memcpy(pasn->addr, addr, sizeof(pasn->addr)); - memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); + + if (hltk_len) { + memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); + pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK; + } if (tk && tk_len) memcpy(pasn->tk, tk, sizeof(pasn->tk)); @@ -222,7 +246,7 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); else eth_broadcast_addr(cmd->range_req_bssid); @@ -254,7 +278,7 @@ static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm, for (i = 0; i < ETH_ALEN; i++) cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; - if (vif->bss_conf.assoc) { + if (vif->cfg.assoc) { memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); /* AP's TSF is only relevant if associated */ @@ -503,20 +527,30 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_ftm_put_target_common(mvm, peer, target); - if (vif->bss_conf.assoc && - !memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) { + if (vif->cfg.assoc) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_sta *sta; + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN)) + continue; + + target->sta_id = mvmvif->link[link_id]->ap_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return PTR_ERR_OR_ZERO(sta); + } - sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) 
- FTM_PUT_FLAG(PMF); - + if (sta->mfp && (peer->ftm.trigger_based || + peer->ftm.non_trigger_based)) + FTM_PUT_FLAG(PMF); + break; + } rcu_read_unlock(); - - target->sta_id = mvmvif->ap_sta_id; } else { target->sta_id = IWL_MVM_INVALID_STA; } @@ -691,9 +725,13 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, continue; target->cipher = entry->cipher; - memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); - if (vif->bss_conf.assoc && + if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK) + memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); + else + memset(target->hltk, 0, sizeof(target->hltk)); + + if (vif->cfg.assoc && !memcmp(vif->bss_conf.bssid, target->bssid, sizeof(target->bssid))) ieee80211_iter_keys(mvm->hw, vif, iter, target); @@ -1010,11 +1048,10 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, struct cfg80211_pmsr_result *res) { - struct iwl_mvm_smooth_entry *resp; + struct iwl_mvm_smooth_entry *resp = NULL, *iter; s64 rtt_avg, rtt = res->ftm.rtt_avg; u32 undershoot, overshoot; u8 alpha; - bool found; if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH) return; @@ -1028,15 +1065,14 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, return; } - found = false; - list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) { - if (!memcmp(res->addr, resp->addr, ETH_ALEN)) { - found = true; + list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) { + if (!memcmp(res->addr, iter->addr, ETH_ALEN)) { + resp = iter; break; } } - if (!found) { + if (!resp) { resp = kzalloc(sizeof(*resp), GFP_KERNEL); if (!resp) return; @@ -1105,10 +1141,10 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); - IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index); + IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index); IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); - IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread); + IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread); IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); @@ -1208,7 +1244,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) } IWL_DEBUG_INFO(mvm, "Range response received\n"); - IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n", + IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n", mvm->ftm_initiator.req->cookie, num_of_aps); for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { @@ -1300,7 +1336,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) - IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n", + IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n", fw_ap->rttConfidence); iwl_mvm_debug_range_resp(mvm, i, &result); diff --git a/mvm/ftm-responder.c b/mvm/ftm-responder.c index 9729680476fd..b49781d1a07a 100644 --- a/mvm/ftm-responder.c +++ b/mvm/ftm-responder.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 
Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include <net/cfg80211.h> #include <linux/etherdevice.h> @@ -104,7 +104,8 @@ iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, static int iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct cfg80211_chan_def *chandef) + struct cfg80211_chan_def *chandef, + struct ieee80211_bss_conf *link_conf) { u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -119,7 +120,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | IWL_TOF_RESPONDER_CMD_VALID_BSSID | IWL_TOF_RESPONDER_CMD_VALID_STA_ID), - .sta_id = mvmvif->bcast_sta.sta_id, + .sta_id = mvmvif->link[link_conf->link_id]->bcast_sta.sta_id, }; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6); int err; @@ -317,6 +318,8 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, .addr = addr, .hltk = hltk, }; + struct iwl_mvm_pasn_hltk_data *hltk_data_ptr = NULL; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), 2); @@ -328,12 +331,21 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, return -ENOTSUPP; } - hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); - if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { - IWL_ERR(mvm, "invalid cipher: %u\n", cipher); + if ((!hltk || !hltk_len) && (!tk || !tk_len)) { + IWL_ERR(mvm, "TK and HLTK not set\n"); return -EINVAL; } + if (hltk && hltk_len) { + hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); + if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { + IWL_ERR(mvm, "invalid cipher: %u\n", cipher); + return -EINVAL; + } + + hltk_data_ptr = &hltk_data; + } + if (tk && tk_len) { sta = kzalloc(sizeof(*sta), GFP_KERNEL); if (!sta) @@ -350,7 +362,7 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, list_add_tail(&sta->list, &mvm->resp_pasn_list); } - ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, &hltk_data); + ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, hltk_data_ptr); if (ret && sta) iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); @@ -375,7 +387,8 @@ int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, return -EINVAL; } -int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_ftm_responder_params *params; @@ -384,11 +397,11 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) struct iwl_mvm_phy_ctxt *phy_ctxt; int ret; - params = vif->bss_conf.ftmr_params; + params = bss_conf->ftmr_params; lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder)) + if (WARN_ON_ONCE(!bss_conf->ftm_responder)) return -EINVAL; if (vif->p2p || vif->type != NL80211_IFTYPE_AP || @@ -398,7 +411,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } rcu_read_lock(); - pctx = rcu_dereference(vif->chanctx_conf); + pctx = rcu_dereference(bss_conf->chanctx_conf); /* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care * about changes in the ctx after releasing the lock because the driver * is still protected by the mutex. 
*/ @@ -413,7 +426,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (ret) return ret; - ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def); + ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def, bss_conf); if (ret) return ret; @@ -435,13 +448,14 @@ void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, } void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) { - if (!vif->bss_conf.ftm_responder) + if (!bss_conf->ftm_responder) return; iwl_mvm_ftm_responder_clear(mvm, vif); - iwl_mvm_ftm_start_responder(mvm, vif); + iwl_mvm_ftm_start_responder(mvm, vif, bss_conf); } void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -21,6 +21,7 @@ #include "iwl-phy-db.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" +#include "time-sync.h" #define MVM_UCODE_ALIVE_TIMEOUT (HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) @@ -123,6 +124,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, UCODE_ALIVE_NTFY, 0); u32 i; + if (version == 6) { struct iwl_alive_ntf_v6 *palive; @@ -287,6 +289,9 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) { +#define IWL_FW_PRINT_REG_INFO(reg_name) \ + IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name)) + struct iwl_trans *trans = mvm->trans; enum iwl_device_family device_family = trans->trans_cfg->device_family; @@ -294,15 +299,15 @@ static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) return; if (device_family <= IWL_DEVICE_FAMILY_9000) - IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n", - iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION)); + IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION); else - IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n", - iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION)); + IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION); - IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n", - iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE)); + IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE); + /* print OPT info */ + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR); + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA); } static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, @@ -316,6 +321,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; bool run_in_rfkill = ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm); + u8 count; + struct iwl_pc_data *pc_data; if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && @@ -351,6 +358,20 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait, MVM_UCODE_ALIVE_TIMEOUT); + + if (mvm->trans->trans_cfg->device_family == + IWL_DEVICE_FAMILY_AX210) { + /* print these registers regardless of alive fail/success */ + IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n", + iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION)); + IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n", + iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION)); + IWL_INFO(mvm, 
"WFPM_AUTH_KEY_0: 0x%x\n", + iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG)); + IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n", + iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9)); + } + if (ret) { struct iwl_trans *trans = mvm->trans; @@ -374,6 +395,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, /* LMAC/UMAC PC info */ if (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_22000) { + pc_data = trans->dbg.pc_data; + for (count = 0; count < trans->dbg.num_pc; + count++, pc_data++) + IWL_ERR(mvm, "%s: 0x%x\n", + pc_data->pc_name, + pc_data->pc_address); + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { IWL_ERR(mvm, "UMAC PC: 0x%x\n", iwl_read_umac_prph(trans, @@ -387,7 +416,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, UREG_LMAC2_CURRENT_PC)); } - if (ret == -ETIMEDOUT) + if (ret == -ETIMEDOUT && !mvm->pldr_sync) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); @@ -401,7 +430,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return -EIO; } - ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait); + /* if reached this point, Alive notification was received */ + iwl_mei_alive_notif(true); + + ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, + &mvm->fw->ucode_capa); if (ret) { IWL_ERR(mvm, "Timeout waiting for PNVM load!\n"); iwl_fw_set_current_image(&mvm->fwrt, old_type); @@ -445,6 +478,101 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, + struct iwl_phy_specific_cfg *phy_filters) +{ +#ifdef CONFIG_ACPI + *phy_filters = mvm->phy_filters; +#endif /* CONFIG_ACPI */ +} + +#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + u8 cmd_ver; + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + SAR_OFFSET_MAPPING_TABLE_CMD), + .flags = 0, + .data[0] = &mvm->fwrt.sgom_table, + .len[0] = sizeof(mvm->fwrt.sgom_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!mvm->fwrt.sgom_enabled) { + IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); + return 0; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != 2) { + IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n", + cmd_ver); + return 0; + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); + + return ret; +} +#else + +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + return 0; +} +#endif + +static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) +{ + u32 cmd_id = PHY_CONFIGURATION_CMD; + struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; + enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; + u8 cmd_ver; + size_t cmd_size; + + if (iwl_mvm_has_unified_ucode(mvm) && + !mvm->trans->cfg->tx_with_siso_diversity) + return 0; + + if (mvm->trans->cfg->tx_with_siso_diversity) { + /* + * TODO: currently we don't set the antenna but letting the NIC + * to decide which antenna to use. This should come from BIOS. 
+ */ + phy_cfg_cmd.phy_cfg = + cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); + } + + /* Set parameters */ + phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); + + /* set flags extra PHY configuration flags from the device's cfg */ + phy_cfg_cmd.phy_cfg |= + cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); + + phy_cfg_cmd.calib_control.event_trigger = + mvm->fw->default_calib[ucode_type].event_trigger; + phy_cfg_cmd.calib_control.flow_trigger = + mvm->fw->default_calib[ucode_type].flow_trigger; + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + if (cmd_ver >= 3) + iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg); + + IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", + phy_cfg_cmd.phy_cfg); + cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) : + sizeof(struct iwl_phy_cfg_cmd_v1); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); +} + static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait init_wait; @@ -524,6 +652,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) goto error; } + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to run PHY configuration: %d\n", + ret); + goto error; + } + /* We wait for the INIT complete notification */ ret = iwl_wait_notification(&mvm->notif_wait, &init_wait, MVM_UCODE_ALIVE_TIMEOUT); @@ -550,132 +685,6 @@ error: return ret; } -#ifdef CONFIG_ACPI -static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, - struct iwl_phy_specific_cfg *phy_filters) -{ - /* - * TODO: read specific phy config from BIOS - * ACPI table for this feature has not been defined yet, - * so for now we use hardcoded values. - */ - - if (IWL_MVM_PHY_FILTER_CHAIN_A) { - phy_filters->filter_cfg_chain_a = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_A); - } - if (IWL_MVM_PHY_FILTER_CHAIN_B) { - phy_filters->filter_cfg_chain_b = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_B); - } - if (IWL_MVM_PHY_FILTER_CHAIN_C) { - phy_filters->filter_cfg_chain_c = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_C); - } - if (IWL_MVM_PHY_FILTER_CHAIN_D) { - phy_filters->filter_cfg_chain_d = - cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D); - } -} -#else /* CONFIG_ACPI */ - -static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, - struct iwl_phy_specific_cfg *phy_filters) -{ -} -#endif /* CONFIG_ACPI */ - -#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) -static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) -{ - u8 cmd_ver; - int ret; - struct iwl_host_cmd cmd = { - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, - SAR_OFFSET_MAPPING_TABLE_CMD), - .flags = 0, - .data[0] = &mvm->fwrt.sgom_table, - .len[0] = sizeof(mvm->fwrt.sgom_table), - .dataflags[0] = IWL_HCMD_DFL_NOCOPY, - }; - - if (!mvm->fwrt.sgom_enabled) { - IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); - return 0; - } - - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, - IWL_FW_CMD_VER_UNKNOWN); - - if (cmd_ver != 2) { - IWL_DEBUG_RADIO(mvm, "command version is unsupported. 
version = %d\n", - cmd_ver); - return 0; - } - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret < 0) - IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); - - return ret; -} -#else - -static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) -{ - return 0; -} -#endif - -static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) -{ - u32 cmd_id = PHY_CONFIGURATION_CMD; - struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; - enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; - struct iwl_phy_specific_cfg phy_filters = {}; - u8 cmd_ver; - size_t cmd_size; - - if (iwl_mvm_has_unified_ucode(mvm) && - !mvm->trans->cfg->tx_with_siso_diversity) - return 0; - - if (mvm->trans->cfg->tx_with_siso_diversity) { - /* - * TODO: currently we don't set the antenna but letting the NIC - * to decide which antenna to use. This should come from BIOS. - */ - phy_cfg_cmd.phy_cfg = - cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); - } - - /* Set parameters */ - phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); - - /* set flags extra PHY configuration flags from the device's cfg */ - phy_cfg_cmd.phy_cfg |= - cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags); - - phy_cfg_cmd.calib_control.event_trigger = - mvm->fw->default_calib[ucode_type].event_trigger; - phy_cfg_cmd.calib_control.flow_trigger = - mvm->fw->default_calib[ucode_type].flow_trigger; - - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - if (cmd_ver == 3) { - iwl_mvm_phy_filter_init(mvm, &phy_filters); - memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters, - sizeof(struct iwl_phy_specific_cfg)); - } - - IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n", - phy_cfg_cmd.phy_cfg); - cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) : - sizeof(struct iwl_phy_cfg_cmd_v1); - return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); -} - int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait calib_wait; @@ -1015,8 +1024,9 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) int ret, cmd_size; ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size); - if(ret < 0) - return ret; + /* Not supporting PPAG table is a valid scenario */ + if (ret < 0) + return 0; IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, @@ -1051,7 +1061,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = { }, { .ident = "LENOVO", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), }, }, { .ident = "DELL", @@ -1059,11 +1069,40 @@ static const struct dmi_system_id dmi_tas_approved_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), }, }, - + { .ident = "MSFT", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + }, + }, + { .ident = "Acer", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + }, + }, + { .ident = "ASUS", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + }, + }, + { .ident = "MSI", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), + }, + }, + { .ident = "Honor", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HONOR"), + }, + }, /* keep last */ {} }; +bool iwl_mvm_is_vendor_in_approved_list(void) +{ + return dmi_check_system(dmi_tas_approved_list); +} + static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc) { int i; @@ -1112,7 +1151,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) if (ret == 0) return; - if (!dmi_check_system(dmi_tas_approved_list)) { + if 
(!iwl_mvm_is_vendor_in_approved_list()) { IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", dmi_get_system_info(DMI_SYS_VENDOR)); @@ -1126,6 +1165,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) "Unable to add US/Canada to TAS block list, disabling TAS\n"); return; } + } else { + IWL_DEBUG_RADIO(mvm, + "System vendor '%s' is in the approved list.\n", + dmi_get_system_info(DMI_SYS_VENDOR)); } /* v4 is the same size as v3, so no need to differentiate here */ @@ -1306,6 +1349,8 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) /* we don't fail if the table is not available */ } } + + iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters); } #else /* CONFIG_ACPI */ @@ -1343,6 +1388,11 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { } +bool iwl_mvm_is_vendor_in_approved_list(void) +{ + return false; +} + static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) { return DSM_VALUE_RFI_DISABLE; @@ -1452,6 +1502,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) struct ieee80211_channel *chan; struct cfg80211_chan_def chandef; struct ieee80211_supported_band *sband = NULL; + u32 sb_cfg; lockdep_assert_held(&mvm->mutex); @@ -1459,15 +1510,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) return ret; + sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); + mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK); + if (mvm->pldr_sync && iwl_mei_pldr_req()) + return -EBUSY; + ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - if (ret != -ERFKILL) + if (ret != -ERFKILL && !mvm->pldr_sync) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } + /* FW loaded successfully */ + mvm->pldr_sync = false; + iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); @@ -1491,12 +1550,11 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; + ret = iwl_send_phy_cfg_cmd(mvm); + if (ret) + goto error; } - ret = iwl_send_phy_cfg_cmd(mvm); - if (ret) - goto error; - ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) goto error; @@ -1508,6 +1566,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } + iwl_mvm_lari_cfg(mvm); + /* Init RSS configuration */ ret = iwl_configure_rxq(&mvm->fwrt); if (ret) @@ -1523,8 +1583,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); + } + + for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++) + RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); + + memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map)); mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; @@ -1543,7 +1610,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) * internal aux station for all aux activities that don't * requires a dedicated data queue. */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { /* * In old version the aux station uses mac id like other * station and not lmac id @@ -1611,7 +1678,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; - iwl_mvm_lari_cfg(mvm); /* * RTNL is not taken during Ct-kill, but we don't need to scan/Tx * anyway, so don't init MCC. 
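The TAS changes above factor the existing dmi_check_system() lookup into iwl_mvm_is_vendor_in_approved_list() so the new tas_get_status debugfs file can reuse it. The pattern is a standard DMI table match: a terminated array of struct dmi_system_id entries, where dmi_check_system() returns how many entries match the running platform. A minimal sketch of the same idiom follows; the vendor string is a placeholder and is not one of the entries in dmi_tas_approved_list.

/* Illustrative sketch only -- not part of the driver. */
#include <linux/dmi.h>

static const struct dmi_system_id example_approved_list[] = {
	{
		.ident = "EXAMPLE",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor Inc."),
		},
	},
	{}	/* terminator, like the "keep last" entry above */
};

static bool example_vendor_is_approved(void)
{
	/* dmi_check_system() returns the number of matching entries */
	return dmi_check_system(example_approved_list);
}

dmi_check_system() walks the table until the empty terminator, which is why the approved-list hunk above keeps the trailing {} entry last.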
@@ -1630,9 +1696,18 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); + if (mvm->time_sync.active) + iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr, + IWL_TIME_SYNC_PROTOCOL_TM | + IWL_TIME_SYNC_PROTOCOL_FTM); + } + + if (!mvm->ptp_data.ptp_clock) + iwl_mvm_ptp_init(mvm); + if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); @@ -1653,14 +1728,13 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); - iwl_mvm_ftm_initiator_smooth_config(mvm); - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) { + if (iwl_rfi_supported(mvm)) { if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE) iwl_rfi_send_config_cmd(mvm, NULL); } + iwl_mvm_mei_device_state(mvm, true); + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: @@ -1699,10 +1773,12 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) goto error; /* init the fw <-> mac80211 STA mapping */ - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL); + } - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { /* * Add auxiliary station for scanning. * Newer versions of this command implies that the fw uses diff --git a/mvm/link.c b/mvm/link.c new file mode 100644 index 000000000000..ace82e2c5bd9 --- /dev/null +++ b/mvm/link.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#include "mvm.h" +#include "time-event.h" + +static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvm_vif) +{ + u32 link_id; + + lockdep_assert_held(&mvm->mutex); + + link_id = ffz(mvm->fw_link_ids_map); + + /* this case can happen if there're deactivated but not removed links */ + if (link_id > IWL_MVM_FW_MAX_LINK_ID) + return IWL_MVM_FW_LINK_ID_INVALID; + + mvm->fw_link_ids_map |= BIT(link_id); + return link_id; +} + +static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id) +{ + lockdep_assert_held(&mvm->mutex); + + if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID)) + mvm->fw_link_ids_map &= ~BIT(link_id); +} + +static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm, + struct iwl_link_config_cmd *cmd, + enum iwl_ctxt_action action) +{ + int ret; + + cmd->action = cpu_to_le32(action); + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD), 0, + sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n", + action, ret); + return ret; +} + +int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_link_config_cmd cmd = {}; + struct iwl_mvm_phy_ctxt *phyctxt; + + if (WARN_ON_ONCE(!link_info)) + return -EINVAL; + + if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) { + link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm, + mvmvif); + if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) + return -EINVAL; + + 
rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id], + link_conf); + } + + /* Update SF - Disable if needed. if this fails, SF might still be on + * while many macs are bound, which is forbidden - so fail the binding. + */ + if (iwl_mvm_sf_update(mvm, vif, false)) + return -EINVAL; + + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + cmd.mac_id = cpu_to_le32(mvmvif->id); + cmd.spec_link_id = link_conf->link_id; + /* P2P-Device already has a valid PHY context during add */ + phyctxt = link_info->phy_ctxt; + if (phyctxt) + cmd.phy_id = cpu_to_le32(phyctxt->id); + else + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); + + memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN); + + if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) + memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); + + cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); + + return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD); +} + +int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u32 changes, bool active) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_mvm_phy_ctxt *phyctxt; + struct iwl_link_config_cmd cmd = {}; + u32 ht_flag, flags = 0, flags_mask = 0; + int ret; + + if (WARN_ON_ONCE(!link_info || + link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) + return -EINVAL; + + if (changes & LINK_CONTEXT_MODIFY_ACTIVE) { + /* When activating a link, phy context should be valid; + * when deactivating a link, it also should be valid since + * the link was active before. So, do nothing in this case. + * Since a link is added first with FW_CTXT_INVALID, then we + * can get here in case it's removed before it was activated. + */ + if (!link_info->phy_ctxt) + return 0; + + /* Catch early if driver tries to activate or deactivate a link + * twice. + */ + WARN_ON_ONCE(active == link_info->active); + + /* When deactivating a link session protection should + * be stopped + */ + if (!active && vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_stop_session_protection(mvm, vif); + } + + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + + /* The phy_id, link address and listen_lmac can be modified only until + * the link becomes active, otherwise they will be ignored. 
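The new mvm/link.c above hands out firmware link IDs from a bitmap: ffz() returns the first clear bit, iwl_mvm_get_free_fw_link_id() marks it used, and iwl_mvm_release_fw_link_id() clears it again when the link is removed. A self-contained model of that allocator, with an illustrative 16-ID limit and local helper names rather than the driver's:

/* Standalone model of a bitmap ID allocator, as used for FW link IDs. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define MAX_LINK_ID	15U		/* illustrative limit */
#define INVALID_LINK	0xffffffffU

static uint32_t link_ids_map;		/* bit i set => link ID i in use */

static uint32_t alloc_link_id(void)
{
	int bit = ffs((int)~link_ids_map);	/* first zero bit, 1-based */

	if (!bit || (uint32_t)(bit - 1) > MAX_LINK_ID)
		return INVALID_LINK;		/* every ID is taken */
	link_ids_map |= 1U << (bit - 1);
	return (uint32_t)(bit - 1);
}

static void release_link_id(uint32_t id)
{
	assert(id <= MAX_LINK_ID);
	link_ids_map &= ~(1U << id);
}

int main(void)
{
	uint32_t a = alloc_link_id();	/* 0 */
	uint32_t b = alloc_link_id();	/* 1 */

	release_link_id(a);
	printf("reused id: %u (b=%u)\n", alloc_link_id(), b); /* 0 again */
	return 0;
}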
+ */ + phyctxt = link_info->phy_ctxt; + if (phyctxt) + cmd.phy_id = cpu_to_le32(phyctxt->id); + else + cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID); + cmd.mac_id = cpu_to_le32(mvmvif->id); + + memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN); + + cmd.active = cpu_to_le32(active); + + if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) + memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN); + + iwl_mvm_set_fw_basic_rates(mvm, vif, link_conf, + &cmd.cck_rates, &cmd.ofdm_rates); + + cmd.cck_short_preamble = cpu_to_le32(link_conf->use_short_preamble); + cmd.short_slot = cpu_to_le32(link_conf->use_short_slot); + + /* The fw does not distinguish between ht and fat */ + ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT; + iwl_mvm_set_fw_protection_flags(mvm, vif, link_conf, + &cmd.protection_flags, + ht_flag, LINK_PROT_FLG_TGG_PROTECT); + + iwl_mvm_set_fw_qos_params(mvm, vif, link_conf, cmd.ac, + &cmd.qos_flags); + + + cmd.bi = cpu_to_le32(link_conf->beacon_int); + cmd.dtim_interval = cpu_to_le32(link_conf->beacon_int * + link_conf->dtim_period); + + if (!link_conf->he_support || iwlwifi_mod_params.disable_11ax || + (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) { + changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS; + goto send_cmd; + } + + cmd.htc_trig_based_pkt_ext = link_conf->htc_trig_based_pkt_ext; + + if (link_conf->uora_exists) { + cmd.rand_alloc_ecwmin = + link_conf->uora_ocw_range & 0x7; + cmd.rand_alloc_ecwmax = + (link_conf->uora_ocw_range >> 3) & 0x7; + } + + /* TODO how to set ndp_fdbk_buff_th_exp? */ + + if (iwl_mvm_set_fw_mu_edca_params(mvm, mvmvif->link[link_id], + &cmd.trig_based_txf[0])) { + flags |= LINK_FLG_MU_EDCA_CW; + flags_mask |= LINK_FLG_MU_EDCA_CW; + } + + if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be) + cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing); + else + /* This flag can be set only if the MAC has eht support */ + changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + + cmd.bss_color = link_conf->he_bss_color.color; + + if (!link_conf->he_bss_color.enabled) { + flags |= LINK_FLG_BSS_COLOR_DIS; + flags_mask |= LINK_FLG_BSS_COLOR_DIS; + } + + cmd.frame_time_rts_th = cpu_to_le16(link_conf->frame_time_rts_th); + + /* Block 26-tone RU OFDMA transmissions */ + if (link_info->he_ru_2mhz_block) { + flags |= LINK_FLG_RU_2MHZ_BLOCK; + flags_mask |= LINK_FLG_RU_2MHZ_BLOCK; + } + + if (link_conf->nontransmitted) { + ether_addr_copy(cmd.ref_bssid_addr, + link_conf->transmitter_bssid); + cmd.bssid_index = link_conf->bssid_index; + } + +send_cmd: + cmd.modify_mask = cpu_to_le32(changes); + cmd.flags = cpu_to_le32(flags); + cmd.flags_mask = cpu_to_le32(flags_mask); + cmd.spec_link_id = link_conf->link_id; + cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac); + + ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY); + if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE)) + link_info->active = active; + + return ret; +} + +int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + struct iwl_link_config_cmd cmd = {}; + int ret; + + if (WARN_ON(!link_info || + link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) + return -EINVAL; + + RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], + NULL); + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + iwl_mvm_release_fw_link_id(mvm, 
link_info->fw_link_id); + link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + cmd.spec_link_id = link_conf->link_id; + + ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE); + + if (!ret) + if (iwl_mvm_sf_update(mvm, vif, true)) + IWL_ERR(mvm, "Failed to update SF state\n"); + + return ret; +} + +/* link should be deactivated before removal, so in most cases we need to + * perform these two operations together + */ +int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + int ret; + + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (ret) + return ret; + + ret = iwl_mvm_remove_link(mvm, vif, link_conf); + if (ret) + return ret; + + return ret; +} diff --git a/mvm/mac-ctxt.c b/mvm/mac-ctxt.c index 5aa4520b70ac..7369a45f7f2b 100644 --- a/mvm/mac-ctxt.c +++ b/mvm/mac-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -225,16 +225,20 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * that we should share it with another interface. */ - /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */ - switch (vif->type) { - case NL80211_IFTYPE_ADHOC: - break; - case NL80211_IFTYPE_STATION: - if (!vif->p2p) + /* MAC ID 0 should be used only for the managed/IBSS vif with non-MLO + * FW API + */ + if (!mvm->mld_api_is_used) { + switch (vif->type) { + case NL80211_IFTYPE_ADHOC: break; - fallthrough; - default: - __clear_bit(0, data.available_mac_ids); + case NL80211_IFTYPE_STATION: + if (!vif->p2p) + break; + fallthrough; + default: + __clear_bit(0, data.available_mac_ids); + } } ieee80211_iterate_active_interfaces_atomic( @@ -293,15 +297,15 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). */ - mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } - mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) - mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; + mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; return 0; @@ -396,15 +400,46 @@ static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, *ofdm_rates = ofdm; } -static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_ctx_cmd *cmd) +void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *cck_rates, __le32 *ofdm_rates) +{ + struct ieee80211_chanctx_conf *chanctx; + u8 cck_ack_rates = 0, ofdm_ack_rates = 0; + + rcu_read_lock(); + chanctx = rcu_dereference(link_conf->chanctx_conf); + iwl_mvm_ack_rates(mvm, vif, chanctx ? 
chanctx->def.chan->band + : NL80211_BAND_2GHZ, + &cck_ack_rates, &ofdm_ack_rates); + + rcu_read_unlock(); + + *cck_rates = cpu_to_le32((u32)cck_ack_rates); + *ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); +} + +void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *protection_flags, u32 ht_flag, + u32 tgg_flag) { /* for both sta and ap, ht_operation_mode hold the protection_mode */ - u8 protection_mode = vif->bss_conf.ht_operation_mode & + u8 protection_mode = link_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; - /* The fw does not distinguish between ht and fat */ - u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; + bool ht_enabled = !!(link_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION); + + if (link_conf->use_cts_prot) + *protection_flags |= cpu_to_le32(tgg_flag); + + IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", + link_conf->use_cts_prot, + link_conf->ht_operation_mode); + + if (!ht_enabled) + return; IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode); /* @@ -416,12 +451,12 @@ static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - cmd->protection_flags |= cpu_to_le32(ht_flag); + *protection_flags |= cpu_to_le32(ht_flag); break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* Protect when channel wider than 20MHz */ - if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20) - cmd->protection_flags |= cpu_to_le32(ht_flag); + if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20) + *protection_flags |= cpu_to_le32(ht_flag); break; default: IWL_ERR(mvm, "Illegal protection mode %d\n", @@ -430,46 +465,82 @@ static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, } } -static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mac_ctx_cmd *cmd, - const u8 *bssid_override, - u32 action) +void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct iwl_ac_qos *ac, __le32 *qos_flags) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_chanctx_conf *chanctx; - bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & - IEEE80211_HT_OP_MODE_PROTECTION); - u8 cck_ack_rates, ofdm_ack_rates; - const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; int i; - cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color)); - cmd->action = cpu_to_le32(action); + if (!mvm_link) + return; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); + u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); + + ac[ucode_ac].cw_min = + cpu_to_le16(mvm_link->queue_params[i].cw_min); + ac[ucode_ac].cw_max = + cpu_to_le16(mvm_link->queue_params[i].cw_max); + ac[ucode_ac].edca_txop = + cpu_to_le16(mvm_link->queue_params[i].txop * 32); + ac[ucode_ac].aifsn = mvm_link->queue_params[i].aifs; + ac[ucode_ac].fifos_mask = BIT(txf); + } + + if (link_conf->qos) + *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); + + if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT) + *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); +} + +int iwl_mvm_get_mac_type(struct ieee80211_vif *vif) +{ + u32 mac_type = FW_MAC_TYPE_BSS_STA; switch (vif->type) { case NL80211_IFTYPE_STATION: if (vif->p2p) - 
cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA); + mac_type = FW_MAC_TYPE_P2P_STA; else - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA); + mac_type = FW_MAC_TYPE_BSS_STA; break; case NL80211_IFTYPE_AP: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO); + mac_type = FW_MAC_TYPE_GO; break; case NL80211_IFTYPE_MONITOR: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER); + mac_type = FW_MAC_TYPE_LISTENER; break; case NL80211_IFTYPE_P2P_DEVICE: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE); + mac_type = FW_MAC_TYPE_P2P_DEVICE; break; case NL80211_IFTYPE_ADHOC: - cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS); + mac_type = FW_MAC_TYPE_IBSS; break; default: WARN_ON_ONCE(1); } + return mac_type; +} + +static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_ctx_cmd *cmd, + const u8 *bssid_override, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; + u32 ht_flag; + + cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)); + cmd->action = cpu_to_le32(action); + cmd->mac_type = cpu_to_le32(iwl_mvm_get_mac_type(vif)); cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); @@ -480,15 +551,8 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, else eth_broadcast_addr(cmd->bssid_addr); - rcu_read_lock(); - chanctx = rcu_dereference(vif->chanctx_conf); - iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band - : NL80211_BAND_2GHZ, - &cck_ack_rates, &ofdm_ack_rates); - rcu_read_unlock(); - - cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates); - cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates); + iwl_mvm_set_fw_basic_rates(mvm, vif, &vif->bss_conf, &cmd->cck_rates, + &cmd->ofdm_rates); cmd->cck_short_preamble = cpu_to_le32(vif->bss_conf.use_short_preamble ? 
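For reference, the protection handling that iwl_mvm_mac_ctxt_cmd_common() used to do inline now lives in iwl_mvm_set_fw_protection_flags() earlier in this hunk. The standalone sketch below mirrors only its decision table; the enum values are local stand-ins for the IEEE80211_HT_OP_MODE_PROTECTION_* constants:

/* Sketch of the HT/FAT protection decision -- not driver code. */
#include <stdbool.h>
#include <stdio.h>

enum ht_prot_mode {		/* IEEE80211_HT_OP_MODE_PROTECTION_* stand-ins */
	PROT_NONE = 0,
	PROT_NONMEMBER = 1,
	PROT_20MHZ = 2,
	PROT_NONHT_MIXED = 3,
};

/* Returns true when HT/FAT protection should be requested from the FW. */
static bool need_ht_protection(enum ht_prot_mode mode, int chan_width_mhz)
{
	switch (mode) {
	case PROT_NONE:
		return false;
	case PROT_NONMEMBER:
	case PROT_NONHT_MIXED:
		return true;
	case PROT_20MHZ:
		/* protect only when operating wider than 20 MHz */
		return chan_width_mhz > 20;
	}
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       need_ht_protection(PROT_NONE, 80),
	       need_ht_protection(PROT_20MHZ, 20),
	       need_ht_protection(PROT_20MHZ, 80));	/* 0 0 1 */
	return 0;
}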
@@ -499,33 +563,14 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->filter_flags = 0; - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); - u8 ucode_ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); - - cmd->ac[ucode_ac].cw_min = - cpu_to_le16(mvmvif->queue_params[i].cw_min); - cmd->ac[ucode_ac].cw_max = - cpu_to_le16(mvmvif->queue_params[i].cw_max); - cmd->ac[ucode_ac].edca_txop = - cpu_to_le16(mvmvif->queue_params[i].txop * 32); - cmd->ac[ucode_ac].aifsn = mvmvif->queue_params[i].aifs; - cmd->ac[ucode_ac].fifos_mask = BIT(txf); - } - - if (vif->bss_conf.qos) - cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); - - if (vif->bss_conf.use_cts_prot) - cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); + iwl_mvm_set_fw_qos_params(mvm, vif, &vif->bss_conf, cmd->ac, + &cmd->qos_flags); - IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", - vif->bss_conf.use_cts_prot, - vif->bss_conf.ht_operation_mode); - if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT) - cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); - if (ht_enabled) - iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd); + /* The fw does not distinguish between ht and fat */ + ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT; + iwl_mvm_set_fw_protection_flags(mvm, vif, &vif->bss_conf, + &cmd->protection_flags, + ht_flag, MAC_PROT_FLG_TGG_PROTECT); } static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, @@ -534,11 +579,76 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, sizeof(*cmd), cmd); if (ret) - IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n", + IWL_ERR(mvm, "Failed to send MAC_CONTEXT_CMD (action:%d): %d\n", le32_to_cpu(cmd->action), ret); return ret; } +void iwl_mvm_set_fw_dtim_tbtt(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le64 *dtim_tsf, __le32 *dtim_time, + __le32 *assoc_beacon_arrive_time) +{ + u32 dtim_offs; + + /* + * The DTIM count counts down, so when it is N that means N + * more beacon intervals happen until the DTIM TBTT. Therefore + * add this to the current time. If that ends up being in the + * future, the firmware will handle it. + * + * Also note that the system_timestamp (which we get here as + * "sync_device_ts") and TSF timestamp aren't at exactly the + * same offset in the frame -- the TSF is at the first symbol + * of the TSF, the system timestamp is at signal acquisition + * time. This means there's an offset between them of at most + * a few hundred microseconds (24 * 8 bits + PLCP time gives + * 384us in the longest case), this is currently not relevant + * as the firmware wakes up around 2ms before the TBTT. 
+ */ + dtim_offs = link_conf->sync_dtim_count * + link_conf->beacon_int; + /* convert TU to usecs */ + dtim_offs *= 1024; + + *dtim_tsf = + cpu_to_le64(link_conf->sync_tsf + dtim_offs); + *dtim_time = + cpu_to_le32(link_conf->sync_device_ts + dtim_offs); + *assoc_beacon_arrive_time = + cpu_to_le32(link_conf->sync_device_ts); + + IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", + le64_to_cpu(*dtim_tsf), + le32_to_cpu(*dtim_time), + dtim_offs); +} + +__le32 iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct ieee80211_p2p_noa_attr *noa = + &vif->bss_conf.p2p_noa_attr; + + return cpu_to_le32(noa->oppps_ctwindow & + IEEE80211_P2P_OPPPS_CTWINDOW_MASK); +} + +u32 iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 twt_policy = 0; + + if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) + twt_policy |= TWT_SUPPORTED; + if (vif->bss_conf.twt_protected) + twt_policy |= PROTECTED_TWT_SUPPORTED; + if (vif->bss_conf.twt_broadcast) + twt_policy |= BROADCAST_TWT_SUPPORTED; + + return twt_policy; +} + static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 action, bool force_assoc_off, @@ -552,55 +662,30 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, /* Fill the common data for all mac context types */ iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); + /* + * We always want to hear MCAST frames, if we're not authorized yet, + * we'll drop them. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP); + if (vif->p2p) { - struct ieee80211_p2p_noa_attr *noa = - &vif->bss_conf.p2p_noa_attr; + cmd.p2p_sta.ctwin = + iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(mvm, vif); - cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & - IEEE80211_P2P_OPPPS_CTWINDOW_MASK); ctxt_sta = &cmd.p2p_sta.sta; } else { ctxt_sta = &cmd.sta; } /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && + if (vif->cfg.assoc && vif->bss_conf.dtim_period && !force_assoc_off) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 ap_sta_id = mvmvif->ap_sta_id; - u32 dtim_offs; - /* - * The DTIM count counts down, so when it is N that means N - * more beacon intervals happen until the DTIM TBTT. Therefore - * add this to the current time. If that ends up being in the - * future, the firmware will handle it. - * - * Also note that the system_timestamp (which we get here as - * "sync_device_ts") and TSF timestamp aren't at exactly the - * same offset in the frame -- the TSF is at the first symbol - * of the TSF, the system timestamp is at signal acquisition - * time. This means there's an offset between them of at most - * a few hundred microseconds (24 * 8 bits + PLCP time gives - * 384us in the longest case), this is currently not relevant - * as the firmware wakes up around 2ms before the TBTT. 
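The comment above, kept verbatim as the code moves into iwl_mvm_set_fw_dtim_tbtt(), boils down to a small piece of arithmetic: multiply the remaining DTIM count by the beacon interval, convert TU to microseconds, and add the result to the timestamps of the last synced beacon. A standalone version with made-up input values:

/* The arithmetic behind the DTIM TBTT projection -- values invented. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sync_dtim_count = 2;		/* beacons until the DTIM */
	uint32_t beacon_int = 100;		/* beacon interval in TU */
	uint64_t sync_tsf = 0x12345678ULL;	/* TSF of last beacon, usec */
	uint32_t sync_device_ts = 500000;	/* device time of last beacon */

	/* beacons-to-go * beacon interval, then TU -> usec (1 TU = 1024 us) */
	uint32_t dtim_offs = sync_dtim_count * beacon_int * 1024;

	printf("dtim_tsf  = 0x%" PRIx64 "\n", sync_tsf + dtim_offs);
	printf("dtim_time = %u\n", sync_device_ts + dtim_offs);
	return 0;
}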
- */ - dtim_offs = vif->bss_conf.sync_dtim_count * - vif->bss_conf.beacon_int; - /* convert TU to usecs */ - dtim_offs *= 1024; - - ctxt_sta->dtim_tsf = - cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs); - ctxt_sta->dtim_time = - cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs); - ctxt_sta->assoc_beacon_arrive_time = - cpu_to_le32(vif->bss_conf.sync_device_ts); - - IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n", - le64_to_cpu(ctxt_sta->dtim_tsf), - le32_to_cpu(ctxt_sta->dtim_time), - dtim_offs); + iwl_mvm_set_fw_dtim_tbtt(mvm, vif, &vif->bss_conf, + &ctxt_sta->dtim_tsf, + &ctxt_sta->dtim_time, + &ctxt_sta->assoc_beacon_arrive_time); ctxt_sta->is_assoc = cpu_to_le32(1); @@ -609,29 +694,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) ctxt_sta->data_policy |= cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE); - - /* - * allow multicast data frames only as long as the station is - * authorized, i.e., GTK keys are already installed (if needed) - */ - if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { - struct ieee80211_sta *sta; - - rcu_read_lock(); - - sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); - if (!IS_ERR_OR_NULL(sta)) { - struct iwl_mvm_sta *mvmsta = - iwl_mvm_sta_from_mac80211(sta); - - if (mvmsta->sta_state == - IEEE80211_STA_AUTHORIZED) - cmd.filter_flags |= - cpu_to_le32(MAC_FILTER_ACCEPT_GRP); - } - - rcu_read_unlock(); - } } else { ctxt_sta->is_assoc = cpu_to_le32(0); @@ -646,21 +708,15 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, vif->bss_conf.dtim_period); ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); - ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); + ctxt_sta->assoc_id = cpu_to_le32(vif->cfg.aid); - if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) + if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) - ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); - if (vif->bss_conf.twt_protected) - ctxt_sta->data_policy |= - cpu_to_le32(PROTECTED_TWT_SUPPORTED); - if (vif->bss_conf.twt_broadcast) - ctxt_sta->data_policy |= - cpu_to_le32(BROADCAST_TWT_SUPPORTED); + ctxt_sta->data_policy |= + cpu_to_le32(iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(mvm, vif)); } @@ -672,7 +728,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, u32 action) { struct iwl_mac_ctx_cmd cmd = {}; - u32 tfd_queue_msk = BIT(mvm->snif_queue); + u32 tfd_queue_msk = 0; int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); @@ -687,6 +743,14 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, MAC_FILTER_ACCEPT_GRP); ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); + /* + * the queue mask is only relevant for old TX API, and + * mvm->snif_queue isn't set here (it's still set to + * IWL_MVM_INVALID_QUEUE so the BIT() of it is UB) + */ + if (!iwl_mvm_has_new_tx_api(mvm)) + tfd_queue_msk = BIT(mvm->snif_queue); + /* Allocate sniffer station */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk, vif->type, IWL_STA_GENERAL_PURPOSE); @@ -734,20 +798,11 @@ static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) data->go_active = true; } -static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 action) +__le32 iwl_mac_ctxt_p2p_dev_has_extended_disc(struct iwl_mvm *mvm, 
+ struct ieee80211_vif *vif) { - struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mvm_go_iterator_data data = {}; - WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); - - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - - /* Override the filter flags to accept only probe requests */ - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - /* * This flag should be set to true when the P2P Device is * discoverable and there is at least another active P2P GO. Settings @@ -760,7 +815,25 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_go_iterator, &data); - cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0); + return cpu_to_le32(data.go_active ? 1 : 0); +} + +static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_ctx_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); + + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); + + cmd.p2p_dev.is_disc_extended = + iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif); + + /* Override the filter flags to accept only probe requests */ + cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -806,14 +879,69 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) return ie - beacon; } -u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, +u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_supported_band *sband; + unsigned long basic = vif->bss_conf.basic_rates; + u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT; + u32 link_id = u32_get_bits(info->control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + u8 band = info->band; u8 rate; - if (info->band == NL80211_BAND_2GHZ && !vif->p2p) - rate = IWL_FIRST_CCK_RATE; - else - rate = IWL_FIRST_OFDM_RATE; + u32 i; + + if (link_id == IEEE80211_LINK_UNSPECIFIED && ieee80211_vif_is_mld(vif)) { + for (i = 0; i < ARRAY_SIZE(mvmvif->link); i++) { + if (!mvmvif->link[i]) + continue; + /* shouldn't do this when >1 link is active */ + WARN_ON_ONCE(link_id != IEEE80211_LINK_UNSPECIFIED); + link_id = i; + } + } + + if (link_id < IEEE80211_LINK_UNSPECIFIED) { + struct ieee80211_bss_conf *link_conf; + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[link_id]); + if (link_conf) { + basic = link_conf->basic_rates; + if (link_conf->chandef.chan) + band = link_conf->chandef.chan->band; + } + rcu_read_unlock(); + } + + sband = mvm->hw->wiphy->bands[band]; + for_each_set_bit(i, &basic, BITS_PER_LONG) { + u16 hw = sband->bitrates[i].hw_value; + + if (hw >= IWL_FIRST_OFDM_RATE) { + if (lowest_ofdm > hw) + lowest_ofdm = hw; + } else if (lowest_cck > hw) { + lowest_cck = hw; + } + } + + if (band == NL80211_BAND_2GHZ && !vif->p2p && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) { + if (lowest_cck != IWL_RATE_COUNT) + rate = lowest_cck; + else if (lowest_ofdm != IWL_RATE_COUNT) + rate = lowest_ofdm; + else + rate = IWL_RATE_1M_INDEX; + } else if (lowest_ofdm != IWL_RATE_COUNT) { + rate = lowest_ofdm; + } else { + rate = IWL_RATE_6M_INDEX; + } return rate; } @@ -830,6 +958,24 @@ u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx) return flags; } +u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm, + struct 
ieee80211_tx_info *info, + struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband = + mvm->hw->wiphy->bands[info->band]; + u32 legacy = vif->bss_conf.beacon_tx_rate.control[info->band].legacy; + + /* if beacon rate was configured try using it */ + if (hweight32(legacy) == 1) { + u32 rate = ffs(legacy) - 1; + + return sband->bitrates[rate].hw_value; + } + + return iwl_mvm_mac_ctxt_get_lowest_rate(mvm, info, vif); +} + static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, @@ -844,7 +990,7 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, /* Set up TX command fields */ tx->len = cpu_to_le16((u16)beacon->len); - tx->sta_id = mvmvif->bcast_sta.sta_id; + tx->sta_id = mvmvif->deflink.bcast_sta.sta_id; tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= @@ -860,7 +1006,7 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); - rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate)); @@ -939,12 +1085,13 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct sk_buff *beacon) + struct sk_buff *beacon, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); struct iwl_mac_beacon_cmd beacon_cmd = {}; - u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + u8 rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); u16 flags; struct ieee80211_chanctx_conf *ctx; int channel; @@ -952,7 +1099,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, /* Enable FILS on PSC channels only */ rcu_read_lock(); - ctx = rcu_dereference(vif->chanctx_conf); + ctx = rcu_dereference(link_conf->chanctx_conf); channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); WARN_ON(channel == 0); if (cfg80211_channel_is_psc(ctx->def.chan) && @@ -962,14 +1109,22 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, IWL_MAC_BEACON_FILS : IWL_MAC_BEACON_FILS_V1; beacon_cmd.short_ssid = - cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid, - vif->bss_conf.ssid_len)); + cpu_to_le32(~crc32_le(~0, vif->cfg.ssid, + vif->cfg.ssid_len)); } rcu_read_unlock(); beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); - beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (WARN_ON(!mvmvif->link[link_conf->link_id])) + return -EINVAL; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 12) + beacon_cmd.link_id = + cpu_to_le32(mvmvif->link[link_conf->link_id]->fw_link_id); + else + beacon_cmd.link_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, @@ -989,9 +1144,10 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, sizeof(beacon_cmd)); } -int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon, + struct ieee80211_bss_conf *link_conf) { if (WARN_ON(!beacon)) return -EINVAL; @@ -1005,14 +1161,16 @@ int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm 
*mvm, if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) - return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon); + return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon, + link_conf); return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); } /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct sk_buff *beacon; int ret; @@ -1020,7 +1178,8 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC); - beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL, + link_conf->link_id); if (!beacon) return -ENOMEM; @@ -1031,7 +1190,7 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, } #endif - ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon, link_conf); dev_kfree_skb(beacon); return ret; } @@ -1049,7 +1208,7 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, { struct iwl_mvm_mac_ap_iterator_data *data = _data; - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) return; /* Station client has higher priority over P2P client*/ @@ -1061,6 +1220,30 @@ static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac, } /* + * Fill the filter flags for mac context of type AP or P2P GO. + */ +void iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + __le32 *filter_flags, + int accept_probe_req_flag, + int accept_beacon_flag) +{ + /* + * in AP mode, pass probe requests and beacons from other APs + * (needed for ht protection); when there're no any associated + * station don't ask FW to pass beacons to prevent unnecessary + * wake-ups. + */ + *filter_flags |= cpu_to_le32(accept_probe_req_flag); + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { + *filter_flags |= cpu_to_le32(accept_beacon_flag); + IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); + } else { + IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); + } +} + +/* * Fill the specific data for mac context of type AP of P2P GO */ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, @@ -1079,19 +1262,10 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, /* in AP mode, the MCAST FIFO takes the EDCA params from VO */ cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST); - /* - * in AP mode, pass probe requests and beacons from other APs - * (needed for ht protection); when there're no any associated - * station don't ask FW to pass beacons to prevent unnecessary - * wake-ups. 
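The beacon-rate change earlier in this file (iwl_mvm_mac_ctxt_get_lowest_rate() plus the new iwl_mvm_mac_ctxt_get_beacon_rate()) replaces the fixed 1 Mbps/6 Mbps choice with the lowest configured basic rate, preferring CCK on 2.4 GHz unless CCK is disallowed. The sketch below reproduces just that selection over a reduced, made-up rate table:

/* Lowest-basic-rate selection, illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct rate { const char *name; bool cck; };

static const struct rate rates[] = {
	{ "1M", true }, { "2M", true }, { "5.5M", true }, { "11M", true },
	{ "6M", false }, { "9M", false }, { "12M", false }, { "24M", false },
};

static int lowest_basic_rate(unsigned long basic, bool band_2ghz, bool no_cck)
{
	int lowest_cck = -1, lowest_ofdm = -1;

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (!(basic & (1UL << i)))
			continue;
		if (rates[i].cck && lowest_cck < 0)
			lowest_cck = i;
		else if (!rates[i].cck && lowest_ofdm < 0)
			lowest_ofdm = i;
	}

	if (band_2ghz && !no_cck && lowest_cck >= 0)
		return lowest_cck;
	if (lowest_ofdm >= 0)
		return lowest_ofdm;
	return band_2ghz && !no_cck ? 0 /* 1M */ : 4 /* 6M */;
}

int main(void)
{
	/* basic rates: 11M and 24M set */
	printf("2.4GHz: %s\n", rates[lowest_basic_rate(0x88, true, false)].name);
	printf("5GHz  : %s\n", rates[lowest_basic_rate(0x80, false, false)].name);
	return 0;
}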
- */ - cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { - cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); - } else { - IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); - } + iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(mvm, mvmvif, + &cmd->filter_flags, + MAC_FILTER_IN_PROBE_REQUEST, + MAC_FILTER_IN_BEACON); ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * @@ -1099,7 +1273,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue); + ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->deflink.cab_queue); /* * Only set the beacon time when the MAC is being added, when we @@ -1117,7 +1291,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, iwl_mvm_mac_ap_iterator, &data); if (data.beacon_device_ts) { - u32 rand = (prandom_u32() % (64 - 36)) + 36; + u32 rand = get_random_u32_inclusive(36, 63); mvmvif->ap_beacon_time = data.beacon_device_ts + ieee80211_tu_to_usec(data.beacon_int * rand / 100); @@ -1253,12 +1427,9 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->color)); cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE); - ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0, - sizeof(cmd), &cmd); - if (ret) { - IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret); + ret = iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); + if (ret) return ret; - } mvmvif->uploaded = false; @@ -1286,7 +1457,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) { int c = ieee80211_beacon_update_cntdwn(csa_vif); - iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif); + iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif, + &csa_vif->bss_conf); if (csa_vif->p2p && !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 && tx_success) { @@ -1353,7 +1525,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); - if (unlikely(csa_vif && csa_vif->csa_active)) + if (unlikely(csa_vif && csa_vif->bss_conf.csa_active)) iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2, (status == TX_STATUS_SUCCESS)); @@ -1392,23 +1564,49 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; u32 rx_missed_bcon, rx_missed_bcon_since_rx; struct ieee80211_vif *vif; - u32 id = le32_to_cpu(mb->mac_id); + /* Id can be mac/link id depending on the notification version */ + u32 id = le32_to_cpu(mb->link_id); union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; + u32 mac_type; + u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + MISSED_BEACONS_NOTIFICATION, + 0); + + rcu_read_lock(); + + /* before version four the ID in the notification refers to mac ID */ + if (notif_ver < 4) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + } else { + struct ieee80211_bss_conf *bss_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true); + + if (!bss_conf) + goto out; + + vif = bss_conf->vif; + } IWL_DEBUG_INFO(mvm, - "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", - le32_to_cpu(mb->mac_id), + "missed bcn %s_id=%u, consecutive=%u (%u, %u, %u)\n", + notif_ver < 4 ? 
"mac" : "link", + id, le32_to_cpu(mb->consec_missed_beacons), le32_to_cpu(mb->consec_missed_beacons_since_last_rx), le32_to_cpu(mb->num_recvd_beacons), le32_to_cpu(mb->num_expected_beacons)); - rcu_read_lock(); - - vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); if (!vif) goto out; + mac_type = iwl_mvm_get_mac_type(vif); + + IWL_DEBUG_INFO(mvm, "missed beacon mac_type=%u,\n", mac_type); + + mvm->trans->dbg.dump_file_name_ext_valid = true; + snprintf(mvm->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "MacId_%d_MacType_%d", id, mac_type); + rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons); rx_missed_bcon_since_rx = le32_to_cpu(mb->consec_missed_beacons_since_last_rx); @@ -1542,9 +1740,9 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, sizeof(struct ieee80211_p2p_noa_desc) + 2) new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc); - old_data = rcu_dereference_protected(mvmvif->probe_resp_data, - lockdep_is_held(&mvmvif->mvm->mutex)); - rcu_assign_pointer(mvmvif->probe_resp_data, new_data); + old_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, + lockdep_is_held(&mvmvif->mvm->mutex)); + rcu_assign_pointer(mvmvif->deflink.probe_resp_data, new_data); if (old_data) kfree_rcu(old_data, rcu_head); @@ -1558,29 +1756,57 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; struct ieee80211_vif *csa_vif, *vif; - struct iwl_mvm_vif *mvmvif; - u32 id_n_color, csa_id, mac_id; + struct iwl_mvm_vif *mvmvif, *csa_mvmvif; + u32 id_n_color, csa_id; + /* save mac_id or link_id to use later to cancel csa if needed */ + u32 id; + u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + CHANNEL_SWITCH_START_NOTIF, 0); + bool csa_active; - id_n_color = le32_to_cpu(notif->id_and_color); - mac_id = id_n_color & FW_CTXT_ID_MSK; + rcu_read_lock(); - if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER)) - return; + if (notif_ver < 3) { + struct iwl_channel_switch_start_notif_v1 *notif = (void *)pkt->data; + u32 mac_id; + + id_n_color = le32_to_cpu(notif->id_and_color); + mac_id = id_n_color & FW_CTXT_ID_MSK; + + vif = iwl_mvm_rcu_dereference_vif_id(mvm, mac_id, true); + if (!vif) + goto out_unlock; + + id = mac_id; + csa_active = vif->bss_conf.csa_active; + } else { + struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; + u32 link_id = le32_to_cpu(notif->link_id); + struct ieee80211_bss_conf *bss_conf = + iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, link_id, true); + + if (!bss_conf) + goto out_unlock; + + id = link_id; + vif = bss_conf->vif; + csa_active = bss_conf->csa_active; + } - rcu_read_lock(); - vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]); mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (notif_ver >= 3) + id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); switch (vif->type) { case NL80211_IFTYPE_AP: csa_vif = rcu_dereference(mvm->csa_vif); - if (WARN_ON(!csa_vif || !csa_vif->csa_active || + if (WARN_ON(!csa_vif || !csa_vif->bss_conf.csa_active || csa_vif != vif)) goto out_unlock; - csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); + csa_mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); + csa_id = FW_CMD_ID_AND_COLOR(csa_mvmvif->id, csa_mvmvif->color); if (WARN(csa_id != id_n_color, "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", csa_id, id_n_color)) @@ -1605,9 +1831,9 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, */ if 
(iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, - 0) && !vif->csa_active) { + 0) && !csa_active) { IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n"); - iwl_mvm_cancel_channel_switch(mvm, vif, mac_id); + iwl_mvm_cancel_channel_switch(mvm, vif, id); break; } @@ -1630,7 +1856,7 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_channel_switch_error_notif *notif = (void *)pkt->data; struct ieee80211_vif *vif; - u32 id = le32_to_cpu(notif->mac_id); + u32 id = le32_to_cpu(notif->link_id); u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask); rcu_read_lock(); @@ -1640,7 +1866,7 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, return; } - IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n", + IWL_DEBUG_INFO(mvm, "FW reports CSA error: id=%u, csa_err_mask=%u\n", id, csa_err_mask); if (csa_err_mask & (CS_ERR_COUNT_ERROR | CS_ERR_LONG_DELAY_AFTER_CS | diff --git a/mvm/mac80211.c b/mvm/mac80211.c index 784d91281c02..ce7905faa08f 100644 --- a/mvm/mac80211.c +++ b/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -28,6 +28,7 @@ #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" +#include "time-sync.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { @@ -107,7 +108,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, struct ieee80211_regdomain *regd = NULL; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mcc_update_resp *resp; + struct iwl_mcc_update_resp_v8 *resp; u8 resp_ver; IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); @@ -137,7 +138,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), - __le16_to_cpu(resp->cap), resp_ver); + le32_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; if (IS_ERR_OR_NULL(regd)) { @@ -222,23 +223,55 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) return ret; } +/* Each capability added here should also be add to tm_if_types_ext_capa_sta */ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, - [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | + WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, + [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, }; -static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { +static const u8 tm_if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT | + WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | + WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, + [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, + [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +}; + +/* Additional interface types for which extended capabilities are + * specified separately + */ + +#define IWL_MVM_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \ + IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \ + __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \ + IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \ + 
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)) + +static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = { { .iftype = NL80211_IFTYPE_STATION, .extended_capabilities = he_if_types_ext_capa_sta, .extended_capabilities_mask = he_if_types_ext_capa_sta, .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), + /* relevant only if EHT is supported */ + .eml_capabilities = IWL_MVM_EMLSR_CAPA, + }, + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = tm_if_types_ext_capa_sta, + .extended_capabilities_mask = tm_if_types_ext_capa_sta, + .extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta), + /* relevant only if EHT is supported */ + .eml_capabilities = IWL_MVM_EMLSR_CAPA, }, }; -static int -iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); @@ -260,6 +293,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) bool unified = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); #endif + u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); @@ -269,17 +304,33 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, TIMING_BEACON_ONLY); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); - ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); ieee80211_hw_set(hw, STA_MMPDU_TXQ); + + /* Set this early since we need to have it for the check below */ + if (mvm->mld_api_is_used && + mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + + /* With MLD FW API, it tracks timing by itself, + * no need for any timing from the host + */ + if (!mvm->mld_api_is_used) + ieee80211_hw_set(hw, TIMING_BEACON_ONLY); + + /* We should probably have this, but mac80211 + * currently doesn't support it for MLO. 
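IWL_MVM_EMLSR_CAPA above is built by shifting each EMLSR delay value to the position of its capability mask via __bf_shf(). A standalone illustration of that composition follows; the mask values and delay encodings here are assumptions made for the example, not the actual ieee80211.h definitions:

/* Bitfield composition in the style of FIELD_PREP/__bf_shf -- masks invented. */
#include <stdio.h>

#define EMLSR_SUPP		0x0001u
#define EMLSR_PADDING_DELAY	0x000eu		/* assumed bits 1..3 */
#define EMLSR_TRANSITION_DELAY	0x0070u		/* assumed bits 4..6 */

/* shift of the lowest set bit of a mask (what __bf_shf() computes) */
static unsigned int bf_shf(unsigned int mask)
{
	return (unsigned int)__builtin_ctz(mask);
}

static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << bf_shf(mask)) & mask;
}

int main(void)
{
	unsigned int padding_32us = 2, transition_64us = 3; /* made-up encodings */
	unsigned int eml = EMLSR_SUPP |
			   field_prep(EMLSR_PADDING_DELAY, padding_32us) |
			   field_prep(EMLSR_TRANSITION_DELAY, transition_64us);

	printf("eml capabilities word: 0x%04x\n", eml);
	return 0;
}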
+ */ + if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) + ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); + /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO @@ -374,6 +425,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->n_cipher_suites++; } + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { wiphy_ext_feature_set(hw->wiphy, @@ -381,11 +437,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; } - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) + if (sec_key_ver && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BIGTK_TX_SUPPORT)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION); + else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM)) + hw->wiphy->hw_timestamp_max_peers = 1; + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | @@ -559,16 +624,34 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); } + hw->wiphy->iftype_ext_capab = NULL; + hw->wiphy->num_iftype_ext_capab = 0; + if (mvm->nvm_data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) { - hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; + hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa; hw->wiphy->num_iftype_ext_capab = - ARRAY_SIZE(he_iftypes_ext_capa); + ARRAY_SIZE(add_iftypes_ext_capa) - 1; ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); } + if (iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), + IWL_FW_CMD_VER_UNKNOWN) >= 1) { + IWL_DEBUG_INFO(mvm->trans, "Timing measurement supported\n"); + + if (!hw->wiphy->iftype_ext_capab) { + hw->wiphy->num_iftype_ext_capab = 1; + hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa + + ARRAY_SIZE(add_iftypes_ext_capa) - 1; + } else { + hw->wiphy->iftype_ext_capab = add_iftypes_ext_capa + 1; + } + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP @@ -650,9 +733,8 @@ static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, ieee80211_free_txskb(mvm->hw, skb); } -static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, - struct ieee80211_tx_control *control, - struct sk_buff *skb) +void iwl_mvm_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, struct sk_buff *skb) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_sta *sta = control->sta; @@ -660,6 +742,9 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr = (void *)skb->data; bool offchannel = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_TX_OFFCHAN; + u32 link_id = u32_get_bits(info->control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + struct ieee80211_sta *tmp_sta = sta; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); @@ -683,7 +768,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); - u8 ap_sta_id = 
READ_ONCE(mvmvif->ap_sta_id); + u8 ap_sta_id = READ_ONCE(mvmvif->deflink.ap_sta_id); if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { /* mac80211 holds rcu read lock */ @@ -693,6 +778,25 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } } + if (tmp_sta && !sta && link_id != IEEE80211_LINK_UNSPECIFIED && + !ieee80211_is_probe_resp(hdr->frame_control)) { + /* translate MLD addresses to LINK addresses */ + struct ieee80211_link_sta *link_sta = + rcu_dereference(tmp_sta->link[link_id]); + struct ieee80211_bss_conf *link_conf = + rcu_dereference(info->control.vif->link_conf[link_id]); + struct ieee80211_mgmt *mgmt; + + if (WARN_ON(!link_sta || !link_conf)) + goto drop; + + /* if sta is NULL, the frame is a management frame */ + mgmt = (void *)hdr; + memcpy(mgmt->da, link_sta->addr, ETH_ALEN); + memcpy(mgmt->sa, link_conf->addr, ETH_ALEN); + memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN); + } + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: @@ -729,7 +833,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) rcu_read_lock(); do { - while (likely(!mvmtxq->stopped && + while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL, + &mvmtxq->state) && + !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, + &mvmtxq->state) && !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { skb = ieee80211_tx_dequeue(hw, txq); @@ -748,48 +855,31 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) rcu_read_unlock(); } -static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, - struct ieee80211_txq *txq) +void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); - /* - * Please note that racing is handled very carefully here: - * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is - * deleted afterwards. - * This means that if: - * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): - * queue is allocated and we can TX. - * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): - * a race, should defer the frame. - * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): - * need to allocate the queue and defer the frame. - * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): - * queue is already scheduled for allocation, no need to allocate, - * should defer the frame. - */ - - /* If the queue is allocated TX and return. */ - if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { - /* - * Check that list is empty to avoid a race where txq_id is - * already updated, but the queue allocation work wasn't - * finished - */ - if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) - return; - + if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) || + !txq->sta) { iwl_mvm_mac_itxq_xmit(hw, txq); return; } - /* The list is being deleted only after the queue is fully allocated. */ - if (!list_empty(&mvmtxq->list)) - return; + /* iwl_mvm_mac_itxq_xmit() will later be called by the worker + * to handle any packets we leave on the txq now + */ - list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); - schedule_work(&mvm->add_stream_wk); + spin_lock_bh(&mvm->add_stream_lock); + /* The list is being deleted only after the queue is fully allocated. 
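The reworked wake_tx_queue path above replaces the old txq_id/list reasoning with a READY state bit plus a recheck under mvm->add_stream_lock before the TXQ is handed to the allocation worker. The toy below shows only that check/recheck shape; the state flags, lock and worker are stand-ins and no real concurrency is exercised:

/* Toy model of the deferred TXQ allocation pattern -- not driver code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct txq {
	bool ready;		/* stand-in for IWL_MVM_TXQ_STATE_READY */
	bool queued;		/* stand-in for !list_empty(&mvmtxq->list) */
};

static pthread_mutex_t add_stream_lock = PTHREAD_MUTEX_INITIALIZER;

static void transmit_now(struct txq *q)
{
	printf("tx on ready queue %p\n", (void *)q);
}

static void schedule_alloc_worker(struct txq *q)
{
	printf("queue %p deferred to allocation worker\n", (void *)q);
}

static void wake_tx_queue(struct txq *q)
{
	/* fast path: queue already allocated, transmit directly */
	if (q->ready) {
		transmit_now(q);
		return;
	}

	pthread_mutex_lock(&add_stream_lock);
	/* recheck under the lock: the worker may have finished meanwhile,
	 * and a queue already on the list must not be added twice */
	if (!q->queued && !q->ready) {
		q->queued = true;
		schedule_alloc_worker(q);
	}
	pthread_mutex_unlock(&add_stream_lock);
}

int main(void)
{
	struct txq q = { .ready = false, .queued = false };

	wake_tx_queue(&q);	/* defers to the worker */
	q.ready = true;		/* pretend the worker allocated the queue */
	wake_tx_queue(&q);	/* transmits directly */
	return 0;
}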
*/ + if (list_empty(&mvmtxq->list) && + /* recheck under lock */ + !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) { + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); + schedule_work(&mvm->add_stream_wk); + } + spin_unlock_bh(&mvm->add_stream_lock); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ @@ -844,9 +934,9 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } } -static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_ampdu_params *params) +int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -868,8 +958,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == - iwl_mvm_sta_from_mac80211(sta)->sta_id) { + if (iwl_mvm_vif_from_mac80211(vif)->deflink.ap_sta_id == + iwl_mvm_sta_from_mac80211(sta)->deflink.sta_id) { struct iwl_mvm_vif *mvmvif; u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; @@ -932,17 +1022,30 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, { struct iwl_mvm *mvm = data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *probe_data; + unsigned int link_id; mvmvif->uploaded = false; - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; spin_lock_bh(&mvm->time_event_lock); iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); - mvmvif->phy_ctxt = NULL; memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); - memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + mvmvif->link[link_id]->phy_ctxt = NULL; + mvmvif->link[link_id]->active = 0; + mvmvif->link[link_id]->igtk = NULL; + } + + probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, + lockdep_is_held(&mvm->mutex)); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); } static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) @@ -976,10 +1079,12 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) ieee80211_wake_queues(mvm->hw); - mvm->vif_count = 0; mvm->rx_ba_sessions = 0; mvm->fwrt.dump.conf = FW_DBG_INVALID; mvm->monitor_on = false; +#ifdef CONFIG_IWLWIFI_DEBUGFS + mvm->beacon_inject_active = false; +#endif /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); @@ -1042,7 +1147,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) return ret; } -static int iwl_mvm_mac_start(struct ieee80211_hw *hw) +int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -1066,6 +1171,16 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) if (!ret) break; + /* + * In PLDR sync PCI re-enumeration is needed. no point to retry + * mac start before that. 
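/*
 * Illustrative sketch (not part of this diff): the wake_tx_queue path above
 * uses a fast path when the queue is already READY, and otherwise takes
 * add_stream_lock, rechecks READY under the lock, and only then puts the
 * queue on the allocation list for the worker. The standalone types and the
 * pthread/atomic primitives below are stand-ins invented for illustration;
 * the driver uses its own spinlock and work item.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct txq_sketch {
        atomic_bool ready;      /* set once the HW queue is allocated */
        bool queued;            /* on the pending list, protected by the lock */
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static void schedule_alloc_worker(struct txq_sketch *q)
{
        /* stand-in for scheduling the queue-allocation worker */
        printf("worker scheduled for %p\n", (void *)q);
}

static void wake_tx_queue(struct txq_sketch *q)
{
        if (atomic_load(&q->ready)) {
                printf("transmit directly\n");
                return;
        }

        pthread_mutex_lock(&pending_lock);
        /* recheck under the lock: the worker may have just finished */
        if (!q->queued && !atomic_load(&q->ready)) {
                q->queued = true;
                schedule_alloc_worker(q);
        }
        pthread_mutex_unlock(&pending_lock);
}

int main(void)
{
        struct txq_sketch q;

        q.queued = false;
        atomic_init(&q.ready, false);
        wake_tx_queue(&q);            /* defers to the worker */
        atomic_store(&q.ready, true);
        wake_tx_queue(&q);            /* now transmits directly */
        return 0;
}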
+ */ + if (mvm->pldr_sync) { + iwl_mei_alive_notif(false); + iwl_trans_pcie_remove(mvm->trans, true); + break; + } + IWL_ERR(mvm, "mac start retry %d\n", retry); } clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); @@ -1101,9 +1216,8 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) mutex_unlock(&mvm->mutex); } -static void -iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, - enum ieee80211_reconfig_type reconfig_type) +void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1129,7 +1243,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* async_handlers_wk is now blocked */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) + if (!iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); iwl_mvm_stop_device(mvm); @@ -1165,7 +1279,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) } } -static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +void iwl_mvm_mac_stop(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1204,7 +1318,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) cancel_work_sync(&mvm->async_handlers_wk); } -static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) +struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) { u16 i; @@ -1218,8 +1332,8 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) return NULL; } -static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - s16 tx_power) +int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s16 tx_power) { u32 cmd_id = REDUCE_TX_POWER_CMD; int len; @@ -1254,8 +1368,8 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } -static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1268,7 +1382,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, mvmvif->csa_bcn_pending = false; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, - mvmvif->ap_sta_id); + mvmvif->deflink.ap_sta_id); if (WARN_ON(!mvmsta)) { ret = -EIO; @@ -1276,8 +1390,10 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, } iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); - - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (mvm->mld_api_is_used) + iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + else + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { @@ -1301,8 +1417,8 @@ out_unlock: return ret; } -static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1338,7 +1454,7 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, iwl_mvm_post_channel_switch(hw, vif); } -static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) +void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) { struct iwl_mvm_vif *mvmvif; struct ieee80211_vif *vif; @@ -1350,6 +1466,50 @@ static void 
iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) ieee80211_chswitch_done(vif, false); } +static u8 +iwl_mvm_chandef_get_primary_80(struct cfg80211_chan_def *chandef) +{ + int data_start; + int control_start; + int bw; + + if (chandef->width == NL80211_CHAN_WIDTH_320) + bw = 320; + else if (chandef->width == NL80211_CHAN_WIDTH_160) + bw = 160; + else + return 0; + + /* data is bw wide so the start is half the width */ + data_start = chandef->center_freq1 - bw / 2; + /* control is 20Mhz width */ + control_start = chandef->chan->center_freq - 10; + + return (control_start - data_start) / 80; +} + +static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); + if (ret) { + IWL_ERR(mvm, "Failed to allocate bcast sta\n"); + return ret; + } + + /* Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.mcast_sta, 0, + vif->type, + IWL_STA_MULTICAST); +} + static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1357,8 +1517,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; + mutex_lock(&mvm->mutex); + mvmvif->mvm = mvm; - RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); + + /* the first link always points to the default one */ + mvmvif->link[0] = &mvmvif->deflink; /* * Not much to do here. The stack will not allow interface @@ -1366,23 +1530,23 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, * don't really have to check the types. 
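/*
 * Illustrative sketch (not part of this diff): the helper above derives which
 * 80 MHz sub-block of a 160/320 MHz channel carries the primary (control)
 * channel by comparing the start of the wide data channel with the start of
 * the 20 MHz control channel. A standalone version with plain integers:
 */
#include <stdio.h>

/* center_freq1: center of the full data bandwidth (MHz)
 * control_freq: center of the 20 MHz control channel (MHz)
 * bw:           full bandwidth, 160 or 320 (MHz)
 * Returns the 0-based index of the 80 MHz block holding the control channel.
 */
static int primary_80_index(int center_freq1, int control_freq, int bw)
{
        int data_start = center_freq1 - bw / 2;
        int control_start = control_freq - 10;

        return (control_start - data_start) / 80;
}

int main(void)
{
        /* 160 MHz block centered at 5250 MHz, control channel 36 (5180 MHz):
         * data starts at 5170, control at 5170 -> index 0.
         */
        printf("%d\n", primary_80_index(5250, 5180, 160));
        /* same block, control channel 52 (5260 MHz) -> index 1 */
        printf("%d\n", primary_80_index(5250, 5260, 160));
        return 0;
}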
*/ - mutex_lock(&mvm->mutex); - /* make sure that beacon statistics don't go backwards with FW reset */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) - goto out_unlock; + goto out; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); - /* Counting number of interfaces is needed for legacy PM */ - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count++; + /* Currently not much to do for NAN */ + if (vif->type == NL80211_IFTYPE_NAN) { + ret = 0; + goto out; + } /* * The AP binding flow can be done only after the beacon @@ -1397,31 +1561,16 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - ret = iwl_mvm_alloc_bcast_sta(mvm, vif); - if (ret) { - IWL_ERR(mvm, "Failed to allocate bcast sta\n"); - goto out_release; - } - - /* - * Only queue for this station is the mcast queue, - * which shouldn't be in TFD mask anyway - */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, - 0, vif->type, - IWL_STA_MULTICAST); - if (ret) - goto out_release; - iwl_mvm_vif_dbgfs_register(mvm, vif); - goto out_unlock; + ret = 0; + goto out; } mvmvif->features |= hw->netdev_features; ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) - goto out_release; + goto out_unlock; ret = iwl_mvm_power_update_mac(mvm); if (ret) @@ -1446,13 +1595,13 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); - if (!mvmvif->phy_ctxt) { + mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->deflink.phy_ctxt) { ret = -ENOSPC; goto out_free_bf; } - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) goto out_unref_phy; @@ -1470,8 +1619,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, INIT_DELAYED_WORK(&mvmvif->csa_work, iwl_mvm_channel_switch_disconnect_wk); - if (vif->type == NL80211_IFTYPE_MONITOR) + if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; + mvm->monitor_p80 = + iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef); + } iwl_mvm_vif_dbgfs_register(mvm, vif); @@ -1483,12 +1635,17 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->csme_vif = vif; } +out: + if (!ret && (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) + ret = iwl_mvm_alloc_bcast_mcast_sta(mvm, vif); + goto out_unlock; out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_unref_phy: - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { mvm->bf_allowed_vif = NULL; @@ -1496,19 +1653,16 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI); } out_remove_mac: - mvmvif->phy_ctxt = NULL; + mvmvif->deflink.phy_ctxt = NULL; iwl_mvm_mac_ctxt_remove(mvm, vif); - out_release: - if (vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; out_unlock: mutex_unlock(&mvm->mutex); return ret; } -static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +void iwl_mvm_prepare_mac_removal(struct 
iwl_mvm *mvm, + struct ieee80211_vif *vif) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* @@ -1520,8 +1674,12 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, } } -static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +/* This function is doing the common part of removing the interface for + * both - MLD and non-MLD modes. Returns true if removing the interface + * is done + */ +static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1540,9 +1698,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->csme_vif = NULL; } - probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, + probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, lockdep_is_held(&mvm->mutex)); - RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); + RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); if (probe_data) kfree_rcu(probe_data, rcu_head); @@ -1569,23 +1727,30 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->noa_duration = 0; } #endif - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); - iwl_mvm_dealloc_bcast_sta(mvm, vif); - goto out_release; + return true; } + iwl_mvm_power_update_mac(mvm); + return false; +} + +static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (iwl_mvm_mac_remove_interface_common(hw, vif)) + goto out; + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - mvmvif->phy_ctxt = NULL; + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; } - if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) - mvm->vif_count--; - - iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); @@ -1593,13 +1758,14 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; -out_release: - mutex_unlock(&mvm->mutex); -} +out: + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.mcast_sta); + iwl_mvm_dealloc_bcast_sta(mvm, vif); + } -static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - return 0; + mutex_unlock(&mvm->mutex); } struct iwl_mvm_mc_iter_data { @@ -1625,7 +1791,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, return; if (vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc) + !vif->cfg.assoc) return; cmd->port_id = data->port_id++; @@ -1673,8 +1839,8 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } -static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, - struct netdev_hw_addr_list *mc_list) +u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd; @@ -1710,10 +1876,9 @@ static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, return (u64)(unsigned long)cmd; } -static void iwl_mvm_configure_filter(struct 
ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) +void iwl_mvm_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; @@ -1751,7 +1916,7 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, return; /* Supported only for p2p client interfaces */ - if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc || !vif->p2p) return; @@ -1760,8 +1925,7 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mu_group_mgmt_cmd cmd = {}; @@ -1779,7 +1943,7 @@ static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { - if (vif->mu_mimo_owner) { + if (vif->bss_conf.mu_mimo_owner) { struct iwl_mu_group_mgmt_notif *notif = _data; /* @@ -1787,7 +1951,7 @@ static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, * the data received from firmware as if it came from the * action frame, so no conversion is needed. */ - ieee80211_update_mu_groups(vif, + ieee80211_update_mu_groups(vif, 0, (u8 *)¬if->membership_status, (u8 *)¬if->user_position); } @@ -1833,7 +1997,8 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, - u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) + u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit, + bool inheritance) { int i; @@ -1844,8 +2009,8 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, * If nss < MAX: we can set zeros in other streams */ if (nss > MAX_HE_SUPP_NSS) { - IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, - MAX_HE_SUPP_NSS); + IWL_DEBUG_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); nss = MAX_HE_SUPP_NSS; } @@ -1859,14 +2024,25 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, bw++) { ru_index_tmp >>= 1; - if (!(ru_index_tmp & 1)) - continue; + /* + * According to the 11be spec, if for a specific BW the PPE Thresholds + * isn't present - it should inherit the thresholds from the last + * BW for which we had PPE Thresholds. 
In 11ax though, we don't have + * this inheritance - continue in this case + */ + if (!(ru_index_tmp & 1)) { + if (inheritance) + goto set_thresholds; + else + continue; + } high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; +set_thresholds: pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } @@ -1874,60 +2050,274 @@ static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, } static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct iwl_he_pkt_ext_v2 *pkt_ext) + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext, + bool inheritance) { - u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; - u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 nss = (link_sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &link_sta->he_cap.ppe_thres[0]; u8 ru_index_bitmap = u8_get_bits(*ppe, IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); /* Starting after PPE header */ u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; - iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); + iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit, + inheritance); } -static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, - u8 nominal_padding, - u32 *flags) +static int +iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding) { int low_th = -1; int high_th = -1; int i; + /* all the macros are the same for EHT and HE */ switch (nominal_padding) { - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_NONE; break; - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US: low_th = IWL_HE_PKT_EXT_BPSK; high_th = IWL_HE_PKT_EXT_NONE; break; - case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US: low_th = IWL_HE_PKT_EXT_NONE; high_th = IWL_HE_PKT_EXT_BPSK; break; } + if (low_th < 0 || high_th < 0) + return -EINVAL; + /* Set the PPE thresholds accordingly */ - if (low_th >= 0 && high_th >= 0) { - for (i = 0; i < MAX_HE_SUPP_NSS; i++) { - u8 bw; + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; - for (bw = 0; - bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); - bw++) { - pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; - pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; - } + for (bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; } + } + + return 0; +} + +static void iwl_mvm_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding) +{ + int i; + + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; - *flags |= STA_CTXT_HE_PACKET_EXT; + for (bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0]; + + if (nominal_padding > + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US && + qam_th[1] == IWL_HE_PKT_EXT_NONE) + qam_th[1] = IWL_HE_PKT_EXT_4096QAM; + else if (nominal_padding == + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US && + qam_th[0] == IWL_HE_PKT_EXT_NONE && + qam_th[1] == 
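/*
 * Illustrative sketch (not part of this diff): PPE thresholds are packed as
 * 3-bit fields at arbitrary bit offsets, and for EHT a bandwidth with no
 * entry inherits the thresholds of the last bandwidth that had one. The
 * helpers below are simplified stand-ins for the iwl_mvm_he_get_ppe_val() /
 * iwl_mvm_parse_ppe() logic above; the payload bytes, bandwidth count and
 * RU-index bitmap are invented example values.
 */
#include <stdint.h>
#include <stdio.h>

/* Read a 3-bit field that may straddle a byte boundary. */
static uint8_t get_3bit_field(const uint8_t *buf, unsigned int bit_pos)
{
        unsigned int byte = bit_pos / 8;
        unsigned int shift = bit_pos % 8;
        uint16_t val = buf[byte] >> shift;

        if (shift > 5)          /* field crosses into the next byte */
                val |= (uint16_t)buf[byte + 1] << (8 - shift);

        return val & 0x7;
}

int main(void)
{
        /* two packed 3-bit thresholds per present bandwidth */
        const uint8_t ppe[] = { 0x53, 0x01 };
        unsigned int ru_bitmap = 0x5;   /* BW0 and BW2 present, BW1/3 inherit */
        unsigned int pos = 0;
        /* "none" (7) until a real threshold has been parsed */
        uint8_t last_high = 7, last_low = 7;

        for (int bw = 0; bw < 4; bw++, ru_bitmap >>= 1) {
                if (ru_bitmap & 1) {
                        last_high = get_3bit_field(ppe, pos); pos += 3;
                        last_low  = get_3bit_field(ppe, pos); pos += 3;
                }
                /* absent bandwidths reuse the previous thresholds (EHT rule) */
                printf("bw %d: high %u low %u\n", bw, last_high, last_low);
        }
        return 0;
}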
IWL_HE_PKT_EXT_NONE) + qam_th[0] = IWL_HE_PKT_EXT_4096QAM; + } } } +/* Set the pkt_ext field according to PPE Thresholds element */ +int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm, + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext) +{ + u8 nominal_padding; + int i, ret = 0; + + if (WARN_ON(!link_sta)) + return -EINVAL; + + /* Initialize the PPE thresholds to "None" (7), as described in Table + * 9-262ac of 80211.ax/D3.0. + */ + memset(pkt_ext, IWL_HE_PKT_EXT_NONE, + sizeof(struct iwl_he_pkt_ext_v2)); + + if (link_sta->eht_cap.has_eht) { + nominal_padding = + u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], + IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); + + /* If PPE Thresholds exists, parse them into a FW-familiar + * format. + */ + if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] & + IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0]; + u8 ru_index_bitmap = + u16_get_bits(*ppe, + IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + /* Starting after PPE header */ + u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; + + iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, + ppe, ppe_pos_bit, true); + /* EHT PPE Thresholds doesn't exist - set the API according to + * HE PPE Tresholds + */ + } else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + /* Even though HE Capabilities IE doesn't contain PPE + * Thresholds for BW 320Mhz, thresholds for this BW will + * be filled in with the same values as 160Mhz, due to + * the inheritance, as required. + */ + iwl_mvm_set_pkt_ext_from_he_ppe(mvm, link_sta, pkt_ext, + true); + + /* According to the requirements, for MCSs 12-13 the + * maximum value between HE PPE Threshold and Common + * Nominal Packet Padding needs to be taken + */ + iwl_mvm_get_optimal_ppe_info(pkt_ext, nominal_padding); + + /* if PPE Thresholds doesn't present in both EHT IE and HE IE - + * take the Thresholds from Common Nominal Packet Padding field + */ + } else { + ret = iwl_mvm_set_pkt_ext_from_nominal_padding(pkt_ext, + nominal_padding); + } + } else if (link_sta->he_cap.has_he) { + /* If PPE Thresholds exist, parse them into a FW-familiar format. */ + if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + iwl_mvm_set_pkt_ext_from_he_ppe(mvm, link_sta, pkt_ext, + false); + /* PPE Thresholds doesn't exist - set the API PPE values + * according to Common Nominal Packet Padding field. 
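/*
 * Illustrative sketch (not part of this diff): when no PPE Thresholds element
 * is available, the nominal packet padding advertised by the peer is mapped
 * to one (low, high) threshold pair and replicated across every NSS/BW entry;
 * reserved values are rejected. The constants below are local placeholders,
 * not the driver's IWL_HE_PKT_EXT_* enum values (only "none" == 7 is taken
 * from the surrounding code).
 */
#include <stdio.h>

enum { PKT_EXT_BPSK_SKETCH = 0, PKT_EXT_NONE_SKETCH = 7 };

/* padding_us: 0, 8, 16 or 20 microseconds of nominal packet padding */
static int padding_to_thresholds(int padding_us, int *low, int *high)
{
        switch (padding_us) {
        case 0:
                *low = PKT_EXT_NONE_SKETCH;  *high = PKT_EXT_NONE_SKETCH;
                return 0;
        case 8:
                *low = PKT_EXT_BPSK_SKETCH;  *high = PKT_EXT_NONE_SKETCH;
                return 0;
        case 16:
        case 20:
                *low = PKT_EXT_NONE_SKETCH;  *high = PKT_EXT_BPSK_SKETCH;
                return 0;
        default:
                return -1;      /* reserved/unknown value */
        }
}

int main(void)
{
        int low, high;

        if (!padding_to_thresholds(16, &low, &high))
                printf("low %d high %d\n", low, high);
        return 0;
}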
+ */ + } else { + nominal_padding = + u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) + ret = iwl_mvm_set_pkt_ext_from_nominal_padding(pkt_ext, + nominal_padding); + } + } + + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + int bw; + + for (bw = 0; + bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]); + bw++) { + u8 *qam_th = + &pkt_ext->pkt_ext_qam_th[i][bw][0]; + + IWL_DEBUG_HT(mvm, + "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n", + i, bw, qam_th[0], qam_th[1]); + } + } + return ret; +} + +/* + * This function sets the MU EDCA parameters ans returns whether MU EDCA + * is enabled or not + */ +bool iwl_mvm_set_fw_mu_edca_params(struct iwl_mvm *mvm, + const struct iwl_mvm_vif_link_info *link_info, + struct iwl_he_backoff_conf *trig_based_txf) +{ + int i; + /* Mark MU EDCA as enabled, unless none detected on some AC */ + bool mu_edca_enabled = true; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = + &link_info->queue_params[i].mu_edca_param_rec; + u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); + + if (!link_info->queue_params[i].mu_edca) { + mu_edca_enabled = false; + break; + } + + trig_based_txf[ac].cwmin = + cpu_to_le16(mu_edca->ecw_min_max & 0xf); + trig_based_txf[ac].cwmax = + cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); + trig_based_txf[ac].aifsn = + cpu_to_le16(mu_edca->aifsn & 0xf); + trig_based_txf[ac].mu_time = + cpu_to_le16(mu_edca->mu_edca_timer); + } + + return mu_edca_enabled; +} + +bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + const struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *own_he_cap = NULL; + + /* This capability is the same for all bands, + * so take it from one of them. 
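/*
 * Illustrative sketch (not part of this diff): the MU EDCA record packs
 * ECWmin and ECWmax into one octet (low and high nibble respectively), which
 * is what the loop above unpacks per access category. The struct and the
 * example values below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct mu_edca_sketch {
        uint8_t aifsn;          /* AIFSN lives in the low 4 bits */
        uint8_t ecw_min_max;    /* ECWmin in bits 0-3, ECWmax in bits 4-7 */
        uint8_t timer;          /* MU EDCA timer, in units of 8 TUs */
};

int main(void)
{
        struct mu_edca_sketch rec = { .aifsn = 0x02, .ecw_min_max = 0xa4,
                                      .timer = 10 };
        uint8_t ecw_min = rec.ecw_min_max & 0x0f;          /* 4 */
        uint8_t ecw_max = (rec.ecw_min_max & 0xf0) >> 4;   /* 10 */

        /* 802.11 encodes the contention window as CW = 2^ECW - 1 */
        printf("CWmin %u CWmax %u AIFSN %u\n",
               (1u << ecw_min) - 1, (1u << ecw_max) - 1,
               (unsigned int)(rec.aifsn & 0x0f));
        return 0;
}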
+ */ + sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; + own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); + + return (own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN)); +} + +__le32 iwl_mvm_get_sta_htc_flags(struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta) +{ + u8 *mac_cap_info = + &link_sta->he_cap.he_cap_elem.mac_cap_info[0]; + __le32 htc_flags = 0; + + if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) + htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); + if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || + (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { + u8 link_adap = + ((mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + + (mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + + if (link_adap == 2) + htc_flags |= + cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); + else if (link_adap == 3) + htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); + } + if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); + if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) + htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); + if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); + + return htc_flags; +} + static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { @@ -1947,9 +2337,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta; u32 flags; int i; - const struct ieee80211_sta_he_cap *own_he_cap = NULL; - struct ieee80211_chanctx_conf *chanctx_conf; - const struct ieee80211_supported_band *sband; void *cmd; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) @@ -1976,16 +2363,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - if (WARN_ON(!chanctx_conf)) { - rcu_read_unlock(); - return; - } - - sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; - own_he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); if (IS_ERR_OR_NULL(sta)) { rcu_read_unlock(); @@ -1993,7 +2370,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, return; } - if (!sta->he_cap.has_he) { + if (!sta->deflink.he_cap.has_he) { rcu_read_unlock(); return; } @@ -2001,95 +2378,29 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, flags = 0; /* Block 26-tone RU OFDMA transmissions */ - if (mvmvif->he_ru_2mhz_block) + if (mvmvif->deflink.he_ru_2mhz_block) flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; /* HTC flags */ - if (sta->he_cap.he_cap_elem.mac_cap_info[0] & - IEEE80211_HE_MAC_CAP0_HTC_HE) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); - if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & - IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || - (sta->he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { - u8 link_adap = - ((sta->he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + - (sta->he_cap.he_cap_elem.mac_cap_info[1] & - IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + sta_ctxt_cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, &sta->deflink); - if (link_adap == 2) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); - else if (link_adap == 3) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); - } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) - 
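/*
 * Illustrative sketch (not part of this diff): the HTC flags helper above
 * folds a two-bit "link adaptation" capability that is split across two
 * capability octets into one value and maps 2 -> unsolicited feedback,
 * 3 -> both. The masks and flag values below are invented for the sketch
 * (they are not the IEEE80211_HE_MAC_CAP* definitions); only the shape of
 * the computation is the point.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_LO_MASK   0x80u    /* low bit of the field, in octet A */
#define CAP_HI_MASK   0x01u    /* high bit of the field, in octet B */

#define FLAG_LINK_ADAP_UNSOLICITED  (1u << 0)
#define FLAG_LINK_ADAP_BOTH         (1u << 1)

static unsigned int link_adaptation_flags(uint8_t octet_a, uint8_t octet_b)
{
        unsigned int lo = !!(octet_a & CAP_LO_MASK);
        unsigned int hi = !!(octet_b & CAP_HI_MASK);
        unsigned int link_adap = (hi << 1) | lo;

        if (link_adap == 2)
                return FLAG_LINK_ADAP_UNSOLICITED;
        if (link_adap == 3)
                return FLAG_LINK_ADAP_BOTH;
        return 0;               /* 0/1: no HE link adaptation feedback */
}

int main(void)
{
        printf("flags: 0x%x\n", link_adaptation_flags(0x80, 0x01));
        return 0;
}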
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); - if (sta->he_cap.he_cap_elem.mac_cap_info[3] & - IEEE80211_HE_MAC_CAP3_OMI_CONTROL) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); - if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) - sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); - - /* - * Initialize the PPE thresholds to "None" (7), as described in Table - * 9-262ac of 80211.ax/D3.0. - */ - memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, - sizeof(sta_ctxt_cmd.pkt_ext)); - - /* If PPE Thresholds exist, parse them into a FW-familiar format. */ - if (sta->he_cap.he_cap_elem.phy_cap_info[6] & - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { - iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, - &sta_ctxt_cmd.pkt_ext); + /* PPE Thresholds */ + if (!iwl_mvm_set_sta_pkt_ext(mvm, &sta->deflink, &sta_ctxt_cmd.pkt_ext)) flags |= STA_CTXT_HE_PACKET_EXT; - /* PPE Thresholds doesn't exist - set the API PPE values - * according to Common Nominal Packet Padding fiels. */ - } else { - u8 nominal_padding = - u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], - IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); - if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) - iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, - nominal_padding, - &flags); - } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) flags |= STA_CTXT_HE_32BIT_BA_BITMAP; - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN) flags |= STA_CTXT_HE_ACK_ENABLED; rcu_read_unlock(); - /* Mark MU EDCA as enabled, unless none detected on some AC */ - flags |= STA_CTXT_HE_MU_EDCA_CW; - for (i = 0; i < IEEE80211_NUM_ACS; i++) { - struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = - &mvmvif->queue_params[i].mu_edca_param_rec; - u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); - - if (!mvmvif->queue_params[i].mu_edca) { - flags &= ~STA_CTXT_HE_MU_EDCA_CW; - break; - } - - sta_ctxt_cmd.trig_based_txf[ac].cwmin = - cpu_to_le16(mu_edca->ecw_min_max & 0xf); - sta_ctxt_cmd.trig_based_txf[ac].cwmax = - cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); - sta_ctxt_cmd.trig_based_txf[ac].aifsn = - cpu_to_le16(mu_edca->aifsn); - sta_ctxt_cmd.trig_based_txf[ac].mu_time = - cpu_to_le16(mu_edca->mu_edca_timer); - } - + if (iwl_mvm_set_fw_mu_edca_params(mvm, &mvmvif->deflink, + &sta_ctxt_cmd.trig_based_txf[0])) + flags |= STA_CTXT_HE_MU_EDCA_CW; if (vif->bss_conf.uora_exists) { flags |= STA_CTXT_HE_TRIG_RND_ALLOC; @@ -2100,8 +2411,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, (vif->bss_conf.uora_ocw_range >> 3) & 0x7; } - if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_ACK_EN)) + if (!iwl_mvm_is_nic_ack_enabled(mvm, vif)) flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; if (vif->bss_conf.nontransmitted) { @@ -2161,9 +2471,8 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } -static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 duration_override) +void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u32 duration_override) { u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; @@ -2189,10 +2498,91 @@ static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, min_duration, 500, false); } +/* 
Handle association common part to MLD and non-MLD modes */ +void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + /* The firmware tracks the MU-MIMO group on its own. + * However, on HW restart we should restore this data. + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + (changes & BSS_CHANGED_MU_GROUPS) && vif->bss_conf.mu_mimo_owner) { + ret = iwl_mvm_update_mu_groups(mvm, vif); + if (ret) + IWL_ERR(mvm, + "failed to update VHT MU_MIMO groups\n"); + } + + iwl_mvm_recalc_multicast(mvm); + + /* reset rssi values */ + mvmvif->bf_data.ave_beacon_signal = 0; + + iwl_mvm_bt_coex_vif_change(mvm); + iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT, + IEEE80211_SMPS_AUTOMATIC); + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); +} + +/* Execute the common part for MLD and non-MLD modes */ +void +iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (changes & BSS_CHANGED_BEACON_INFO) { + /* We received a beacon from the associated AP so + * remove the session protection. + */ + iwl_mvm_stop_session_protection(mvm, vif); + + iwl_mvm_sf_update(mvm, vif, false); + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + } + + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | + /* Send power command on every beacon change, + * because we may have not enabled beacon abort yet. + */ + BSS_CHANGED_BEACON_INFO)) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } + + if (changes & BSS_CHANGED_CQM) { + IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); + /* reset cqm events tracking */ + mvmvif->bf_data.last_cqm_event = 0; + if (mvmvif->bf_data.bf_enabled) { + /* FIXME: need to update per link when FW API will + * support it + */ + ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); + if (ret) + IWL_ERR(mvm, + "failed to update CQM thresholds\n"); + } + } + + if (changes & BSS_CHANGED_BANDWIDTH) + iwl_mvm_update_link_smps(vif, link_conf); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, - u32 changes) + u64 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; @@ -2202,19 +2592,24 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * on the beacon interval, which was not known when the station * interface was added. 
*/ - if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { - if (vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) - iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) { + if ((vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) || + (vif->bss_conf.eht_support && + !iwlwifi_mod_params.disable_11be)) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id); iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } /* Update MU EDCA params */ if (changes & BSS_CHANGED_QOS && mvmvif->associated && - bss_conf->assoc && vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) - iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + vif->cfg.assoc && + ((vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) || + (vif->bss_conf.eht_support && + !iwlwifi_mod_params.disable_11be))) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id); /* * If we're not associated yet, take the (new) BSSID before associating @@ -2223,22 +2618,22 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * branch for disassociation below. */ if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) - memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + memcpy(mvmvif->deflink.bssid, bss_conf->bssid, ETH_ALEN); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->deflink.bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* after sending it once, adopt mac80211 data */ - memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); - mvmvif->associated = bss_conf->assoc; + memcpy(mvmvif->deflink.bssid, bss_conf->bssid, ETH_ALEN); + mvmvif->associated = vif->cfg.assoc; if (changes & BSS_CHANGED_ASSOC) { - if (bss_conf->assoc) { + if (vif->cfg.assoc) { /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); - memset(&mvmvif->beacon_stats, 0, - sizeof(mvmvif->beacon_stats)); + memset(&mvmvif->deflink.beacon_stats, 0, + sizeof(mvmvif->deflink.beacon_stats)); /* add quota for this interface */ ret = iwl_mvm_update_quotas(mvm, true, NULL); @@ -2293,9 +2688,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (vif->p2p) { iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_PROT, - IEEE80211_SMPS_DYNAMIC); + IEEE80211_SMPS_DYNAMIC, 0); } - } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { + } else if (mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated @@ -2317,17 +2712,21 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* first remove remaining keys */ + iwl_mvm_sec_key_remove_ap(mvm, vif, + &mvmvif->deflink, 0); + /* * Remove AP station now that * the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, - mvmvif->ap_sta_id); + mvmvif->deflink.ap_sta_id); if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; } /* remove quota for this interface */ @@ -2343,83 +2742,64 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, vif->addr); } - /* - * The firmware tracks the MU-MIMO group on its own. - * However, on HW restart we should restore this data. 
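/*
 * Illustrative sketch (not part of this diff): the bss_info_changed callbacks
 * now take the "changes" bitmap as a 64-bit value. When testing bits above
 * bit 31, the flag constants must be built with 64-bit shifts, otherwise the
 * shift overflows a 32-bit constant; the flag name below is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_CHANGED_HIGH_FLAG  (1ULL << 40)   /* note ULL, not 1u << 40 */

static void handle_changes(uint64_t changes)
{
        if (changes & SKETCH_CHANGED_HIGH_FLAG)
                printf("high-bit change flag set\n");
}

int main(void)
{
        handle_changes(SKETCH_CHANGED_HIGH_FLAG);
        return 0;
}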
- */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { - ret = iwl_mvm_update_mu_groups(mvm, vif); - if (ret) - IWL_ERR(mvm, - "failed to update VHT MU_MIMO groups\n"); - } + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); + } - iwl_mvm_recalc_multicast(mvm); + iwl_mvm_bss_info_changed_station_common(mvm, vif, &vif->bss_conf, + changes); +} - /* reset rssi values */ - mvmvif->bf_data.ave_beacon_signal = 0; +bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + int *ret) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int i; - iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, - IEEE80211_SMPS_AUTOMATIC); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - iwl_mvm_config_scan(mvm); - } + lockdep_assert_held(&mvm->mutex); - if (changes & BSS_CHANGED_BEACON_INFO) { - /* - * We received a beacon from the associated AP so - * remove the session protection. - */ - iwl_mvm_stop_session_protection(mvm, vif); + mvmvif->ap_assoc_sta_count = 0; - iwl_mvm_sf_update(mvm, vif, false); - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - } + /* must be set before quota calculations */ + mvmvif->ap_ibss_active = true; - if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | - /* - * Send power command on every beacon change, - * because we may have not enabled beacon abort yet. - */ - BSS_CHANGED_BEACON_INFO)) { - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); + /* send all the early keys to the device now */ + for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { + struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; + + if (!key) + continue; + + mvmvif->ap_early_keys[i] = NULL; + + *ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); + if (*ret) + return true; } - if (changes & BSS_CHANGED_CQM) { - IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); - /* reset cqm events tracking */ - mvmvif->bf_data.last_cqm_event = 0; - if (mvmvif->bf_data.bf_enabled) { - ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); - if (ret) - IWL_ERR(mvm, - "failed to update CQM thresholds\n"); - } + if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { + iwl_mvm_vif_set_low_latency(mvmvif, true, + LOW_LATENCY_VIF_TYPE); + iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); } - if (changes & BSS_CHANGED_BANDWIDTH) - iwl_mvm_apply_fw_smps_request(vif); + /* power updated needs to be done before quotas */ + iwl_mvm_power_update_mac(mvm); + + return false; } static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret, i; + int ret; mutex_lock(&mvm->mutex); - /* Send the beacon template */ - ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); - if (ret) - goto out_unlock; - /* * Re-calculate the tsf id, as the leader-follower relations depend on * the beacon interval, which was not known when the AP interface @@ -2428,12 +2808,31 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); - mvmvif->ap_assoc_sta_count = 0; + /* For older devices need to send beacon template before adding mac + * context. 
For the newer, the beacon is a resource that belongs to a + * MAC, so need to send beacon template after adding the mac. + */ + if (mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_22000) { + /* Add the mac context */ + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; - /* Add the mac context */ - ret = iwl_mvm_mac_ctxt_add(mvm, vif); - if (ret) - goto out_unlock; + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); + if (ret) + goto out_unlock; + } else { + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); + if (ret) + goto out_unlock; + + /* Add the mac context */ + ret = iwl_mvm_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; + } /* Perform the binding */ ret = iwl_mvm_binding_add_vif(mvm, vif); @@ -2475,35 +2874,12 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, } } - /* must be set before quota calculations */ - mvmvif->ap_ibss_active = true; - - /* send all the early keys to the device now */ - for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { - struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; - - if (!key) - continue; - - mvmvif->ap_early_keys[i] = NULL; - - ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); - if (ret) - goto out_quota_failed; - } - - if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { - iwl_mvm_vif_set_low_latency(mvmvif, true, - LOW_LATENCY_VIF_TYPE); - iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); - } - - /* power updated needs to be done before quotas */ - iwl_mvm_power_update_mac(mvm); + if (iwl_mvm_start_ap_ibss_common(hw, vif, &ret)) + goto out_failed; ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) - goto out_quota_failed; + goto out_failed; /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) @@ -2515,11 +2891,11 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); - iwl_mvm_ftm_restart_responder(mvm, vif); + iwl_mvm_ftm_restart_responder(mvm, vif, &vif->bss_conf); goto out_unlock; -out_quota_failed: +out_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; iwl_mvm_send_rm_bcast_sta(mvm, vif); @@ -2533,15 +2909,28 @@ out_unlock: return ret; } -static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, +static int iwl_mvm_start_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + return iwl_mvm_start_ap_ibss(hw, vif, link_conf); +} + +static int iwl_mvm_start_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return iwl_mvm_start_ap_ibss(hw, vif, &vif->bss_conf); +} + +/* Common part for MLD and non-MLD ops */ +void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_prepare_mac_removal(mvm, vif); + lockdep_assert_held(&mvm->mutex); - mutex_lock(&mvm->mutex); + iwl_mvm_prepare_mac_removal(mvm, vif); /* Handle AP stop while in CSA */ if (rcu_access_pointer(mvm->csa_vif) == vif) { @@ -2566,6 +2955,17 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, } iwl_mvm_bt_coex_vif_change(mvm); +} + +static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + iwl_mvm_stop_ap_ibss_common(mvm, 
vif); /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) @@ -2597,11 +2997,24 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } +static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + iwl_mvm_stop_ap_ibss(hw, vif, link_conf); +} + +static void iwl_mvm_stop_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + iwl_mvm_stop_ap_ibss(hw, vif, &vif->bss_conf); +} + static void iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, - u32 changes) + u64 changes) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -2616,11 +3029,11 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, /* Need to send a new beacon template to the FW */ if (changes & BSS_CHANGED_BEACON && - iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) + iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, &vif->bss_conf)) IWL_WARN(mvm, "Failed updating beacon data\n"); if (changes & BSS_CHANGED_FTM_RESPONDER) { - int ret = iwl_mvm_ftm_start_responder(mvm, vif); + int ret = iwl_mvm_ftm_start_responder(mvm, vif, &vif->bss_conf); if (ret) IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", @@ -2632,22 +3045,39 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, - u32 changes) + u64 changes) +{ + static const struct iwl_mvm_bss_info_changed_ops callbacks = { + .bss_info_changed_sta = iwl_mvm_bss_info_changed_station, + .bss_info_changed_ap_ibss = iwl_mvm_bss_info_changed_ap_ibss, + }; + + iwl_mvm_bss_info_changed_common(hw, vif, bss_conf, &callbacks, + changes); +} + +void +iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + const struct iwl_mvm_bss_info_changed_ops *callbacks, + u64 changes) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); - if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) + if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); switch (vif->type) { case NL80211_IFTYPE_STATION: - iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); + callbacks->bss_info_changed_sta(mvm, vif, bss_conf, changes); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: - iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); + callbacks->bss_info_changed_ap_ibss(mvm, vif, bss_conf, + changes); break; case NL80211_IFTYPE_MONITOR: if (changes & BSS_CHANGED_MU_GROUPS) @@ -2667,9 +3097,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_scan_request *hw_req) +int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -2685,8 +3114,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, return ret; } -static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -2705,7 +3134,7 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); 
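/*
 * Illustrative sketch (not part of this diff): a recurring pattern in this
 * update is to split a mac80211 callback into a thin wrapper per mode (MLD
 * vs. non-MLD) plus one shared "common" function that takes a const structure
 * of function pointers, as with iwl_mvm_bss_info_changed_ops above. The
 * types and function names below are invented for a minimal standalone
 * version of that shape.
 */
#include <stdio.h>

struct bss_changed_ops_sketch {
        void (*changed_sta)(unsigned long long changes);
        void (*changed_ap_ibss)(unsigned long long changes);
};

static void changed_sta_legacy(unsigned long long changes)
{
        printf("legacy station path, changes 0x%llx\n", changes);
}

static void changed_ap_ibss_legacy(unsigned long long changes)
{
        printf("legacy AP/IBSS path, changes 0x%llx\n", changes);
}

/* shared logic; the mode-specific behaviour is injected through ops */
static void bss_info_changed_common(const struct bss_changed_ops_sketch *ops,
                                    int is_station, unsigned long long changes)
{
        if (is_station)
                ops->changed_sta(changes);
        else
                ops->changed_ap_ibss(changes);
}

int main(void)
{
        static const struct bss_changed_ops_sketch legacy_ops = {
                .changed_sta = changed_sta_legacy,
                .changed_ap_ibss = changed_ap_ibss_legacy,
        };

        bss_info_changed_common(&legacy_ops, 1, 0x80ULL);
        return 0;
}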
} -static void +void iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, @@ -2720,7 +3149,7 @@ iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, tids, more_data, false); } -static void +void iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tids, int num_frames, @@ -2781,7 +3210,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, */ break; case STA_NOTIFY_AWAKE: - if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) break; if (txqs) @@ -2794,10 +3223,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, spin_unlock_bh(&mvmsta->lock); } -static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - enum sta_notify_cmd cmd, - struct ieee80211_sta *sta) +void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { __iwl_mvm_mac_sta_notify(hw, cmd, sta); } @@ -2855,12 +3282,13 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) rcu_read_unlock(); } -static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned int link_id; /* * This is called before mac80211 does RCU synchronisation, @@ -2869,12 +3297,26 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, * be able to find the station this way, and we don't rely * on further RCU synchronisation after the sta_state() * callback deleted the station. + * Since there's mvm->mutex here, no need to have RCU lock for + * mvm_sta->link access. 
*/ mutex_lock(&mvm->mutex); - if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) - rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], - ERR_PTR(-ENOENT)); + for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) { + struct iwl_mvm_link_sta *link_sta; + u32 sta_id; + + if (!mvm_sta->link[link_id]) + continue; + link_sta = rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + sta_id = link_sta->sta_id; + if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) { + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], + ERR_PTR(-ENOENT)); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); + } + } mutex_unlock(&mvm->mutex); } @@ -2967,20 +3409,27 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, rcu_read_unlock(); } -static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void +iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { .tolerated = true, }; - if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { - mvmvif->he_ru_2mhz_block = false; + if (WARN_ON_ONCE(!link_conf->chandef.chan || + !mvmvif->link[link_id])) + return; + + if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) { + mvmvif->link[link_id]->he_ru_2mhz_block = false; return; } - cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, + cfg80211_bss_iter(hw->wiphy, &link_conf->chandef, iwl_mvm_check_he_obss_narrow_bw_ru_iter, &iter_data); @@ -2988,7 +3437,7 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, * If there is at least one AP on radar channel that cannot * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
*/ - mvmvif->he_ru_2mhz_block = !iter_data.tolerated; + mvmvif->link[link_id]->he_ru_2mhz_block = !iter_data.tolerated; } static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, @@ -3012,8 +3461,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (he_cap) { /* we know that ours is writable */ @@ -3031,8 +3479,7 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, #if IS_ENABLED(CONFIG_IWLMEI) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mei_conn_info conn_info = { - .ssid_len = vif->bss_conf.ssid_len, - .channel = vif->bss_conf.chandef.chan->hw_value, + .ssid_len = vif->cfg.ssid_len, }; if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) @@ -3041,7 +3488,16 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, if (!mvm->mei_registered) return; + /* FIXME: MEI needs to be updated for MLO */ + if (!vif->bss_conf.chandef.chan) + return; + + conn_info.channel = vif->bss_conf.chandef.chan->hw_value; + switch (mvm_sta->pairwise_cipher) { + case WLAN_CIPHER_SUITE_TKIP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_TKIP; + break; case WLAN_CIPHER_SUITE_CCMP: conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; break; @@ -3079,7 +3535,7 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, return; } - memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); + memcpy(conn_info.ssid, vif->cfg.ssid, vif->cfg.ssid_len); memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); /* TODO: add support for collocated AP data */ @@ -3087,24 +3543,306 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, #endif } +static int iwl_mvm_mac_ctxt_changed_wrapper(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool force_assoc_off) +{ + return iwl_mvm_mac_ctxt_changed(mvm, vif, force_assoc_off, NULL); +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { + static const struct iwl_mvm_sta_state_ops callbacks = { + .add_sta = iwl_mvm_add_sta, + .update_sta = iwl_mvm_update_sta, + .rm_sta = iwl_mvm_rm_sta, + .mac_ctxt_changed = iwl_mvm_mac_ctxt_changed_wrapper, + }; + + return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, new_state, + &callbacks); +} + +/* FIXME: temporary making two assumptions in all sta handling functions: + * (1) when setting sta state, the link exists and protected + * (2) if a link is valid in sta then it's valid in vif (can + * use same index in the link array) + */ +static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id; + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct ieee80211_bss_conf *conf = + link_conf_dereference_check(vif, link_id); + struct ieee80211_link_sta *link_sta = + link_sta_dereference_check(sta, link_id); + + if (!conf || !link_sta || !mvmvif->link[link_id]->phy_ctxt) + continue; + + iwl_mvm_rs_rate_init(mvm, vif, sta, conf, link_sta, + mvmvif->link[link_id]->phy_ctxt->channel->band); + } +} + +#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 + +static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct 
ieee80211_link_sta *link_sta; + unsigned int link_id; + + /* Beacon interval check - firmware will crash if the beacon + * interval is less than 16. We can't avoid connecting at all, + * so refuse the station state change, this will cause mac80211 + * to abandon attempts to connect to this AP, and eventually + * wpa_s will blocklist the AP... + */ + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (!link_conf) + continue; + + if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) { + IWL_ERR(mvm, + "Beacon interval %d for AP %pM is too small\n", + link_conf->beacon_int, link_sta->addr); + return false; + } + + link_conf->he_support = link_sta->he_cap.has_he; + } + + return true; +} + +static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool is_sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (!link_conf || !mvmvif->link[link_id]) + continue; + + link_conf->he_support = link_sta->he_cap.has_he; + + if (is_sta) { + mvmvif->link[link_id]->he_ru_2mhz_block = false; + if (link_sta->he_cap.has_he) + iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, + link_id, + link_conf); + } + } +} + +static int +iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + unsigned int i; + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_STATION && + !iwl_mvm_vif_conf_from_sta(mvm, vif, sta)) + return -EINVAL; + + if (sta->tdls && + (vif->p2p || + iwl_mvm_tdls_sta_count(mvm, NULL) == IWL_MVM_TDLS_STA_COUNT || + iwl_mvm_phy_ctx_count(mvm) > 1)) { + IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); + return -EBUSY; + } + + ret = callbacks->add_sta(mvm, vif, sta); + if (sta->tdls && ret == 0) { + iwl_mvm_recalc_tdls_state(mvm, vif, true); + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_SETUP); + } + + for_each_sta_active_link(vif, sta, link_sta, i) + link_sta->agg.max_rc_amsdu_len = 1; + + ieee80211_sta_recalc_aggregates(sta); + + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + mvmvif->ap_sta = sta; + + return 0; +} + +static int +iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw, + struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_AP) { + iwl_mvm_vif_set_he_support(hw, vif, sta, false); + mvmvif->ap_assoc_sta_count++; + callbacks->mac_ctxt_changed(mvm, vif, false); + + /* since the below is not for MLD API, it's ok to use + * the default bss_conf + */ + if (!mvm->mld_api_is_used && + ((vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) || + (vif->bss_conf.eht_support && + !iwlwifi_mod_params.disable_11be))) + iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id); + } else if 
(vif->type == NL80211_IFTYPE_STATION) { + iwl_mvm_vif_set_he_support(hw, vif, sta, true); + + callbacks->mac_ctxt_changed(mvm, vif, false); + + if (!mvm->mld_api_is_used) + goto out; + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (WARN_ON(!link_conf)) + return -EINVAL; + if (!mvmvif->link[link_id]) + continue; + + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ALL & + ~LINK_CONTEXT_MODIFY_ACTIVE, + true); + } + } + +out: + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + + return callbacks->update_sta(mvm, vif, sta); +} + +static int +iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + lockdep_assert_held(&mvm->mutex); + + /* we don't support TDLS during DCM */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + if (sta->tdls) { + iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, + NL80211_TDLS_ENABLE_LINK); + } else { + /* enable beacon filtering */ + WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); + + mvmvif->authorized = 1; + + callbacks->mac_ctxt_changed(mvm, vif, false); + iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); + } + + mvm_sta->authorized = true; + + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + + return 0; +} + +static int +iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct iwl_mvm_sta_state_ops *callbacks) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + lockdep_assert_held(&mvm->mutex); + + mvmsta->authorized = false; + + /* once we move into assoc state, need to update rate scale to + * disable using wide bandwidth + */ + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); + + if (!sta->tdls) { + /* Set this but don't call iwl_mvm_mac_ctxt_changed() + * yet to avoid sending high prio again for a little + * time. + */ + mvmvif->authorized = 0; + + /* disable beacon filtering */ + iwl_mvm_disable_beacon_filter(mvm, vif, 0); + } + + return 0; +} + +/* Common part for MLD and non-MLD modes */ +int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state, + const struct iwl_mvm_sta_state_ops *callbacks) +{ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; int ret; IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", sta->addr, old_state, new_state); - /* this would be a mac80211 bug ... but don't crash */ - if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) - return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; - /* * If we are in a STA removal flow and in DQA mode: * @@ -3132,51 +3870,31 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * from the AP now. */ iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); + + /* Also free dup data just in case any assertions below fail */ + kfree(mvm_sta->dup_data); } mutex_lock(&mvm->mutex); + + /* this would be a mac80211 bug ... 
but don't crash */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) { + mutex_unlock(&mvm->mutex); + return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status) ? 0 : -EINVAL; + } + } + /* track whether or not the station is associated */ mvm_sta->sta_state = new_state; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { - /* - * Firmware bug - it'll crash if the beacon interval is less - * than 16. We can't avoid connecting at all, so refuse the - * station state change, this will cause mac80211 to abandon - * attempts to connect to this AP, and eventually wpa_s will - * blocklist the AP... - */ - if (vif->type == NL80211_IFTYPE_STATION && - vif->bss_conf.beacon_int < 16) { - IWL_ERR(mvm, - "AP %pM beacon interval is %d, refusing due to firmware bug!\n", - sta->addr, vif->bss_conf.beacon_int); - ret = -EINVAL; - goto out_unlock; - } - - if (vif->type == NL80211_IFTYPE_STATION) - vif->bss_conf.he_support = sta->he_cap.has_he; - - if (sta->tdls && - (vif->p2p || - iwl_mvm_tdls_sta_count(mvm, NULL) == - IWL_MVM_TDLS_STA_COUNT || - iwl_mvm_phy_ctx_count(mvm) > 1)) { - IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); - ret = -EBUSY; + ret = iwl_mvm_sta_state_notexist_to_none(mvm, vif, sta, + callbacks); + if (ret < 0) goto out_unlock; - } - - ret = iwl_mvm_add_sta(mvm, vif, sta); - if (sta->tdls && ret == 0) { - iwl_mvm_recalc_tdls_state(mvm, vif, true); - iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, - NL80211_TDLS_SETUP); - } - - sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* @@ -3188,83 +3906,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { - if (vif->type == NL80211_IFTYPE_AP) { - vif->bss_conf.he_support = sta->he_cap.has_he; - mvmvif->ap_assoc_sta_count++; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - if (vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) - iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); - } else if (vif->type == NL80211_IFTYPE_STATION) { - vif->bss_conf.he_support = sta->he_cap.has_he; - - mvmvif->he_ru_2mhz_block = false; - if (sta->he_cap.has_he) - iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); - - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } - - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - false); - ret = iwl_mvm_update_sta(mvm, vif, sta); + ret = iwl_mvm_sta_state_auth_to_assoc(hw, mvm, vif, sta, + callbacks); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { - ret = 0; - - /* we don't support TDLS during DCM */ - if (iwl_mvm_phy_ctx_count(mvm) > 1) - iwl_mvm_teardown_tdls_peers(mvm); - - if (sta->tdls) { - iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, - NL80211_TDLS_ENABLE_LINK); - } else { - /* enable beacon filtering */ - WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - - mvmvif->authorized = 1; - - /* - * Now that the station is authorized, i.e., keys were already - * installed, need to indicate to the FW that - * multicast data frames can be forwarded to the driver - */ - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); - } - - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - true); + ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta, + callbacks); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { 
- /* once we move into assoc state, need to update rate scale to - * disable using wide bandwidth - */ - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - false); - if (!sta->tdls) { - /* Multicast data frames are no longer allowed */ - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - - /* - * Set this after the above iwl_mvm_mac_ctxt_changed() - * to avoid sending high prio again for a little time. - */ - mvmvif->authorized = 0; - - /* disable beacon filtering */ - ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); - WARN_ON(ret && - !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, - &mvm->status)); - } - ret = 0; + ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta, + callbacks); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; - iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + callbacks->mac_ctxt_changed(mvm, vif, false); } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); ret = 0; @@ -3273,9 +3929,11 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { iwl_mvm_stop_session_protection(mvm, vif); - ret = iwl_mvm_rm_sta(mvm, vif, sta); + mvmvif->ap_sta = NULL; + } + ret = callbacks->rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, @@ -3304,7 +3962,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, return ret; } -static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -3313,18 +3971,15 @@ static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) return 0; } -static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u32 changed) +void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_SUPP_RATES_CHANGED | IEEE80211_RC_NSS_CHANGED)) - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - true); + iwl_mvm_rs_rate_init_all_links(mvm, vif, sta); if (vif->type == NL80211_IFTYPE_STATION && changed & IEEE80211_RC_NSS_CHANGED) @@ -3332,13 +3987,14 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, } static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 ac, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvmvif->queue_params[ac] = *params; + mvmvif->deflink.queue_params[ac] = *params; /* * No need to update right away, we'll get BSS_CHANGED_QOS @@ -3355,9 +4011,9 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, return 0; } -static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_prep_tx_info *info) +void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif 
*vif, + struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -3366,9 +4022,9 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_prep_tx_info *info) +void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -3381,10 +4037,10 @@ static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *req, - struct ieee80211_scan_ies *ies) +int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -3392,7 +4048,7 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - if (!vif->bss_conf.idle) { + if (!vif->cfg.idle) { ret = -EBUSY; goto out; } @@ -3404,8 +4060,8 @@ out: return ret; } -static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -3441,8 +4097,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvmsta = NULL; - struct iwl_mvm_key_pn *ptk_pn; + struct iwl_mvm_key_pn *ptk_pn = NULL; int keyidx = key->keyidx; + u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); int ret, i; u8 key_offset; @@ -3486,7 +4144,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (cmd) { case SET_KEY: - if (keyidx == 6 || keyidx == 7) + if (vif->type == NL80211_IFTYPE_STATION && + (keyidx == 6 || keyidx == 7)) rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], key); @@ -3497,10 +4156,14 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. * CMAC/GMAC in AP/IBSS modes must be done in software. + * + * Except, of course, beacon protection - it must be + * offloaded since we just set a beacon template. */ - if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || - key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { + if (keyidx < 6 && + (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || + key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) { ret = -EOPNOTSUPP; break; } @@ -3581,11 +4244,21 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) mvmsta->pairwise_cipher = key->cipher; - IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); - ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key (sta:%pM, id:%d)\n", + sta ? 
sta->addr : NULL, key->keyidx); + + if (sec_key_ver) + ret = iwl_mvm_sec_key_add(mvm, vif, sta, key); + else + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); + if (ret) { IWL_WARN(mvm, "set key failed\n"); key->hw_key_idx = STA_KEY_IDX_INVALID; + if (ptk_pn) { + RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); + kfree(ptk_pn); + } /* * can't add key for RX, but we don't need it * in the device for TX so still return 0, @@ -3600,7 +4273,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; case DISABLE_KEY: - if (keyidx == 6 || keyidx == 7) + if (vif->type == NL80211_IFTYPE_STATION && + (keyidx == 6 || keyidx == 7)) RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], NULL); @@ -3635,7 +4309,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, } IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); - ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); + if (sec_key_ver) + ret = iwl_mvm_sec_key_del(mvm, vif, sta, key); + else + ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); break; default: ret = -EINVAL; @@ -3644,11 +4321,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, return ret; } -static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, - enum set_key_cmd cmd, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -3660,11 +4335,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, return ret; } -static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_key_conf *keyconf, - struct ieee80211_sta *sta, - u32 iv32, u16 *phase1key) +void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -3740,7 +4415,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, /* Set the channel info data */ iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, iwl_mvm_phy_band_from_nl80211(channel->band), - PHY_VHT_CHANNEL_MODE20, + IWL_PHY_CHANNEL_MODE20, 0); /* Set the time and duration */ @@ -3758,7 +4433,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. 
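The delay policy in the comment above (aim for two to three DTIM intervals, subject to a firmware cap, and trimmed further when the DTIM interval is short) boils down to a small clamp. A sketch assuming a 200 TU cap; the driver's actual AUX_ROC_MAX_DELAY value is not visible in this hunk, and the follow-on trimming against req_dur is left out.

#include <stdio.h>

/* placeholder cap in TU; the real AUX_ROC_MAX_DELAY is not shown here */
#define MAX_AUX_ROC_DELAY_TU 200u

static unsigned int aux_roc_delay(unsigned int dtim_interval)
{
        unsigned int delay = dtim_interval * 3;

        return delay < MAX_AUX_ROC_DELAY_TU ? delay : MAX_AUX_ROC_DELAY_TU;
}

int main(void)
{
        printf("dtim=50  -> delay=%u TU\n", aux_roc_delay(50));   /* 150 */
        printf("dtim=100 -> delay=%u TU\n", aux_roc_delay(100));  /* capped at 200 */
        return 0;
}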
*/ - if (vif->bss_conf.assoc) { + if (vif->cfg.assoc) { delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= req_dur) { @@ -3835,18 +4510,79 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, return res; } +static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id) +{ + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { + IWL_ERR(mvm, "hotspot not supported\n"); + return -EINVAL; + } + + if (iwl_mvm_has_new_station_api(mvm->fw)) { + ret = iwl_mvm_add_aux_sta(mvm, lmac_id); + WARN(ret, "Failed to allocate aux station"); + } + + return ret; +} + +static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *new_phy_ctxt) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + /* Unbind the P2P_DEVICE from the current PHY context, + * and if the PHY context is not used remove it. + */ + ret = iwl_mvm_binding_remove_vif(mvm, vif); + if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) + return ret; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + + /* Bind the P2P_DEVICE to the current PHY Context */ + mvmvif->deflink.phy_ctxt = new_phy_ctxt; + + ret = iwl_mvm_binding_add_vif(mvm, vif); + WARN(ret, "Failed binding P2P_DEVICE\n"); + return ret; +} + static int iwl_mvm_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *channel, int duration, enum ieee80211_roc_type type) { + static const struct iwl_mvm_roc_ops ops = { + .add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20, + .switch_phy_ctxt = iwl_mvm_roc_switch_binding, + }; + + return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); +} + +/* Execute the common part for MLD and non-MLD modes */ +int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type, + const struct iwl_mvm_roc_ops *ops) +{ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct cfg80211_chan_def chandef; struct iwl_mvm_phy_ctxt *phy_ctxt; bool band_change_removal; int ret, i; + u32 lmac_id; IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, duration, type); @@ -3861,25 +4597,13 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, switch (vif->type) { case NL80211_IFTYPE_STATION: - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { - /* Use aux roc framework (HS20) */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { - u32 lmac_id; - - lmac_id = iwl_mvm_get_lmac_id(mvm->fw, - channel->band); - ret = iwl_mvm_add_aux_sta(mvm, lmac_id); - if (WARN(ret, - "Failed to allocate aux station")) - goto out_unlock; - } + lmac_id = iwl_mvm_get_lmac_id(mvm, channel->band); + + /* Use aux roc framework (HS20) */ + ret = ops->add_aux_sta_for_hs20(mvm, lmac_id); + if (!ret) ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration); - goto out_unlock; - } - IWL_ERR(mvm, "hotspot not supported\n"); - ret = -EINVAL; goto out_unlock; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ @@ -3892,34 +4616,21 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, for (i = 0; i < NUM_PHY_CTX; i++) { phy_ctxt = &mvm->phy_ctxts[i]; - if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) + if (phy_ctxt->ref == 0 || 
mvmvif->deflink.phy_ctxt == phy_ctxt) continue; if (phy_ctxt->ref && channel == phy_ctxt->channel) { - /* - * Unbind the P2P_DEVICE from the current PHY context, - * and if the PHY context is not used remove it. - */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - - /* Bind the P2P_DEVICE to the current PHY Context */ - mvmvif->phy_ctxt = phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (WARN(ret, "Failed binding P2P_DEVICE\n")) + ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); + if (ret) goto out_unlock; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); goto schedule_time_event; } } /* Need to update the PHY context only if the ROC channel changed */ - if (channel == mvmvif->phy_ctxt->channel) + if (channel == mvmvif->deflink.phy_ctxt->channel) goto schedule_time_event; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); @@ -3933,14 +4644,14 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, band_change_removal = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && - mvmvif->phy_ctxt->channel->band != chandef.chan->band; + mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band; - if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { + if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) { /* * Change the PHY context configuration as it is currently * referenced only by the P2P Device MAC (and we can modify it) */ - ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, + ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt, &chandef, 1, 1); if (ret) goto out_unlock; @@ -3963,21 +4674,11 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, goto out_unlock; } - /* Unbind the P2P_DEVICE from the current PHY context */ - ret = iwl_mvm_binding_remove_vif(mvm, vif); - if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) - goto out_unlock; - - iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); - - /* Bind the P2P_DEVICE to the new allocated PHY context */ - mvmvif->phy_ctxt = phy_ctxt; - - ret = iwl_mvm_binding_add_vif(mvm, vif); - if (WARN(ret, "Failed binding P2P_DEVICE\n")) + ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt); + if (ret) goto out_unlock; - iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); } schedule_time_event: @@ -3990,8 +4691,8 @@ out_unlock: return ret; } -static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -4015,7 +4716,7 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, { struct iwl_mvm_ftm_responder_iter_data *data = _data; - if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && + if (rcu_access_pointer(vif->bss_conf.chanctx_conf) == data->ctx && vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) data->responder = true; } @@ -4068,8 +4769,8 @@ out: return ret; } -static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx) +int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -4092,8 +4793,8 @@ static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); } -static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, - struct 
ieee80211_chanctx_conf *ctx) +void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -4102,9 +4803,8 @@ static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, mutex_unlock(&mvm->mutex); } -static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, - struct ieee80211_chanctx_conf *ctx, - u32 changed) +void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, u32 changed) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; @@ -4143,19 +4843,25 @@ out_unlock: mutex_unlock(&mvm->mutex); } -static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx, - bool switching_chanctx) +/* + * This function executes the common part for MLD and non-MLD modes. + * + * Returns true if we're done assigning the chanctx + * (either on failure or success) + */ +static bool +__iwl_mvm_assign_vif_chanctx_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx, int *ret) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int ret; lockdep_assert_held(&mvm->mutex); - mvmvif->phy_ctxt = phy_ctxt; + mvmvif->deflink.phy_ctxt = phy_ctxt; switch (vif->type) { case NL80211_IFTYPE_AP: @@ -4170,19 +4876,36 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, * The AP binding flow is handled as part of the start_ap flow * (in bss_info_changed), similarly for IBSS. */ - ret = 0; - goto out; + *ret = 0; + return true; case NL80211_IFTYPE_STATION: - mvmvif->csa_bcn_pending = false; break; case NL80211_IFTYPE_MONITOR: /* always disable PS when a monitor interface is active */ mvmvif->ps_disabled = true; break; default: - ret = -EINVAL; - goto out; + *ret = -EINVAL; + return true; } + return false; +} + +static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON(!link_conf)) + return -EINVAL; + + if (__iwl_mvm_assign_vif_chanctx_common(mvm, vif, ctx, + switching_chanctx, &ret)) + goto out; ret = iwl_mvm_binding_add_vif(mvm, vif); if (ret) @@ -4216,7 +4939,12 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION) { + if (!switching_chanctx) { + mvmvif->csa_bcn_pending = false; + goto out; + } + mvmvif->csa_bcn_pending = true; if (!fw_has_capa(&mvm->fw->ucode_capa, @@ -4241,46 +4969,52 @@ out_remove_binding: iwl_mvm_power_update_mac(mvm); out: if (ret) - mvmvif->phy_ctxt = NULL; + mvmvif->deflink.phy_ctxt = NULL; return ret; } + static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); + ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); mutex_unlock(&mvm->mutex); return ret; } -static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, - struct 
ieee80211_vif *vif, - struct ieee80211_chanctx_conf *ctx, - bool switching_chanctx) +/* + * This function executes the common part for MLD and non-MLD modes. + * + * Returns if chanctx unassign chanctx is done + * (either on failure or success) + */ +static bool __iwl_mvm_unassign_vif_chanctx_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool switching_chanctx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_vif *disabled_vif = NULL; lockdep_assert_held(&mvm->mutex); - iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); + iwl_mvm_remove_time_event(mvm, mvmvif, + &mvmvif->time_event_data); switch (vif->type) { case NL80211_IFTYPE_ADHOC: - goto out; + return true; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; mvmvif->ps_disabled = false; - iwl_mvm_rm_snif_sta(mvm, vif); break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ if (!switching_chanctx || !mvmvif->ap_ibss_active) - goto out; + return true; mvmvif->csa_countdown = false; @@ -4292,18 +5026,33 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, mvmvif->ap_ibss_active = false; break; - case NL80211_IFTYPE_STATION: - if (!switching_chanctx) - break; + default: + break; + } + return false; +} - disabled_vif = vif; +static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_vif *disabled_vif = NULL; + + if (__iwl_mvm_unassign_vif_chanctx_common(mvm, vif, switching_chanctx)) + goto out; + + if (vif->type == NL80211_IFTYPE_MONITOR) + iwl_mvm_rm_snif_sta(mvm, vif); + + if (vif->type == NL80211_IFTYPE_STATION && switching_chanctx) { + disabled_vif = vif; if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); - break; - default: - break; } iwl_mvm_update_quotas(mvm, false, disabled_vif); @@ -4313,29 +5062,32 @@ out: if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && switching_chanctx) return; - mvmvif->phy_ctxt = NULL; + mvmvif->deflink.phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); + __iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false); mutex_unlock(&mvm->mutex); } static int iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, - struct ieee80211_vif_chanctx_switch *vifs) + struct ieee80211_vif_chanctx_switch *vifs, + const struct iwl_mvm_switch_vif_chanctx_ops *ops) { int ret; mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); @@ -4344,8 +5096,8 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, goto out_reassign; } - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, - true); + ret = ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel 
switch\n"); @@ -4367,8 +5119,8 @@ out_reassign: goto out_restart; } - if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, - true)) { + if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } @@ -4387,15 +5139,17 @@ out: static int iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, - struct ieee80211_vif_chanctx_switch *vifs) + struct ieee80211_vif_chanctx_switch *vifs, + const struct iwl_mvm_switch_vif_chanctx_ops *ops) { int ret; mutex_lock(&mvm->mutex); - __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); + ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, - true); + ret = ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].new_ctx, true); if (ret) { IWL_ERR(mvm, "failed to assign new_ctx during channel switch\n"); @@ -4405,8 +5159,8 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, goto out; out_reassign: - if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, - true)) { + if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx, true)) { IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); goto out_restart; } @@ -4423,10 +5177,13 @@ out: return ret; } -static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, - struct ieee80211_vif_chanctx_switch *vifs, - int n_vifs, - enum ieee80211_chanctx_switch_mode mode) +/* Execute the common part for both MLD and non-MLD modes */ +int +iwl_mvm_switch_vif_chanctx_common(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode, + const struct iwl_mvm_switch_vif_chanctx_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -4437,10 +5194,10 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, switch (mode) { case CHANCTX_SWMODE_SWAP_CONTEXTS: - ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); + ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs, ops); break; case CHANCTX_SWMODE_REASSIGN_VIF: - ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); + ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs, ops); break; default: ret = -EOPNOTSUPP; @@ -4450,16 +5207,28 @@ static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, return ret; } -static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) +static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + static const struct iwl_mvm_switch_vif_chanctx_ops ops = { + .__assign_vif_chanctx = __iwl_mvm_assign_vif_chanctx, + .__unassign_vif_chanctx = __iwl_mvm_unassign_vif_chanctx, + }; + + return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs, mode, &ops); +} + +int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); return mvm->ibss_manager; } -static int iwl_mvm_set_tim(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - bool set) +int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -4469,7 +5238,8 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw, return -EINVAL; } - return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); + return 
iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif, + &mvm_sta->vif->bss_conf); } #ifdef CONFIG_NL80211_TESTMODE @@ -4513,7 +5283,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, case IWL_MVM_TM_CMD_SET_BEACON_FILTER: /* must be associated client vif - ignore authorized */ if (!vif || vif->type != NL80211_IFTYPE_STATION || - !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || + !vif->cfg.assoc || !vif->bss_conf.dtim_period || !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) return -EINVAL; @@ -4525,9 +5295,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, return -EOPNOTSUPP; } -static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - void *data, int len) +int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int err; @@ -4540,9 +5310,8 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, } #endif -static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) +void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { /* By implementing this operation, we prevent mac80211 from * starting its own channel switch timer, so that we can call @@ -4617,9 +5386,9 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, } #define IWL_MAX_CSA_BLOCK_TX 1500 -static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) +int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct ieee80211_vif *csa_vif; @@ -4642,7 +5411,7 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); - if (WARN_ONCE(csa_vif && csa_vif->csa_active, + if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active, "Another CSA is already in progress")) { ret = -EBUSY; goto out_unlock; @@ -4681,7 +5450,7 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, * we don't know the dtim period. In this case, the firmware can't * track the beacons. 
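Several hunks in this file move fields such as assoc, idle and ssid from vif->bss_conf to vif->cfg, following mac80211's MLO split between connection-wide and per-link state. A rough sketch of that split using only fields visible in this diff; the *_like types are invented for illustration and are not mac80211 structures.

#include <stdio.h>
#include <stddef.h>

/* connection-wide state (one per interface), cf. vif->cfg */
struct vif_cfg_like {
        int assoc;
        int idle;
        size_t ssid_len;
};

/* per-link state, cf. the (link) bss_conf */
struct link_conf_like {
        int dtim_period;
        int beacon_int;
};

static int can_track_beacons(const struct vif_cfg_like *cfg,
                             const struct link_conf_like *link)
{
        /* mirrors the pre-CSA check nearby: need association and a DTIM period */
        return cfg->assoc && link->dtim_period;
}

int main(void)
{
        struct vif_cfg_like cfg = { .assoc = 1, .ssid_len = 4 };
        struct link_conf_like link = { .dtim_period = 2, .beacon_int = 100 };

        printf("can track beacons: %d\n", can_track_beacons(&cfg, &link));
        return 0;
}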
*/ - if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { + if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) { ret = -EBUSY; goto out_unlock; } @@ -4732,9 +5501,9 @@ out_unlock: return ret; } -static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_channel_switch *chsw) +void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -4819,13 +5588,14 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) mutex_unlock(&mvm->mutex); } -static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u32 queues, bool drop) +void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif; struct iwl_mvm_sta *mvmsta; struct ieee80211_sta *sta; + bool ap_sta_done = false; int i; u32 msk = 0; @@ -4854,16 +5624,23 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, if (mvmsta->vif != vif) continue; + if (sta == mvmvif->ap_sta) { + if (ap_sta_done) + continue; + ap_sta_done = true; + } + /* make sure only TDLS peers or the AP are flushed */ - WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); + WARN_ON_ONCE(sta != mvmvif->ap_sta && !sta->tdls); if (drop) { if (iwl_mvm_flush_sta(mvm, mvmsta, false)) IWL_ERR(mvm, "flush request fail\n"); } else { - msk |= mvmsta->tfd_queue_msk; if (iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); + else /* only used for !iwl_mvm_has_new_tx_api() below */ + msk |= mvmsta->tfd_queue_msk; } } @@ -4876,8 +5653,32 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, iwl_trans_wait_tx_queues_empty(mvm->trans, msk); } -static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, - struct survey_info *survey) +void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int i; + + mutex_lock(&mvm->mutex); + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { + struct iwl_mvm_sta *mvmsta; + struct ieee80211_sta *tmp; + + tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (tmp != sta) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (iwl_mvm_flush_sta(mvm, mvmsta, false)) + IWL_ERR(mvm, "flush request fail\n"); + } + mutex_unlock(&mvm->mutex); +} + +int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -4929,6 +5730,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) { u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + u32 gi_ltf; switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: @@ -4943,6 +5745,9 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) case RATE_MCS_CHAN_WIDTH_160: rinfo->bw = RATE_INFO_BW_160; break; + case RATE_MCS_CHAN_WIDTH_320: + rinfo->bw = RATE_INFO_BW_320; + break; } if (format == RATE_MCS_CCK_MSK || @@ -4999,9 +5804,16 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) RATE_HT_MCS_INDEX(rate_n_flags) : u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); - if (format == 
RATE_MCS_HE_MSK) { - u32 gi_ltf = u32_get_bits(rate_n_flags, - RATE_MCS_HE_GI_LTF_MSK); + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + + switch (format) { + case RATE_MCS_EHT_MSK: + /* TODO: GI/LTF/RU. How does the firmware encode them? */ + rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS; + break; + case RATE_MCS_HE_MSK: + gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; @@ -5040,37 +5852,32 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) rinfo->he_dcm = 1; - return; - } - - if (rate_n_flags & RATE_MCS_SGI_MSK) - rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; - - if (format == RATE_MCS_HT_MSK) { + break; + case RATE_MCS_HT_MSK: rinfo->flags |= RATE_INFO_FLAGS_MCS; - - } else if (format == RATE_MCS_VHT_MSK) { + break; + case RATE_MCS_VHT_MSK: rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + break; } - } -static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct station_info *sinfo) +void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (mvmsta->avg_energy) { - sinfo->signal_avg = -(s8)mvmsta->avg_energy; + if (mvmsta->deflink.avg_energy) { + sinfo->signal_avg = -(s8)mvmsta->deflink.avg_energy; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (iwl_mvm_has_tlc_offload(mvm)) { - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->deflink.lq_sta.rs_fw; iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); @@ -5080,23 +5887,24 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) return; mutex_lock(&mvm->mutex); - if (mvmvif->ap_sta_id != mvmsta->sta_id) + if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id) goto unlock; if (iwl_mvm_request_statistics(mvm, false)) goto unlock; - sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + - mvmvif->beacon_stats.accu_num_beacons; + sinfo->rx_beacon = mvmvif->deflink.beacon_stats.num_beacons + + mvmvif->deflink.beacon_stats.accu_num_beacons; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); - if (mvmvif->beacon_stats.avg_signal) { + if (mvmvif->deflink.beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ - sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; + sinfo->rx_beacon_signal_avg = + mvmvif->deflink.beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: @@ -5198,9 +6006,9 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, event->u.ba.ssn); } -static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - const struct ieee80211_event *event) +void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -5282,7 +6090,7 @@ out: } } -static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) +void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct 
iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -5291,7 +6099,7 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) mutex_unlock(&mvm->mutex); } -static int +int iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_ftm_responder_stats *stats) @@ -5320,9 +6128,8 @@ iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, return 0; } -static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_pmsr_request *request) +int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; @@ -5334,9 +6141,8 @@ static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, return ret; } -static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_pmsr_request *request) +void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -5361,10 +6167,6 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - return iwl_mvm_tx_csum_bz(mvm, head, true) == - iwl_mvm_tx_csum_bz(mvm, skb, true); - /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; @@ -5375,6 +6177,29 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); } +int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_set_hw_timestamp *hwts) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u32 protocols = 0; + int ret; + + /* HW timestamping is only supported for a specific station */ + if (!hwts->macaddr) + return -EOPNOTSUPP; + + if (hwts->enable) + protocols = + IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols); + mutex_unlock(&mvm->mutex); + + return ret; +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, @@ -5404,6 +6229,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, .flush = iwl_mvm_mac_flush, + .flush_sta = iwl_mvm_mac_flush_sta, .sched_scan_start = iwl_mvm_mac_sched_scan_start, .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, .set_key = iwl_mvm_mac_set_key, @@ -5417,10 +6243,10 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, - .start_ap = iwl_mvm_start_ap_ibss, - .stop_ap = iwl_mvm_stop_ap_ibss, - .join_ibss = iwl_mvm_start_ap_ibss, - .leave_ibss = iwl_mvm_stop_ap_ibss, + .start_ap = iwl_mvm_start_ap, + .stop_ap = iwl_mvm_stop_ap, + .join_ibss = iwl_mvm_start_ibss, + .leave_ibss = iwl_mvm_stop_ibss, .tx_last_beacon = iwl_mvm_tx_last_beacon, @@ -5461,6 +6287,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS - .sta_add_debugfs = iwl_mvm_sta_add_debugfs, + .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, #endif + .set_hw_timestamp = iwl_mvm_set_hw_timestamp, }; diff --git a/mvm/mld-key.c b/mvm/mld-key.c new file mode 100644 index 
000000000000..2c9f2f71b083 --- /dev/null +++ b/mvm/mld-key.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#include <linux/kernel.h> +#include <net/mac80211.h> +#include "mvm.h" +#include "fw/api/context.h" +#include "fw/api/datapath.h" + +static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = &mvmvif->deflink; + + lockdep_assert_held(&mvm->mutex); + + if (keyconf->link_id >= 0) { + link_info = mvmvif->link[keyconf->link_id]; + if (!link_info) + return 0; + } + + /* AP group keys are per link and should be on the mcast STA */ + if (vif->type == NL80211_IFTYPE_AP && + !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return BIT(link_info->mcast_sta.sta_id); + + /* for client mode use the AP STA also for group keys */ + if (!sta && vif->type == NL80211_IFTYPE_STATION) + sta = mvmvif->ap_sta; + + /* During remove the STA was removed and the group keys come later + * (which sounds like a bad sequence, but remember that to mac80211 the + * group keys have no sta pointer), so we don't have a STA now. + * Since this happens for group keys only, just use the link_info as + * the group keys are per link; make sure that is the case by checking + * we do have a link_id or are not doing MLO. + * Of course the same can be done during add as well, but we must do + * it during remove, since we don't have the mvmvif->ap_sta pointer. + */ + if (!sta && (keyconf->link_id >= 0 || !ieee80211_vif_is_mld(vif))) + return BIT(link_info->ap_sta_id); + + /* STA should be non-NULL now, but iwl_mvm_sta_fw_id_mask() checks */ + + /* pass link_id to filter by it if not -1 (GTK on client) */ + return iwl_mvm_sta_fw_id_mask(mvm, sta, keyconf->link_id); +} + +u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 flags = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + flags |= IWL_SEC_KEY_FLAG_MCAST_KEY; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP104: + flags |= IWL_SEC_KEY_FLAG_KEY_SIZE; + fallthrough; + case WLAN_CIPHER_SUITE_WEP40: + flags |= IWL_SEC_KEY_FLAG_CIPHER_WEP; + break; + case WLAN_CIPHER_SUITE_TKIP: + flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_CCMP: + flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + flags |= IWL_SEC_KEY_FLAG_KEY_SIZE; + fallthrough; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP; + break; + } + + if (!sta && vif->type == NL80211_IFTYPE_STATION) + sta = mvmvif->ap_sta; + + if (!IS_ERR_OR_NULL(sta) && sta->mfp) + flags |= IWL_SEC_KEY_FLAG_MFP; + + return flags; +} + +struct iwl_mvm_sta_key_update_data { + struct ieee80211_sta *sta; + u32 old_sta_mask; + u32 new_sta_mask; + int err; +}; + +static void iwl_mvm_mld_update_sta_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + struct iwl_mvm_sta_key_update_data *data = _data; + struct iwl_mvm *mvm 
= IWL_MAC80211_GET_MVM(hw); + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), + .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask), + .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask), + .u.modify.key_id = cpu_to_le32(key->keyidx), + .u.modify.key_flags = + cpu_to_le32(iwl_mvm_get_sec_flags(mvm, vif, sta, key)), + }; + int err; + + /* only need to do this for pairwise keys (link_id == -1) */ + if (sta != data->sta || key->link_id >= 0) + return; + + err = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cmd), &cmd); + + if (err) + data->err = err; +} + +int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_mvm_sta_key_update_data data = { + .sta = sta, + .old_sta_mask = old_sta_mask, + .new_sta_mask = new_sta_mask, + }; + + ieee80211_iter_keys_rcu(mvm->hw, vif, iwl_mvm_mld_update_sta_key, + &data); + return data.err; +} + +static int __iwl_mvm_sec_key_del(struct iwl_mvm *mvm, u32 sta_mask, + u32 key_flags, u32 keyidx, u32 flags) +{ + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .u.remove.sta_mask = cpu_to_le32(sta_mask), + .u.remove.key_id = cpu_to_le32(keyidx), + .u.remove.key_flags = cpu_to_le32(key_flags), + }; + + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, flags, sizeof(cmd), &cmd); +} + +int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags, + struct ieee80211_key_conf *keyconf) +{ + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .u.add.sta_mask = cpu_to_le32(sta_mask), + .u.add.key_id = cpu_to_le32(keyconf->keyidx), + .u.add.key_flags = cpu_to_le32(key_flags), + .u.add.tx_seq = cpu_to_le64(atomic64_read(&keyconf->tx_pn)), + }; + int max_key_len = sizeof(cmd.u.add.key); + int ret; + + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) + max_key_len -= IWL_SEC_WEP_KEY_OFFSET; + + if (WARN_ON(keyconf->keylen > max_key_len)) + return -EINVAL; + + if (WARN_ON(!sta_mask)) + return -EINVAL; + + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) + memcpy(cmd.u.add.key + IWL_SEC_WEP_KEY_OFFSET, keyconf->key, + keyconf->keylen); + else + memcpy(cmd.u.add.key, keyconf->key, keyconf->keylen); + + if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { + memcpy(cmd.u.add.tkip_mic_rx_key, + keyconf->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + 8); + memcpy(cmd.u.add.tkip_mic_tx_key, + keyconf->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, + 8); + } + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + if (ret) + return ret; + + /* + * For WEP, the same key is used for multicast and unicast so need to + * upload it again. If this fails, remove the original as well. 
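The WEP handling described in the comment above follows an add, add-with-toggled-flag, roll-back-on-failure shape. A standalone sketch of that control flow; submit_key(), remove_key() and MCAST_FLAG are stand-ins for the SEC_KEY add/remove commands and IWL_SEC_KEY_FLAG_MCAST_KEY, not driver APIs.

#include <stdio.h>

#define MCAST_FLAG 0x1u

static int submit_key(unsigned int flags)
{
        printf("add key, flags=0x%x\n", flags);
        return 0;       /* pretend the firmware accepted it */
}

static void remove_key(unsigned int flags)
{
        printf("remove key, flags=0x%x\n", flags);
}

/* upload the same WEP key once as given and once with the mcast bit
 * toggled; if the second upload fails, tear the first one down again */
static int add_wep_key_both(unsigned int flags)
{
        int ret = submit_key(flags);

        if (ret)
                return ret;

        ret = submit_key(flags ^ MCAST_FLAG);
        if (ret)
                remove_key(flags);

        return ret;
}

int main(void)
{
        return add_wep_key_both(MCAST_FLAG);
}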
+ */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { + cmd.u.add.key_flags ^= cpu_to_le32(IWL_SEC_KEY_FLAG_MCAST_KEY); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + if (ret) + __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, + keyconf->keyidx, 0); + } + + return ret; +} + +int iwl_mvm_sec_key_add(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf); + u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = NULL; + int ret; + + if (keyconf->keyidx == 4 || keyconf->keyidx == 5) { + unsigned int link_id = 0; + + /* set to -1 for non-MLO right now */ + if (keyconf->link_id >= 0) + link_id = keyconf->link_id; + + mvm_link = mvmvif->link[link_id]; + if (WARN_ON(!mvm_link)) + return -EINVAL; + + if (mvm_link->igtk) { + IWL_DEBUG_MAC80211(mvm, "remove old IGTK %d\n", + mvm_link->igtk->keyidx); + ret = iwl_mvm_sec_key_del(mvm, vif, sta, + mvm_link->igtk); + if (ret) + IWL_ERR(mvm, + "failed to remove old IGTK (ret=%d)\n", + ret); + } + + WARN_ON(mvm_link->igtk); + } + + ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf); + if (ret) + return ret; + + if (mvm_link) + mvm_link->igtk = keyconf; + + /* We don't really need this, but need it to be not invalid, + * and if we switch links multiple times it might go to be + * invalid when removed. + */ + keyconf->hw_key_idx = 0; + + return 0; +} + +static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf, + u32 flags) +{ + u32 sta_mask = iwl_mvm_get_sec_sta_mask(mvm, vif, sta, keyconf); + u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, sta, keyconf); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON(!sta_mask)) + return -EINVAL; + + if (keyconf->keyidx == 4 || keyconf->keyidx == 5) { + struct iwl_mvm_vif_link_info *mvm_link; + unsigned int link_id = 0; + + /* set to -1 for non-MLO right now */ + if (keyconf->link_id >= 0) + link_id = keyconf->link_id; + + mvm_link = mvmvif->link[link_id]; + if (WARN_ON(!mvm_link)) + return -EINVAL; + + if (mvm_link->igtk == keyconf) { + /* no longer in HW - mark for later */ + mvm_link->igtk->hw_key_idx = STA_KEY_IDX_INVALID; + mvm_link->igtk = NULL; + } + } + + ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx, + flags); + if (ret) + return ret; + + /* For WEP, delete the key again as unicast */ + if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || + keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { + key_flags ^= IWL_SEC_KEY_FLAG_MCAST_KEY; + ret = __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, + keyconf->keyidx, flags); + } + + return ret; +} + +int iwl_mvm_sec_key_del(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf) +{ + return _iwl_mvm_sec_key_del(mvm, vif, sta, keyconf, 0); +} + +static void iwl_mvm_sec_key_remove_ap_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + unsigned int link_id = (uintptr_t)data; + + if (key->hw_key_idx == STA_KEY_IDX_INVALID) + return; + + if (sta) + return; + + if (key->link_id >= 0 && key->link_id != link_id) + 
return; + + _iwl_mvm_sec_key_del(mvm, vif, NULL, key, CMD_ASYNC); + key->hw_key_idx = STA_KEY_IDX_INVALID; +} + +void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *link, + unsigned int link_id) +{ + u32 sec_key_id = WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD); + u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); + + if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION || + link->ap_sta_id == IWL_MVM_INVALID_STA)) + return; + + if (!sec_key_ver) + return; + + ieee80211_iter_keys_rcu(mvm->hw, vif, + iwl_mvm_sec_key_remove_ap_iter, + (void *)(uintptr_t)link_id); +} diff --git a/mvm/mld-mac.c b/mvm/mld-mac.c new file mode 100644 index 000000000000..f313a8d771e4 --- /dev/null +++ b/mvm/mld-mac.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 - 2023 Intel Corporation + */ +#include "mvm.h" + +static void iwl_mvm_mld_set_he_support(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd) +{ + if (vif->type == NL80211_IFTYPE_AP) + cmd->he_ap_support = cpu_to_le16(1); + else + cmd->he_support = cpu_to_le16(1); +} + +static void iwl_mvm_mld_mac_ctxt_cmd_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + cmd->id_and_color = cpu_to_le32(mvmvif->id); + cmd->action = cpu_to_le32(action); + + cmd->mac_type = cpu_to_le32(iwl_mvm_get_mac_type(vif)); + + memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN); + + cmd->he_support = 0; + cmd->eht_support = 0; + + /* should be set by specific context type handler */ + cmd->filter_flags = 0; + + cmd->nic_not_ack_enabled = + cpu_to_le32(!iwl_mvm_is_nic_ack_enabled(mvm, vif)); + + if (iwlwifi_mod_params.disable_11ax) + return; + + /* If we have MLO enabled, then the firmware needs to enable + * address translation for the station(s) we add. That depends + * on having EHT enabled in firmware, which in turn depends on + * mac80211 in the code below. + * However, mac80211 doesn't enable HE/EHT until it has parsed + * the association response successfully, so just skip all that + * and enable both when we have MLO. + */ + if (ieee80211_vif_is_mld(vif)) { + iwl_mvm_mld_set_he_support(mvm, vif, cmd); + cmd->eht_support = cpu_to_le32(1); + return; + } + + rcu_read_lock(); + for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) { + link_conf = rcu_dereference(vif->link_conf[link_id]); + if (!link_conf) + continue; + + if (link_conf->he_support) + iwl_mvm_mld_set_he_support(mvm, vif, cmd); + + /* it's not reasonable to have EHT without HE and FW API doesn't + * support it. Ignore EHT in this case. 
+ */ + if (!link_conf->he_support && link_conf->eht_support) + continue; + + if (link_conf->eht_support) { + cmd->eht_support = cpu_to_le32(1); + break; + } + } + rcu_read_unlock(); +} + +static int iwl_mvm_mld_mac_ctxt_send_cmd(struct iwl_mvm *mvm, + struct iwl_mac_config_cmd *cmd) +{ + int ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD), + 0, sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "Failed to send MAC_CONFIG_CMD (action:%d): %d\n", + le32_to_cpu(cmd->action), ret); + return ret; +} + +static int iwl_mvm_mld_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action, bool force_assoc_off) +{ + struct iwl_mac_config_cmd cmd = {}; + u16 esr_transition_timeout; + + WARN_ON(vif->type != NL80211_IFTYPE_STATION); + + /* Fill the common data for all mac context types */ + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + /* + * We always want to hear MCAST frames, if we're not authorized yet, + * we'll drop them. + */ + cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP); + + if (vif->p2p) + cmd.client.ctwin = + iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(mvm, vif); + + if (vif->cfg.assoc && !force_assoc_off) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + cmd.client.is_assoc = 1; + + if (!mvmvif->authorized && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO)) + cmd.client.data_policy |= + cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE); + + } else { + cmd.client.is_assoc = 0; + + /* Allow beacons to pass through as long as we are not + * associated, or we do not have dtim period information. + */ + cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON); + } + + cmd.client.assoc_id = cpu_to_le16(vif->cfg.aid); + if (ieee80211_vif_is_mld(vif)) { + esr_transition_timeout = + u16_get_bits(vif->cfg.eml_cap, + IEEE80211_EML_CAP_TRANSITION_TIMEOUT); + + cmd.client.esr_transition_timeout = + min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU, + esr_transition_timeout); + cmd.client.medium_sync_delay = + cpu_to_le16(vif->cfg.eml_med_sync_delay); + } + + if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p) + cmd.filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + + if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) + cmd.client.data_policy |= + cpu_to_le16(iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(mvm, vif)); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); + + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC | + MAC_FILTER_IN_CONTROL_AND_MGMT | + MAC_CFG_FILTER_ACCEPT_BEACON | + MAC_CFG_FILTER_ACCEPT_PROBE_REQ | + MAC_CFG_FILTER_ACCEPT_GRP); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); + + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON | + MAC_CFG_FILTER_ACCEPT_PROBE_REQ | + MAC_CFG_FILTER_ACCEPT_GRP); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + 
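The MLD MAC-context helpers above differ mainly in which MAC_CFG_FILTER_* bits they OR into filter_flags before MAC_CONFIG_CMD goes out. A minimal standalone sketch of that selection for the monitor/listener, IBSS and P2P-device cases follows; the names and bit values are illustrative stand-ins, not the firmware API, and the sketch is not part of the commit.

/* Standalone sketch: stand-in flag values, compilable in userspace. */
#include <stdint.h>
#include <stdio.h>

#define FILTER_PROMISC          (1u << 0)
#define FILTER_CTL_AND_MGMT     (1u << 1)
#define FILTER_ACCEPT_BEACON    (1u << 2)
#define FILTER_ACCEPT_PROBE_REQ (1u << 3)
#define FILTER_ACCEPT_GRP       (1u << 4)

enum iface_kind { IFACE_LISTENER, IFACE_IBSS, IFACE_P2P_DEVICE };

/* Mirrors the listener/IBSS/P2P-device filter policies shown above. */
static uint32_t filter_flags_for(enum iface_kind kind)
{
	switch (kind) {
	case IFACE_LISTENER:
		return FILTER_PROMISC | FILTER_CTL_AND_MGMT |
		       FILTER_ACCEPT_BEACON | FILTER_ACCEPT_PROBE_REQ |
		       FILTER_ACCEPT_GRP;
	case IFACE_IBSS:
		return FILTER_ACCEPT_BEACON | FILTER_ACCEPT_PROBE_REQ |
		       FILTER_ACCEPT_GRP;
	case IFACE_P2P_DEVICE:
		/* P2P device only cares about probe requests. */
		return FILTER_ACCEPT_PROBE_REQ;
	}
	return 0;
}

int main(void)
{
	printf("listener   0x%x\n", (unsigned)filter_flags_for(IFACE_LISTENER));
	printf("ibss       0x%x\n", (unsigned)filter_flags_for(IFACE_IBSS));
	printf("p2p device 0x%x\n", (unsigned)filter_flags_for(IFACE_P2P_DEVICE));
	return 0;
}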
WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); + + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + cmd.p2p_dev.is_disc_extended = + iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif); + + /* Override the filter flags to accept only probe requests */ + cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctxt_cmd_ap_go(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_config_cmd cmd = {}; + + WARN_ON(vif->type != NL80211_IFTYPE_AP); + + /* Fill the common data for all mac context types */ + iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + + iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(mvm, mvmvif, + &cmd.filter_flags, + MAC_CFG_FILTER_ACCEPT_PROBE_REQ, + MAC_CFG_FILTER_ACCEPT_BEACON); + + return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); +} + +static int iwl_mvm_mld_mac_ctx_send(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 action, bool force_assoc_off) +{ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + return iwl_mvm_mld_mac_ctxt_cmd_sta(mvm, vif, action, + force_assoc_off); + case NL80211_IFTYPE_AP: + return iwl_mvm_mld_mac_ctxt_cmd_ap_go(mvm, vif, action); + case NL80211_IFTYPE_MONITOR: + return iwl_mvm_mld_mac_ctxt_cmd_listener(mvm, vif, action); + case NL80211_IFTYPE_P2P_DEVICE: + return iwl_mvm_mld_mac_ctxt_cmd_p2p_device(mvm, vif, action); + case NL80211_IFTYPE_ADHOC: + return iwl_mvm_mld_mac_ctxt_cmd_ibss(mvm, vif, action); + default: + break; + } + + return -EOPNOTSUPP; +} + +int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) + return -EOPNOTSUPP; + + if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + ret = iwl_mvm_mld_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, + true); + if (ret) + return ret; + + /* will only do anything at resume from D3 time */ + iwl_mvm_set_last_nonqos_seq(mvm, vif); + + mvmvif->uploaded = true; + return 0; +} + +int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool force_assoc_off) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) + return -EOPNOTSUPP; + + if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + return iwl_mvm_mld_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, + force_assoc_off); +} + +int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_config_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .id_and_color = cpu_to_le32(mvmvif->id), + }; + int ret; + + if (WARN_ON_ONCE(vif->type == NL80211_IFTYPE_NAN)) + return -EOPNOTSUPP; + + if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n", + vif->addr, ieee80211_vif_type_p2p(vif))) + return -EIO; + + ret = iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd); + if (ret) + return ret; + + mvmvif->uploaded = false; + + return 0; +} diff --git a/mvm/mld-mac80211.c b/mvm/mld-mac80211.c new file mode 100644 index 000000000000..8b6c641772ee --- /dev/null +++ b/mvm/mld-mac80211.c @@ -0,0 +1,1215 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * 
Copyright (C) 2022-2023 Intel Corporation + */ +#include "mvm.h" + +static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mutex_lock(&mvm->mutex); + + mvmvif->mvm = mvm; + + /* Not much to do here. The stack will not allow interface + * types or combinations that we didn't advertise, so we + * don't really have to check the types. + */ + + /* make sure that beacon statistics don't go backwards with FW reset */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; + + /* Allocate resources for the MAC context, and add it to the fw */ + ret = iwl_mvm_mac_ctxt_init(mvm, vif); + if (ret) + goto out_unlock; + + rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); + + mvmvif->features |= hw->netdev_features; + + /* reset deflink MLO parameters */ + mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + mvmvif->deflink.active = 0; + /* the first link always points to the default one */ + mvmvif->link[0] = &mvmvif->deflink; + + ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif); + if (ret) + goto out_unlock; + + /* beacon filtering */ + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + if (ret) + goto out_remove_mac; + + if (!mvm->bf_allowed_vif && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { + mvm->bf_allowed_vif = mvmvif; + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI; + } + + /* + * P2P_DEVICE interface does not have a channel context assigned to it, + * so a dedicated PHY context is allocated to it and the corresponding + * MAC context is bound to it at this stage. 
+ */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); + if (!mvmvif->deflink.phy_ctxt) { + ret = -ENOSPC; + goto out_free_bf; + } + + iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt); + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_unref_phy; + + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); + if (ret) + goto out_remove_link; + + ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf); + if (ret) + goto out_remove_link; + + /* Save a pointer to p2p device vif, so it can later be used to + * update the p2p device MAC when a GO is started/stopped + */ + mvm->p2p_device_vif = vif; + } else { + ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + if (ret) + goto out_free_bf; + } + + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + goto out_free_bf; + + iwl_mvm_tcm_add_vif(mvm, vif); + INIT_DELAYED_WORK(&mvmvif->csa_work, + iwl_mvm_channel_switch_disconnect_wk); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mvm->monitor_on = true; + ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); + } + + iwl_mvm_vif_dbgfs_register(mvm, vif); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !mvm->csme_vif && mvm->mei_registered) { + iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); + iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); + mvm->csme_vif = vif; + } + + goto out_unlock; + + out_remove_link: + iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + out_unref_phy: + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + out_free_bf: + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + } + out_remove_mac: + mvmvif->deflink.phy_ctxt = NULL; + mvmvif->link[0] = NULL; + iwl_mvm_mld_mac_ctxt_remove(mvm, vif); + out_unlock: + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_probe_resp_data *probe_data; + + iwl_mvm_prepare_mac_removal(mvm, vif); + + if (!(vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) + iwl_mvm_tcm_rm_vif(mvm, vif); + + mutex_lock(&mvm->mutex); + + if (vif == mvm->csme_vif) { + iwl_mei_set_netdev(NULL); + mvm->csme_vif = NULL; + } + + if (mvm->bf_allowed_vif == mvmvif) { + mvm->bf_allowed_vif = NULL; + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + } + + if (vif->bss_conf.ftm_responder) + memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); + + iwl_mvm_vif_dbgfs_clean(mvm, vif); + + /* For AP/GO interface, the tear down of the resources allocated to the + * interface is be handled as part of the stop_ap flow. 
+ */ + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { +#ifdef CONFIG_NL80211_TESTMODE + if (vif == mvm->noa_vif) { + mvm->noa_vif = NULL; + mvm->noa_duration = 0; + } +#endif + } + + iwl_mvm_power_update_mac(mvm); + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mvm->p2p_device_vif = NULL; + + /* P2P device uses only one link */ + iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf); + iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + mvmvif->deflink.phy_ctxt = NULL; + } else { + iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + } + + iwl_mvm_mld_mac_ctxt_remove(mvm, vif); + + RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); + + probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data, + lockdep_is_held(&mvm->mutex)); + RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mvm->monitor_on = false; + __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); + } + + mutex_unlock(&mvm->mutex); +} + +static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif) +{ + unsigned int n_active = 0; + int i; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + struct ieee80211_bss_conf *link_conf; + + link_conf = link_conf_dereference_protected(vif, i); + if (link_conf && + rcu_access_pointer(link_conf->chanctx_conf)) + n_active++; + } + + return n_active; +} + +static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int link_id, ret = 0; + + mvmvif->esr_active = true; + + /* Disable SMPS overrideing by user */ + vif->driver_flags |= IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + + iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, + IEEE80211_SMPS_OFF); + + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id]; + + if (!link->phy_ctxt) + continue; + + ret = iwl_mvm_phy_send_rlc(mvm, link->phy_ctxt, 2, 2); + if (ret) + break; + + link->phy_ctxt->rlc_disabled = true; + } + + return ret; +} + +static int +__iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) +{ + u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; + struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int link_id = link_conf->link_id; + int ret; + + /* if the assigned one was not counted yet, count it now */ + if (!rcu_access_pointer(link_conf->chanctx_conf)) + n_active++; + + if (n_active > iwl_mvm_max_active_links(mvm, vif)) + return -EOPNOTSUPP; + + if (WARN_ON_ONCE(!mvmvif->link[link_id])) + return -EINVAL; + + /* mac parameters such as HE support can change at this stage + * For sta, need first to configure correct state from drv_sta_state + * and only after that update mac config. 
+ */ + if (vif->type == NL80211_IFTYPE_AP) { + ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + if (ret) { + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + return -EINVAL; + } + } + + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { + mvmvif->link[link_id]->listen_lmac = true; + ret = iwl_mvm_esr_mode_active(mvm, vif); + if (ret) { + IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret); + return ret; + } + } + + mvmvif->link[link_id]->phy_ctxt = phy_ctxt; + + if (switching_chanctx) { + /* reactivate if we turned this off during channel switch */ + if (vif->type == NL80211_IFTYPE_AP) + mvmvif->ap_ibss_active = true; + } + + /* send it first with phy context ID */ + ret = iwl_mvm_link_changed(mvm, vif, link_conf, 0, false); + if (ret) + goto out; + + /* Initialize rate control for the AP station, since we might be + * doing a link switch here - we cannot initialize it before since + * this needs the phy context assigned (and in FW?), and we cannot + * do it later because it needs to be initialized as soon as we're + * able to TX on the link, i.e. when active. + * + * Firmware restart isn't quite correct yet for MLO, but we don't + * need to do it in that case anyway since it will happen from the + * normal station state callback. + */ + if (mvmvif->ap_sta && + !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct ieee80211_link_sta *link_sta; + + rcu_read_lock(); + link_sta = rcu_dereference(mvmvif->ap_sta->link[link_id]); + + if (!WARN_ON_ONCE(!link_sta)) + iwl_mvm_rs_rate_init(mvm, vif, mvmvif->ap_sta, + link_conf, link_sta, + phy_ctxt->channel->band); + rcu_read_unlock(); + } + + /* then activate */ + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_RATES_INFO, + true); + if (ret) + goto out; + + /* + * Power state must be updated before quotas, + * otherwise fw will complain. 
+ */ + iwl_mvm_power_update_mac(mvm); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + ret = iwl_mvm_mld_add_snif_sta(mvm, vif, link_conf); + if (ret) + goto deactivate; + } + + return 0; + +deactivate: + iwl_mvm_link_changed(mvm, vif, link_conf, LINK_CONTEXT_MODIFY_ACTIVE, + false); +out: + mvmvif->link[link_id]->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); + return ret; +} + +static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + int link_id, ret = 0; + + mvmvif->esr_active = false; + + vif->driver_flags &= ~IEEE80211_VIF_DISABLE_SMPS_OVERRIDE; + + iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW, + IEEE80211_SMPS_AUTOMATIC); + + for_each_vif_active_link(vif, link_conf, link_id) { + struct ieee80211_chanctx_conf *chanctx_conf; + struct iwl_mvm_phy_ctxt *phy_ctxt; + u8 static_chains, dynamic_chains; + + mvmvif->link[link_id]->listen_lmac = false; + + rcu_read_lock(); + + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); + phy_ctxt = mvmvif->link[link_id]->phy_ctxt; + + if (!chanctx_conf || !phy_ctxt) { + rcu_read_unlock(); + continue; + } + + phy_ctxt->rlc_disabled = false; + static_chains = chanctx_conf->rx_chains_static; + dynamic_chains = chanctx_conf->rx_chains_dynamic; + + rcu_read_unlock(); + + ret = iwl_mvm_phy_send_rlc(mvm, phy_ctxt, static_chains, + dynamic_chains); + if (ret) + break; + } + + return ret; +} + +static void +__iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx) + +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + unsigned int link_id = link_conf->link_id; + + /* shouldn't happen, but verify link_id is valid before accessing */ + if (WARN_ON_ONCE(!mvmvif->link[link_id])) + return; + + if (vif->type == NL80211_IFTYPE_AP && switching_chanctx) { + mvmvif->csa_countdown = false; + + /* Set CS bit on all the stations */ + iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); + + /* Save blocked iface, the timeout is set on the next beacon */ + rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); + + mvmvif->ap_ibss_active = false; + } + + if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) { + int ret = iwl_mvm_esr_mode_inactive(mvm, vif); + + if (ret) + IWL_ERR(mvm, "failed to deactivate ESR mode (%d)\n", + ret); + } + + if (vif->type == NL80211_IFTYPE_MONITOR) + iwl_mvm_mld_rm_snif_sta(mvm, vif); + + iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + + if (switching_chanctx) + return; + mvmvif->link[link_id]->phy_ctxt = NULL; + iwl_mvm_power_update_mac(mvm); +} + +static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + __iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, 
link_conf, ctx, false); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret; + + mutex_lock(&mvm->mutex); + /* Send the beacon template */ + ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); + if (ret) + goto out_unlock; + + /* the link should be already activated when assigning chan context */ + ret = iwl_mvm_link_changed(mvm, vif, link_conf, + LINK_CONTEXT_MODIFY_ALL & + ~LINK_CONTEXT_MODIFY_ACTIVE, + true); + if (ret) + goto out_unlock; + + ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf); + if (ret) + goto out_unlock; + + /* Send the bcast station. At this stage the TBTT and DTIM time + * events are added and applied to the scheduler + */ + ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, link_conf); + if (ret) + goto out_rm_mcast; + + if (iwl_mvm_start_ap_ibss_common(hw, vif, &ret)) + goto out_failed; + + /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mld_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + + iwl_mvm_bt_coex_vif_change(mvm); + + /* we don't support TDLS during DCM */ + if (iwl_mvm_phy_ctx_count(mvm) > 1) + iwl_mvm_teardown_tdls_peers(mvm); + + iwl_mvm_ftm_restart_responder(mvm, vif, link_conf); + + goto out_unlock; + +out_failed: + iwl_mvm_power_update_mac(mvm); + mvmvif->ap_ibss_active = false; + iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf); +out_rm_mcast: + iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); +out_unlock: + mutex_unlock(&mvm->mutex); + return ret; +} + +static int iwl_mvm_mld_start_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + return iwl_mvm_mld_start_ap_ibss(hw, vif, link_conf); +} + +static int iwl_mvm_mld_start_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return iwl_mvm_mld_start_ap_ibss(hw, vif, &vif->bss_conf); +} + +static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + iwl_mvm_stop_ap_ibss_common(mvm, vif); + + /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ + if (vif->p2p && mvm->p2p_device_vif) + iwl_mvm_mld_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + + iwl_mvm_ftm_responder_clear(mvm, vif); + + iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf); + iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); + + iwl_mvm_power_update_mac(mvm); + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + iwl_mvm_mld_stop_ap_ibss(hw, vif, link_conf); +} + +static void iwl_mvm_mld_stop_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + iwl_mvm_mld_stop_ap_ibss(hw, vif, &vif->bss_conf); +} + +static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + static const struct iwl_mvm_sta_state_ops callbacks = { + .add_sta = iwl_mvm_mld_add_sta, + .update_sta = iwl_mvm_mld_update_sta, + .rm_sta = iwl_mvm_mld_rm_sta, + .mac_ctxt_changed = iwl_mvm_mld_mac_ctxt_changed, + }; + + return iwl_mvm_mac_sta_state_common(hw, vif, sta, old_state, 
new_state, + &callbacks); +} + +static void +iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool has_he, has_eht; + u32 link_changes = 0; + int ret; + + if (WARN_ON_ONCE(!mvmvif->link[link_conf->link_id])) + return; + + has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax; + has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be; + + /* Update EDCA params */ + if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos) + link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS; + + if (changes & BSS_CHANGED_ERP_SLOT) + link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO; + + if (vif->cfg.assoc && (has_he || has_eht)) { + IWL_DEBUG_MAC80211(mvm, "Associated in HE mode\n"); + link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; + } + + /* Update EHT Puncturing info */ + if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht) + link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS; + + if (link_changes) { + ret = iwl_mvm_link_changed(mvm, vif, link_conf, link_changes, + true); + if (ret) + IWL_ERR(mvm, "failed to update link\n"); + } + + ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + if (ret) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid, + ETH_ALEN); + + iwl_mvm_bss_info_changed_station_common(mvm, vif, link_conf, changes); +} + +static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif) +{ + int i; + + for_each_mvm_vif_valid_link(mvmvif, i) { + if (mvmvif->link[i]->ap_sta_id != IWL_MVM_INVALID_STA) + return true; + } + + return false; +} + +static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int i, ret; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + for_each_mvm_vif_valid_link(mvmvif, i) { + struct iwl_mvm_vif_link_info *link = mvmvif->link[i]; + + if (!link) + continue; + + iwl_mvm_sec_key_remove_ap(mvm, vif, link, i); + ret = iwl_mvm_mld_rm_sta_id(mvm, link->ap_sta_id); + if (ret) + IWL_ERR(mvm, "failed to remove AP station\n"); + + link->ap_sta_id = IWL_MVM_INVALID_STA; + } +} + +static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + bool protect = false; + unsigned int i; + int ret; + + /* This might get called without active links during the + * chanctx switch, but we don't care about it anyway. 
+ */ + if (changes == BSS_CHANGED_IDLE) + return; + + ret = iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + if (ret) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + mvmvif->associated = vif->cfg.assoc; + + if (!(changes & BSS_CHANGED_ASSOC)) + return; + + if (vif->cfg.assoc) { + /* clear statistics to get clean beacon counter */ + iwl_mvm_request_statistics(mvm, true); + iwl_mvm_sf_update(mvm, vif, false); + iwl_mvm_power_vif_assoc(mvm, vif); + + for_each_mvm_vif_valid_link(mvmvif, i) { + memset(&mvmvif->link[i]->beacon_stats, 0, + sizeof(mvmvif->link[i]->beacon_stats)); + + if (vif->p2p) { + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC, i); + } + + rcu_read_lock(); + link_conf = rcu_dereference(vif->link_conf[i]); + if (link_conf && !link_conf->dtim_period) + protect = true; + rcu_read_unlock(); + } + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + protect) { + /* If we're not restarting and still haven't + * heard a beacon (dtim period unknown) then + * make sure we still have enough minimum time + * remaining in the time event, since the auth + * might actually have taken quite a while + * (especially for SAE) and so the remaining + * time could be small without us having heard + * a beacon yet. + */ + iwl_mvm_protect_assoc(mvm, vif, 0); + } + + iwl_mvm_sf_update(mvm, vif, false); + + /* FIXME: need to decide about misbehaving AP handling */ + iwl_mvm_power_vif_assoc(mvm, vif); + } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) { + iwl_mvm_mei_host_disassociated(mvm); + + /* If update fails - SF might be running in associated + * mode while disassociated - which is forbidden. + */ + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), + "Failed to update SF upon disassociation\n"); + + /* If we get an assert during the connection (after the + * station has been added, but before the vif is set + * to associated), mac80211 will re-add the station and + * then configure the vif. Since the vif is not + * associated, we would remove the station here and + * this would fail the recovery. 
+ */ + iwl_mvm_mld_vif_delete_all_stas(mvm, vif); + } + + iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes); +} + +static void +iwl_mvm_mld_link_info_changed_ap_ibss(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 link_changes = LINK_CONTEXT_MODIFY_PROTECT_FLAGS | + LINK_CONTEXT_MODIFY_QOS_PARAMS; + + /* Changes will be applied when the AP/IBSS is started */ + if (!mvmvif->ap_ibss_active) + return; + + if (link_conf->he_support) + link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; + + if (changes & BSS_CHANGED_ERP_SLOT) + link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO; + + if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_SLOT | + BSS_CHANGED_HT | + BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS | + BSS_CHANGED_HE_BSS_COLOR) && + iwl_mvm_link_changed(mvm, vif, link_conf, + link_changes, true)) + IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + + /* Need to send a new beacon template to the FW */ + if (changes & BSS_CHANGED_BEACON && + iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf)) + IWL_WARN(mvm, "Failed updating beacon data\n"); + + /* FIXME: need to decide if we need FTM responder per link */ + if (changes & BSS_CHANGED_FTM_RESPONDER) { + int ret = iwl_mvm_ftm_start_responder(mvm, vif, link_conf); + + if (ret) + IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", + ret); + } +} + +static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + iwl_mvm_mld_link_info_changed_station(mvm, vif, link_conf, + changes); + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + iwl_mvm_mld_link_info_changed_ap_ibss(mvm, vif, link_conf, + changes); + break; + case NL80211_IFTYPE_MONITOR: + if (changes & BSS_CHANGED_MU_GROUPS) + iwl_mvm_update_mu_groups(mvm, vif); + break; + default: + /* shouldn't happen */ + WARN_ON_ONCE(1); + } + + if (changes & BSS_CHANGED_TXPOWER) { + IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", + link_conf->txpower); + iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower); + } + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + + if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) + iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes); + + mutex_unlock(&mvm->mutex); +} + +static int +iwl_mvm_mld_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + static const struct iwl_mvm_switch_vif_chanctx_ops ops = { + .__assign_vif_chanctx = __iwl_mvm_mld_assign_vif_chanctx, + .__unassign_vif_chanctx = __iwl_mvm_mld_unassign_vif_chanctx, + }; + + return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs, mode, &ops); +} + +static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int filter_flags, + unsigned int changed_flags) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* We support only filter for probe requests */ + if (!(changed_flags & FIF_PROBE_REQ)) + return; + + 
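The link_info_changed paths above all follow one pattern: fold a 64-bit "what changed" bitmap from mac80211 into a small set of LINK_CONTEXT_MODIFY_* bits and send a single link update. A minimal standalone sketch of that folding, with hypothetical stand-in bit values rather than the driver's real constants:

/* Standalone sketch; bit values are illustrative, not the driver's. */
#include <stdint.h>
#include <stdio.h>

#define CHANGED_QOS       (1ull << 0)
#define CHANGED_ERP_SLOT  (1ull << 1)
#define CHANGED_EHT_PUNCT (1ull << 2)

#define MODIFY_QOS_PARAMS (1u << 0)
#define MODIFY_RATES_INFO (1u << 1)
#define MODIFY_HE_PARAMS  (1u << 2)
#define MODIFY_EHT_PARAMS (1u << 3)

/* Fold mac80211-style change bits into one firmware link update mask. */
static uint32_t fold_link_changes(uint64_t changes, int assoc, int has_he,
				  int has_eht)
{
	uint32_t link_changes = 0;

	if ((changes & CHANGED_QOS) && assoc)
		link_changes |= MODIFY_QOS_PARAMS;
	if (changes & CHANGED_ERP_SLOT)
		link_changes |= MODIFY_RATES_INFO;
	if (assoc && (has_he || has_eht))
		link_changes |= MODIFY_HE_PARAMS;
	if ((changes & CHANGED_EHT_PUNCT) && assoc && has_eht)
		link_changes |= MODIFY_EHT_PARAMS;

	return link_changes;
}

int main(void)
{
	/* Associated HE/EHT client whose QoS parameters just changed. */
	printf("link_changes = 0x%x\n",
	       (unsigned)fold_link_changes(CHANGED_QOS, 1, 1, 1));
	return 0;
}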
/* Supported only for p2p client interfaces */ + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc || + !vif->p2p) + return; + + mutex_lock(&mvm->mutex); + iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); + mutex_unlock(&mvm->mutex); +} + +static int +iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id]; + + if (!mvm_link) + return -EINVAL; + + mvm_link->queue_params[ac] = *params; + + /* No need to update right away, we'll get BSS_CHANGED_QOS + * The exception is P2P_DEVICE interface which needs immediate update. + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_QOS_PARAMS, + true); + mutex_unlock(&mvm->mutex); + return ret; + } + return 0; +} + +static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *new_phy_ctxt) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ret = 0; + + lockdep_assert_held(&mvm->mutex); + + /* Inorder to change the phy_ctx of a link, the link needs to be + * inactive. Therefore, first deactivate the link, then change its + * phy_ctx, and then activate it again. + */ + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE, false); + if (WARN(ret, "Failed to deactivate link\n")) + return ret; + + iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt); + + mvmvif->deflink.phy_ctxt = new_phy_ctxt; + + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false); + if (WARN(ret, "Failed to deactivate link\n")) + return ret; + + ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_ACTIVE, true); + WARN(ret, "Failed binding P2P_DEVICE\n"); + return ret; +} + +static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type) +{ + static const struct iwl_mvm_roc_ops ops = { + .add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta, + .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx, + }; + + return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops); +} + +static int +iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + unsigned int n_active = iwl_mvm_mld_count_active_links(vif); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + u16 removed = old_links & ~new_links; + u16 added = new_links & ~old_links; + int err, i; + + if (hweight16(new_links) > 1 && + n_active > iwl_mvm_max_active_links(mvm, vif)) + return -EOPNOTSUPP; + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + int r; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + break; + + if (!(added & BIT(i))) + continue; + new_link[i] = kzalloc(sizeof(*new_link[i]), GFP_KERNEL); + if (!new_link[i]) { + err = -ENOMEM; + goto free; + } + + new_link[i]->bcast_sta.sta_id = IWL_MVM_INVALID_STA; + new_link[i]->mcast_sta.sta_id = IWL_MVM_INVALID_STA; + new_link[i]->ap_sta_id = IWL_MVM_INVALID_STA; + 
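iwl_mvm_mld_change_vif_links above operates on two 16-bit link bitmaps and only touches their symmetric difference: links in old but not new are torn down, links in new but not old are allocated and added. A tiny standalone sketch of that removed/added split, using plain C and no driver types:

/* Standalone sketch of the removed/added link-bitmap split. */
#include <stdint.h>
#include <stdio.h>

static void diff_links(uint16_t old_links, uint16_t new_links)
{
	uint16_t removed = old_links & ~new_links;
	uint16_t added = new_links & ~old_links;
	int i;

	for (i = 0; i < 16; i++) {
		if (removed & (1u << i))
			printf("link %d: tear down\n", i);
		else if (added & (1u << i))
			printf("link %d: allocate and add\n", i);
	}
}

int main(void)
{
	/* e.g. switching from links {0,1} to links {1,2} */
	diff_links(0x3, 0x6);
	return 0;
}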
new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; + + for (r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++) + new_link[i]->smps_requests[r] = + IEEE80211_SMPS_AUTOMATIC; + } + + mutex_lock(&mvm->mutex); + + if (old_links == 0) { + err = iwl_mvm_disable_link(mvm, vif, &vif->bss_conf); + if (err) + goto out_err; + mvmvif->link[0] = NULL; + } + + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + if (removed & BIT(i)) { + struct ieee80211_bss_conf *link_conf = old[i]; + + err = iwl_mvm_disable_link(mvm, vif, link_conf); + if (err) + goto out_err; + kfree(mvmvif->link[i]); + mvmvif->link[i] = NULL; + } else if (added & BIT(i)) { + struct ieee80211_bss_conf *link_conf; + + link_conf = link_conf_dereference_protected(vif, i); + if (WARN_ON(!link_conf)) + continue; + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) + mvmvif->link[i] = new_link[i]; + new_link[i] = NULL; + err = iwl_mvm_add_link(mvm, vif, link_conf); + if (err) + goto out_err; + } + } + + if (err) + goto out_err; + + err = 0; + if (new_links == 0) { + mvmvif->link[0] = &mvmvif->deflink; + err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf); + } + +out_err: + /* we really don't have a good way to roll back here ... */ + mutex_unlock(&mvm->mutex); + +free: + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) + kfree(new_link[i]); + return err; +} + +static int +iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); + mutex_unlock(&mvm->mutex); + + return ret; +} + +const struct ieee80211_ops iwl_mvm_mld_hw_ops = { + .tx = iwl_mvm_mac_tx, + .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, + .ampdu_action = iwl_mvm_mac_ampdu_action, + .get_antenna = iwl_mvm_op_get_antenna, + .start = iwl_mvm_mac_start, + .reconfig_complete = iwl_mvm_mac_reconfig_complete, + .stop = iwl_mvm_mac_stop, + .add_interface = iwl_mvm_mld_mac_add_interface, + .remove_interface = iwl_mvm_mld_mac_remove_interface, + .config = iwl_mvm_mac_config, + .prepare_multicast = iwl_mvm_prepare_multicast, + .configure_filter = iwl_mvm_configure_filter, + .config_iface_filter = iwl_mvm_mld_config_iface_filter, + .link_info_changed = iwl_mvm_mld_link_info_changed, + .vif_cfg_changed = iwl_mvm_mld_vif_cfg_changed, + .hw_scan = iwl_mvm_mac_hw_scan, + .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, + .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, + .sta_state = iwl_mvm_mld_mac_sta_state, + .sta_notify = iwl_mvm_mac_sta_notify, + .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, + .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, + .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, + .sta_rc_update = iwl_mvm_sta_rc_update, + .conf_tx = iwl_mvm_mld_mac_conf_tx, + .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, + .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, + .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, + .flush = iwl_mvm_mac_flush, + .flush_sta = iwl_mvm_mac_flush_sta, + .sched_scan_start = iwl_mvm_mac_sched_scan_start, + .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, + .set_key = iwl_mvm_mac_set_key, + .update_tkip_key = iwl_mvm_mac_update_tkip_key, + .remain_on_channel = iwl_mvm_mld_roc, + .cancel_remain_on_channel = iwl_mvm_cancel_roc, + .add_chanctx = iwl_mvm_add_chanctx, + .remove_chanctx = iwl_mvm_remove_chanctx, + .change_chanctx = iwl_mvm_change_chanctx, + 
.assign_vif_chanctx = iwl_mvm_mld_assign_vif_chanctx, + .unassign_vif_chanctx = iwl_mvm_mld_unassign_vif_chanctx, + .switch_vif_chanctx = iwl_mvm_mld_switch_vif_chanctx, + + .start_ap = iwl_mvm_mld_start_ap, + .stop_ap = iwl_mvm_mld_stop_ap, + .join_ibss = iwl_mvm_mld_start_ibss, + .leave_ibss = iwl_mvm_mld_stop_ibss, + + .tx_last_beacon = iwl_mvm_tx_last_beacon, + + .set_tim = iwl_mvm_set_tim, + + .channel_switch = iwl_mvm_channel_switch, + .pre_channel_switch = iwl_mvm_pre_channel_switch, + .post_channel_switch = iwl_mvm_post_channel_switch, + .abort_channel_switch = iwl_mvm_abort_channel_switch, + .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, + + .tdls_channel_switch = iwl_mvm_tdls_channel_switch, + .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, + .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, + + .event_callback = iwl_mvm_mac_event_callback, + + .sync_rx_queues = iwl_mvm_sync_rx_queues, + + CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) + +#ifdef CONFIG_PM_SLEEP + /* look at d3.c */ + .suspend = iwl_mvm_suspend, + .resume = iwl_mvm_resume, + .set_wakeup = iwl_mvm_set_wakeup, + .set_rekey_data = iwl_mvm_set_rekey_data, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = iwl_mvm_ipv6_addr_change, +#endif + .set_default_unicast_key = iwl_mvm_set_default_unicast_key, +#endif + .get_survey = iwl_mvm_mac_get_survey, + .sta_statistics = iwl_mvm_mac_sta_statistics, + .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, + .start_pmsr = iwl_mvm_start_pmsr, + .abort_pmsr = iwl_mvm_abort_pmsr, + +#ifdef CONFIG_IWLWIFI_DEBUGFS + .link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs, +#endif + .set_hw_timestamp = iwl_mvm_set_hw_timestamp, + + .change_vif_links = iwl_mvm_mld_change_vif_links, + .change_sta_links = iwl_mvm_mld_change_sta_links, +}; diff --git a/mvm/mld-sta.c b/mvm/mld-sta.c new file mode 100644 index 000000000000..524852cf5cd2 --- /dev/null +++ b/mvm/mld-sta.c @@ -0,0 +1,1181 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022-2023 Intel Corporation + */ +#include "mvm.h" +#include "time-sync.h" +#include "sta.h" + +u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int filter_link_id) +{ + struct iwl_mvm_sta *mvmsta; + unsigned int link_id; + u32 result = 0; + + if (!sta) + return 0; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + /* it's easy when the STA is not an MLD */ + if (!sta->valid_links) + return BIT(mvmsta->deflink.sta_id); + + /* but if it is an MLD, get the mask of all the FW STAs it has ... 
*/ + for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { + struct iwl_mvm_link_sta *link_sta; + + /* unless we have a specific link in mind */ + if (filter_link_id >= 0 && link_id != filter_link_id) + continue; + + link_sta = + rcu_dereference_check(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (!link_sta) + continue; + + result |= BIT(link_sta->sta_id); + } + + return result; +} + +static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm, + struct iwl_mvm_sta_cfg_cmd *cmd) +{ + int ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), + 0, sizeof(*cmd), cmd); + if (ret) + IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret); + return ret; +} + +/* + * Add an internal station to the FW table + */ +static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, int link_id) +{ + struct iwl_mvm_sta_cfg_cmd cmd; + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(cmd)); + cmd.sta_id = cpu_to_le32((u8)sta->sta_id); + + cmd.link_id = cpu_to_le32(link_id); + + cmd.station_type = cpu_to_le32(sta->type); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) && + sta->type == STATION_TYPE_BCAST_MGMT) + cmd.mfp = cpu_to_le32(1); + + if (addr) { + memcpy(cmd.peer_mld_address, addr, ETH_ALEN); + memcpy(cmd.peer_link_address, addr, ETH_ALEN); + } + + return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); +} + +/* + * Remove a station from the FW table. Before sending the command to remove + * the station validate that the station is indeed known to the driver (sanity + * only). + */ +static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id) +{ + struct iwl_mvm_remove_sta_cmd rm_sta_cmd = { + .sta_id = cpu_to_le32(sta_id), + }; + int ret; + + /* Note: internal stations are marked as error values */ + if (!rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) { + IWL_ERR(mvm, "Invalid station id %d\n", sta_id); + return -EINVAL; + } + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD), + 0, sizeof(rm_sta_cmd), &rm_sta_cmd); + if (ret) { + IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); + return ret; + } + + return 0; +} + +static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 lmac_id) +{ + int ret; + + struct iwl_mvm_aux_sta_cmd cmd = { + .sta_id = cpu_to_le32(sta->sta_id), + .lmac_id = cpu_to_le32(lmac_id), + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD), + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed to send AUX_STA_CMD\n"); + return ret; +} + +/* + * Adds an internal sta to the FW table with its queues + */ +int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, int link_id, + u16 *queue, u8 tid, + unsigned int *_wdg_timeout) +{ + int ret, txq; + unsigned int wdg_timeout = _wdg_timeout ? 
*_wdg_timeout : + mvm->trans->trans_cfg->base_params->wd_timeout; + + if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + return -ENOSPC; + + if (sta->type == STATION_TYPE_AUX) + ret = iwl_mvm_add_aux_sta_to_fw(mvm, sta, link_id); + else + ret = iwl_mvm_mld_add_int_sta_to_fw(mvm, sta, addr, link_id); + if (ret) + return ret; + + /* + * For 22000 firmware and on we cannot add queue to a station unknown + * to firmware so enable queue here - after the station was added + */ + txq = iwl_mvm_tvqm_enable_txq(mvm, NULL, sta->sta_id, tid, + wdg_timeout); + if (txq < 0) { + iwl_mvm_mld_rm_sta_from_fw(mvm, sta->sta_id); + return txq; + } + *queue = txq; + + return 0; +} + +/* + * Adds a new int sta: allocate it in the driver, add it to the FW table, + * and add its queues. + */ +static int iwl_mvm_mld_add_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *int_sta, u16 *queue, + enum nl80211_iftype iftype, + enum iwl_fw_sta_type sta_type, + int link_id, const u8 *addr, u8 tid, + unsigned int *wdg_timeout) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* qmask argument is not used in the new tx api, send a don't care */ + ret = iwl_mvm_allocate_int_sta(mvm, int_sta, 0, iftype, + sta_type); + if (ret) + return ret; + + ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, int_sta, addr, link_id, + queue, tid, wdg_timeout); + if (ret) { + iwl_mvm_dealloc_int_sta(mvm, int_sta); + return ret; + } + + return 0; +} + +/* Allocate a new station entry for the broadcast station to the given vif, + * and send it to the FW. + * Note that each P2P mac should have its own broadcast station. + */ +int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; + struct iwl_mvm_int_sta *bsta = &mvm_link->bcast_sta; + static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + const u8 *baddr = _baddr; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + u16 *queue; + + lockdep_assert_held(&mvm->mutex); + + if (vif->type == NL80211_IFTYPE_ADHOC) + baddr = link_conf->bssid; + + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + queue = &mvm_link->mgmt_queue; + } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + queue = &mvm->p2p_dev_queue; + } else { + WARN(1, "Missing required TXQ for adding bcast STA\n"); + return -EINVAL; + } + + return iwl_mvm_mld_add_int_sta(mvm, bsta, queue, + ieee80211_vif_type_p2p(vif), + STATION_TYPE_BCAST_MGMT, + mvm_link->fw_link_id, baddr, + IWL_MAX_TID_COUNT, &wdg_timeout); +} + +/* Allocate a new station entry for the broadcast station to the given vif, + * and send it to the FW. + * Note that each AP/GO mac should have its own multicast station. 
+ */ +int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; + struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta; + static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; + const u8 *maddr = _maddr; + unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC)) + return -EOPNOTSUPP; + + /* In IBSS, ieee80211_check_queues() sets the cab_queue to be + * invalid, so make sure we use the queue we want. + * Note that this is done here as we want to avoid making DQA + * changes in mac80211 layer. + */ + if (vif->type == NL80211_IFTYPE_ADHOC) + mvm_link->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + + return iwl_mvm_mld_add_int_sta(mvm, msta, &mvm_link->cab_queue, + vif->type, STATION_TYPE_MCAST, + mvm_link->fw_link_id, maddr, 0, + &timeout); +} + +/* Allocate a new station entry for the sniffer station to the given vif, + * and send it to the FW. + */ +int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_conf->link_id]; + + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_mld_add_int_sta(mvm, &mvm->snif_sta, &mvm->snif_queue, + vif->type, STATION_TYPE_BCAST_MGMT, + mvm_link->fw_link_id, NULL, + IWL_MAX_TID_COUNT, NULL); +} + +int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) +{ + lockdep_assert_held(&mvm->mutex); + + /* In CDB NICs we need to specify which lmac to use for aux activity; + * use the link_id argument place to send lmac_id to the function. 
+ */ + return iwl_mvm_mld_add_int_sta(mvm, &mvm->aux_sta, &mvm->aux_queue, + NL80211_IFTYPE_UNSPECIFIED, + STATION_TYPE_AUX, lmac_id, NULL, + IWL_MAX_TID_COUNT, NULL); +} + +static int iwl_mvm_mld_disable_txq(struct iwl_mvm *mvm, u32 sta_mask, + u16 *queueptr, u8 tid) +{ + int queue = *queueptr; + int ret = 0; + + if (tid == IWL_MAX_TID_COUNT) + tid = IWL_MGMT_TID; + + if (mvm->sta_remove_requires_queue_remove) { + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD); + struct iwl_scd_queue_cfg_cmd remove_cmd = { + .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), + .u.remove.tid = cpu_to_le32(tid), + .u.remove.sta_mask = cpu_to_le32(sta_mask), + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, + sizeof(remove_cmd), + &remove_cmd); + } + + iwl_trans_txq_free(mvm->trans, queue); + *queueptr = IWL_MVM_INVALID_QUEUE; + + return ret; +} + +/* Removes a sta from the FW table, disable its queues, and dealloc it + */ +static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *int_sta, + bool flush, u8 tid, u16 *queuptr) +{ + int ret; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(int_sta->sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; + + if (flush) + iwl_mvm_flush_sta(mvm, int_sta, true); + + iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid); + + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, int_sta->sta_id); + if (ret) + IWL_WARN(mvm, "Failed sending remove station\n"); + + iwl_mvm_dealloc_int_sta(mvm, int_sta); + + return ret; +} + +int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id]; + u16 *queueptr; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(!link)) + return -EIO; + + switch (vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + queueptr = &link->mgmt_queue; + break; + case NL80211_IFTYPE_P2P_DEVICE: + queueptr = &mvm->p2p_dev_queue; + break; + default: + WARN(1, "Can't free bcast queue on vif type %d\n", + vif->type); + return -EINVAL; + } + + return iwl_mvm_mld_rm_int_sta(mvm, &link->bcast_sta, + true, IWL_MAX_TID_COUNT, queueptr); +} + +/* Send the FW a request to remove the station from it's internal data + * structures, and in addition remove it from the local data structure. 
+ */ +int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id]; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON(!link)) + return -EIO; + + return iwl_mvm_mld_rm_int_sta(mvm, &link->mcast_sta, true, 0, + &link->cab_queue); +} + +int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_mld_rm_int_sta(mvm, &mvm->snif_sta, false, + IWL_MAX_TID_COUNT, &mvm->snif_queue); +} + +int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_mld_rm_int_sta(mvm, &mvm->aux_sta, false, + IWL_MAX_TID_COUNT, &mvm->aux_queue); +} + +/* send a cfg sta command to add/update a sta in firmware */ +static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link_conf, + struct iwl_mvm_link_sta *mvm_link_sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *link_info = + mvm_vif->link[link_conf->link_id]; + struct iwl_mvm_sta_cfg_cmd cmd = { + .sta_id = cpu_to_le32(mvm_link_sta->sta_id), + .station_type = cpu_to_le32(mvm_sta->sta_type), + }; + u32 agg_size = 0, mpdu_dens = 0; + + /* when adding sta, link should exist in FW */ + if (WARN_ON(link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)) + return -EINVAL; + + cmd.link_id = cpu_to_le32(link_info->fw_link_id); + + memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN); + memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN); + + if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) + cmd.assoc_id = cpu_to_le32(sta->aid); + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) && + (sta->mfp || mvm_sta->sta_state < IEEE80211_STA_AUTHORIZED)) + cmd.mfp = cpu_to_le32(1); + + switch (link_sta->rx_nss) { + case 1: + cmd.mimo = cpu_to_le32(0); + break; + case 2 ... 8: + cmd.mimo = cpu_to_le32(1); + break; + } + + switch (sta->deflink.smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + break; + case IEEE80211_SMPS_STATIC: + /* override NSS */ + cmd.mimo = cpu_to_le32(0); + break; + case IEEE80211_SMPS_DYNAMIC: + cmd.mimo_protection = cpu_to_le32(1); + break; + case IEEE80211_SMPS_OFF: + /* nothing */ + break; + } + + mpdu_dens = iwl_mvm_get_sta_ampdu_dens(link_sta, link_conf, &agg_size); + cmd.tx_ampdu_spacing = cpu_to_le32(mpdu_dens); + cmd.tx_ampdu_max_size = cpu_to_le32(agg_size); + + if (sta->wme) { + cmd.sp_length = + cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128); + cmd.uapsd_acs = cpu_to_le32(iwl_mvm_get_sta_uapsd_acs(sta)); + } + + if (link_sta->he_cap.has_he) { + cmd.trig_rnd_alloc = + cpu_to_le32(link_conf->uora_exists ? 
1 : 0); + + /* PPE Thresholds */ + iwl_mvm_set_sta_pkt_ext(mvm, link_sta, &cmd.pkt_ext); + + /* HTC flags */ + cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, link_sta); + + if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN) + cmd.ack_enabled = cpu_to_le32(1); + } + + return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); +} + +static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id, + bool is_in_fw) +{ + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], + is_in_fw ? ERR_PTR(-EINVAL) : NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL); + RCU_INIT_POINTER(mvm_sta->link[link_id], NULL); + + if (mvm_sta_link != &mvm_sta->deflink) + kfree_rcu(mvm_sta_link, rcu_head); +} + +static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta) +{ + unsigned int link_id; + + for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) { + struct iwl_mvm_link_sta *link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (!link) + continue; + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id, false); + } +} + +static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + unsigned int link_id) +{ + struct ieee80211_link_sta *link_sta = + link_sta_dereference_protected(sta, link_id); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_link_sta *link; + u32 sta_id = iwl_mvm_find_free_sta_id(mvm, + ieee80211_vif_type_p2p(vif)); + + if (sta_id == IWL_MVM_INVALID_STA) + return -ENOSPC; + + if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) { + link = &mvm_sta->deflink; + } else { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + } + + link->sta_id = sta_id; + rcu_assign_pointer(mvm_sta->link[link_id], link); + rcu_assign_pointer(mvm->fw_id_to_mac_id[link->sta_id], sta); + rcu_assign_pointer(mvm->fw_id_to_link_sta[link->sta_id], + link_sta); + + return 0; +} + +/* allocate all the links of a sta, called when the station is first added */ +static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned int link_id; + int ret; + + lockdep_assert_held(&mvm->mutex); + + for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { + if (!rcu_access_pointer(sta->link[link_id]) || + mvm_sta->link[link_id]) + continue; + + ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); + if (ret) + goto err; + } + + return 0; + +err: + iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta); + return ret; +} + +static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta, + struct iwl_mvm_vif_link_info *vif_link, + struct iwl_mvm_link_sta *sta_link) +{ + if (!sta->tdls) { + WARN_ON(vif_link->ap_sta_id != IWL_MVM_INVALID_STA); + vif_link->ap_sta_id = sta_link->sta_id; + } else { + WARN_ON(vif_link->ap_sta_id == IWL_MVM_INVALID_STA); + } +} + +/* FIXME: consider waiting for mac80211 to add the STA instead of allocating + * queues here + */ +static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + unsigned int 
link_id; + /* no active link found */ + int ret = -EINVAL; + int sta_id; + + /* First add an empty station since allocating a queue requires + * a valid station. Since we need a link_id to allocate a station, + * pick up the first valid one. + */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_vif_link_info *mvm_link; + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (!link_conf) + continue; + + mvm_link = mvmvif->link[link_conf->link_id]; + + if (!mvm_link || !mvm_link_sta) + continue; + + sta_id = mvm_link_sta->sta_id; + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, + link_conf, mvm_link_sta); + if (ret) + return ret; + + rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); + rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], link_sta); + ret = 0; + } + + iwl_mvm_realloc_queues_after_restart(mvm, sta); + + return ret; +} + +int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned long link_sta_added_to_fw = 0; + struct ieee80211_link_sta *link_sta; + int ret = 0; + unsigned int link_id; + + lockdep_assert_held(&mvm->mutex); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta); + if (ret) + return ret; + + spin_lock_init(&mvm_sta->lock); + + ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA, + STATION_TYPE_PEER); + } else { + ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); + } + + if (ret) + goto err; + + /* at this stage sta link pointers are already allocated */ + ret = iwl_mvm_mld_update_sta(mvm, vif, sta); + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!link_conf || !mvm_link_sta)) + goto err; + + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, + mvm_link_sta); + if (ret) + goto err; + + link_sta_added_to_fw |= BIT(link_id); + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id], + mvm_link_sta); + } + + return 0; + +err: + /* remove all already allocated stations in FW */ + for_each_set_bit(link_id, &link_sta_added_to_fw, + IEEE80211_MLD_MAX_NUM_LINKS) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id); + } + + /* free all sta resources in the driver */ + iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta); + return ret; +} + +int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + int ret = -EINVAL; + + lockdep_assert_held(&mvm->mutex); + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if 
(WARN_ON(!link_conf || !mvm_link_sta)) + return -EINVAL; + + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, + mvm_link_sta); + + if (ret) { + IWL_ERR(mvm, "Failed to update sta link %d\n", link_id); + break; + } + } + + return ret; +} + +static void iwl_mvm_mld_disable_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + u32 sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + int i; + + lockdep_assert_held(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { + if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) + continue; + + iwl_mvm_mld_disable_txq(mvm, sta_mask, + &mvm_sta->tid_data[i].txq_id, i); + mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; + } + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + } +} + +int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* flush its queues here since we are freeing mvm_sta */ + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!mvm_link_sta)) + return -EINVAL; + + ret = iwl_mvm_flush_sta_tids(mvm, mvm_link_sta->sta_id, + 0xffff); + if (ret) + return ret; + } + + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + if (ret) + return ret; + + iwl_mvm_mld_disable_sta_queues(mvm, vif, sta); + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct iwl_mvm_link_sta *mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + bool stay_in_fw; + + stay_in_fw = iwl_mvm_sta_del(mvm, vif, sta, link_sta, &ret); + if (ret) + break; + + if (!stay_in_fw) + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, + mvm_link_sta->sta_id); + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, + link_id, stay_in_fw); + } + + return ret; +} + +int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id) +{ + int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); + + lockdep_assert_held(&mvm->mutex); + + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); + RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); + return ret; +} + +void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + bool disable) +{ + struct iwl_mvm_sta_disable_tx_cmd cmd; + int ret; + + cmd.sta_id = cpu_to_le32(mvmsta->deflink.sta_id); + cmd.disable = cpu_to_le32(disable); + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, STA_DISABLE_TX_CMD), + CMD_ASYNC, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, + "Failed to send STA_DISABLE_TX_CMD command (%d)\n", + ret); +} + +void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + bool disable) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + + spin_lock_bh(&mvm_sta->lock); + + if (mvm_sta->disable_tx == disable) { + spin_unlock_bh(&mvm_sta->lock); + return; + } + + iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable); + + spin_unlock_bh(&mvm_sta->lock); +} + +void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + bool disable) +{ + struct 
ieee80211_sta *sta; + struct iwl_mvm_sta *mvm_sta; + int i; + + rcu_read_lock(); + + /* Block/unblock all the stations of the given mvmvif */ + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { + sta = rcu_dereference(mvm->fw_id_to_mac_id[i]); + if (IS_ERR_OR_NULL(sta)) + continue; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + if (mvm_sta->mac_id_n_color != + FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) + continue; + + iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable); + } + + rcu_read_unlock(); +} + +static int iwl_mvm_mld_update_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_scd_queue_cfg_cmd cmd = { + .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY), + .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask), + .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD), + .len[0] = sizeof(cmd), + .data[0] = &cmd + }; + int tid; + int ret; + + lockdep_assert_held(&mvm->mutex); + + for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) { + struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[tid]; + int txq_id = tid_data->txq_id; + + if (txq_id == IWL_MVM_INVALID_QUEUE) + continue; + + if (tid == IWL_MAX_TID_COUNT) + cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID); + else + cmd.u.modify.tid = cpu_to_le32(tid); + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + if (ret) + return ret; + } + + return 0; +} + +static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_rx_baid_cfg_cmd cmd = { + .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY), + .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask), + .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask), + }; + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + int baid; + + BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); + + for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) { + struct iwl_mvm_baid_data *data; + int ret; + + data = rcu_dereference_protected(mvm->baid_map[baid], + lockdep_is_held(&mvm->mutex)); + if (!data) + continue; + + if (!(data->sta_mask & old_sta_mask)) + continue; + + WARN_ONCE(data->sta_mask != old_sta_mask, + "BAID data for %d corrupted - expected 0x%x found 0x%x\n", + baid, old_sta_mask, data->sta_mask); + + cmd.modify.tid = cpu_to_le32(data->tid); + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + data->sta_mask = new_sta_mask; + if (ret) + return ret; + } + + return 0; +} + +static int iwl_mvm_mld_update_sta_resources(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + int ret; + + ret = iwl_mvm_mld_update_sta_queues(mvm, sta, + old_sta_mask, + new_sta_mask); + if (ret) + return ret; + + ret = iwl_mvm_mld_update_sta_keys(mvm, vif, sta, + old_sta_mask, + new_sta_mask); + if (ret) + return ret; + + return iwl_mvm_mld_update_sta_baids(mvm, old_sta_mask, new_sta_mask); +} + +int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_link_sta *mvm_sta_link; + struct iwl_mvm_vif_link_info *mvm_vif_link; + unsigned long links_to_add = ~old_links & new_links; + unsigned long links_to_rem = old_links 
& ~new_links; + unsigned long old_links_long = old_links; + u32 current_sta_mask = 0, sta_mask_added = 0, sta_mask_to_rem = 0; + unsigned long link_sta_added_to_fw = 0, link_sta_allocated = 0; + unsigned int link_id; + int ret; + + lockdep_assert_held(&mvm->mutex); + + for_each_set_bit(link_id, &old_links_long, + IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!mvm_sta_link)) { + ret = -EINVAL; + goto err; + } + + current_sta_mask |= BIT(mvm_sta_link->sta_id); + if (links_to_rem & BIT(link_id)) + sta_mask_to_rem |= BIT(mvm_sta_link->sta_id); + } + + if (sta_mask_to_rem) { + ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta, + current_sta_mask, + current_sta_mask & + ~sta_mask_to_rem); + if (WARN_ON(ret)) + goto err; + + current_sta_mask &= ~sta_mask_to_rem; + } + + for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + mvm_vif_link = mvm_vif->link[link_id]; + + if (WARN_ON(!mvm_sta_link || !mvm_vif_link)) { + ret = -EINVAL; + goto err; + } + + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id); + if (WARN_ON(ret)) + goto err; + + if (vif->type == NL80211_IFTYPE_STATION) + mvm_vif_link->ap_sta_id = IWL_MVM_INVALID_STA; + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, + false); + } + + for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct ieee80211_link_sta *link_sta = + link_sta_dereference_protected(sta, link_id); + mvm_vif_link = mvm_vif->link[link_id]; + + if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta || + mvm_sta->link[link_id])) { + ret = -EINVAL; + goto err; + } + + ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id); + if (WARN_ON(ret)) + goto err; + + link_sta->agg.max_rc_amsdu_len = 1; + ieee80211_sta_recalc_aggregates(sta); + + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON(!mvm_sta_link)) { + ret = -EINVAL; + goto err; + } + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif_link, + mvm_sta_link); + + link_sta_allocated |= BIT(link_id); + + sta_mask_added |= BIT(mvm_sta_link->sta_id); + + ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf, + mvm_sta_link); + if (WARN_ON(ret)) + goto err; + + link_sta_added_to_fw |= BIT(link_id); + + iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link); + } + + if (sta_mask_added) { + ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta, + current_sta_mask, + current_sta_mask | + sta_mask_added); + if (WARN_ON(ret)) + goto err; + } + + return 0; + +err: + /* remove all already allocated stations in FW */ + for_each_set_bit(link_id, &link_sta_added_to_fw, + IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id); + } + + /* remove all already allocated station links in driver */ + for_each_set_bit(link_id, &link_sta_allocated, + IEEE80211_MLD_MAX_NUM_LINKS) { + mvm_sta_link = + rcu_dereference_protected(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, + false); + } + + return ret; +} diff --git a/mvm/mvm.h b/mvm/mvm.h index c6bc85d4600a..b18c91c5dd5d 100644 --- 
a/mvm/mvm.h +++ b/mvm/mvm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -16,6 +16,8 @@ #include <linux/thermal.h> #endif +#include <linux/ptp_clock_kernel.h> + #include <linux/ktime.h> #include "iwl-op-mode.h" @@ -71,7 +73,11 @@ /* offchannel queue towards mac80211 */ #define IWL_MVM_OFFCHANNEL_QUEUE 0 +/* invalid value for FW link id */ +#define IWL_MVM_FW_LINK_ID_INVALID 0xff + extern const struct ieee80211_ops iwl_mvm_hw_ops; +extern const struct ieee80211_ops iwl_mvm_mld_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm @@ -98,6 +104,7 @@ struct iwl_mvm_phy_ctxt { /* track for RLC config command */ u32 center_freq1; + bool rlc_disabled; }; struct iwl_mvm_time_event_data { @@ -278,11 +285,65 @@ struct iwl_probe_resp_data { }; /** + * struct iwl_mvm_vif_link_info - per link data in Virtual Interface + * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA + * @fw_link_id: the id of the link according to the FW API + * @bssid: BSSID for this (client) interface + * @bcast_sta: station used for broadcast packets. Used by the following + * vifs: P2P_DEVICE, GO and AP. + * @beacon_stats: beacon statistics, containing the # of received beacons, + * # of received beacons accumulated over FW restart, and the current + * average signal of beacons retrieved from the firmware + * @smps_requests: the SMPS requests of different parts of the driver, + * combined on update to yield the overall request to mac80211. + * @probe_resp_data: data from FW notification to store NOA and CSA related + * data to be inserted into probe response. + * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked + * @queue_params: QoS params for this MAC + * @mgmt_queue: queue number for unbufferable management frames + * @igtk: the current IGTK programmed into the firmware + * @listen_lmac: indicates this link is allocated to the listen LMAC + */ +struct iwl_mvm_vif_link_info { + u8 bssid[ETH_ALEN]; + u8 ap_sta_id; + u8 fw_link_id; + + struct iwl_mvm_int_sta bcast_sta; + struct iwl_mvm_int_sta mcast_sta; + + struct { + u32 num_beacons, accu_num_beacons; + u8 avg_signal; + } beacon_stats; + + enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; + struct iwl_probe_resp_data __rcu *probe_resp_data; + + struct ieee80211_key_conf *igtk; + + bool he_ru_2mhz_block; + bool active; + bool listen_lmac; + + u16 cab_queue; + /* Assigned while mac80211 has the link in a channel context, + * or, for P2P Device, while it exists. 
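	 *
	 * (Editorial sketch, not part of this commit: fields that used to sit
	 *  directly in struct iwl_mvm_vif are now kept per link, so code that
	 *  previously touched e.g. mvmvif->cab_queue would iterate the links,
	 *  roughly
	 *
	 *	unsigned int link_id;
	 *
	 *	for_each_mvm_vif_valid_link(mvmvif, link_id)
	 *		use_queue(mvmvif->link[link_id]->cab_queue);
	 *
	 *  using the for_each_mvm_vif_valid_link() helper added further down
	 *  in this hunk; use_queue() is only a placeholder.)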
+ */ + struct iwl_mvm_phy_ctxt *phy_ctxt; + /* QoS data from mac80211, need to store this here + * as mac80211 has a separate callback but we need + * to have the data for the MAC context + */ + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; + + u16 mgmt_queue; +}; + +/** * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal - * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA - * @bssid: BSSID for this (client) interface * @associated: indicates that we're currently associated, used only for * managing the firmware state in iwl_mvm_bss_info_changed_station() * @ap_assoc_sta_count: count of stations associated to us - valid only @@ -290,7 +351,7 @@ struct iwl_probe_resp_data { * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. - * @pm_enabled - Indicate if MAC power management is allowed + * @pm_enabled - indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. * @low_latency: bit flags for low latency @@ -299,68 +360,32 @@ struct iwl_probe_resp_data { * as a result from low_latency bit flags and takes force into account. * @authorized: indicates the AP station was set to authorized * @ps_disabled: indicates that this interface requires PS to be disabled - * @queue_params: QoS params for this MAC - * @bcast_sta: station used for broadcast packets. Used by the following - * vifs: P2P_DEVICE, GO and AP. - * @beacon_skb: the skb used to hold the AP/GO beacon template - * @smps_requests: the SMPS requests of different parts of the driver, - * combined on update to yield the overall request to mac80211. - * @beacon_stats: beacon statistics, containing the # of received beacons, - * # of received beacons accumulated over FW restart, and the current - * average signal of beacons retrieved from the firmware + * @csa_countdown: indicates that CSA countdown may be started * @csa_failed: CSA failed to schedule time event, report an error later + * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel * @features: hw features active for this vif - * @probe_resp_data: data from FW notification to store NOA and CSA related - * data to be inserted into probe response. */ struct iwl_mvm_vif { struct iwl_mvm *mvm; u16 id; u16 color; - u8 ap_sta_id; - u8 bssid[ETH_ALEN]; bool associated; u8 ap_assoc_sta_count; - - u16 cab_queue; - bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; + bool esr_active; + u8 low_latency: 6; u8 low_latency_actual: 1; + u8 authorized:1; bool ps_disabled; - struct iwl_mvm_vif_bf_data bf_data; - - struct { - u32 num_beacons, accu_num_beacons; - u8 avg_signal; - } beacon_stats; u32 ap_beacon_time; - - enum iwl_tsf_id tsf_id; - - /* - * QoS data from mac80211, need to store this here - * as mac80211 has a separate callback but we need - * to have the data for the MAC context - */ - struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; - struct iwl_mvm_time_event_data time_event_data; - struct iwl_mvm_time_event_data hs_time_event_data; - - struct iwl_mvm_int_sta bcast_sta; - struct iwl_mvm_int_sta mcast_sta; - - /* - * Assigned while mac80211 has the interface in a channel context, - * or, for P2P Device, while it exists. 
- */ - struct iwl_mvm_phy_ctxt *phy_ctxt; + struct iwl_mvm_vif_bf_data bf_data; #ifdef CONFIG_PM /* WoWLAN GTK rekey data */ @@ -396,40 +421,45 @@ struct iwl_mvm_vif { int dbgfs_quota_min; #endif - enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; - /* FW identified misbehaving AP */ - u8 uapsd_misbehaving_bssid[ETH_ALEN]; - + u8 uapsd_misbehaving_ap_addr[ETH_ALEN] __aligned(2); struct delayed_work uapsd_nonagg_detected_wk; - /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; + bool csa_bcn_pending; u16 csa_target_freq; u16 csa_count; u16 csa_misbehave; struct delayed_work csa_work; - /* Indicates that we are waiting for a beacon on a new channel */ - bool csa_bcn_pending; + enum iwl_tsf_id tsf_id; + + struct iwl_mvm_time_event_data time_event_data; + struct iwl_mvm_time_event_data hs_time_event_data; /* TCP Checksum Offload */ netdev_features_t features; - struct iwl_probe_resp_data __rcu *probe_resp_data; + struct ieee80211_sta *ap_sta; /* we can only have 2 GTK + 2 IGTK active at a time */ struct ieee80211_key_conf *ap_early_keys[4]; - /* 26-tone RU OFDMA transmissions should be blocked */ - bool he_ru_2mhz_block; - struct { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; + + struct iwl_mvm_vif_link_info deflink; + struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS]; }; +#define for_each_mvm_vif_valid_link(mvm_vif, link_id) \ + for (link_id = 0; \ + link_id < ARRAY_SIZE((mvm_vif)->link); \ + link_id++) \ + if ((mvm_vif)->link[link_id]) + static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { @@ -501,7 +531,7 @@ struct iwl_mvm_tt_mgmt { * @tzone: thermal zone device data */ struct iwl_mvm_thermal_device { - s16 temp_trips[IWL_MAX_DTS_TRIPS]; + struct thermal_trip trips[IWL_MAX_DTS_TRIPS]; u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; @@ -657,7 +687,7 @@ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) /** * struct iwl_mvm_baid_data - BA session data - * @sta_id: station id + * @sta_mask: current station mask for the BAID * @tid: tid of the session * @baid baid of the session * @timeout: the timeout set in the addba request @@ -671,7 +701,7 @@ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry))) */ struct iwl_mvm_baid_data { struct rcu_head rcu_head; - u8 sta_id; + u32 sta_mask; u8 tid; u8 baid; u16 timeout; @@ -729,7 +759,10 @@ struct iwl_mvm_txq { struct list_head list; u16 txq_id; atomic_t tx_request; - bool stopped; +#define IWL_MVM_TXQ_STATE_STOP_FULL 0 +#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1 +#define IWL_MVM_TXQ_STATE_READY 2 + unsigned long state; }; static inline struct iwl_mvm_txq * @@ -769,6 +802,43 @@ struct iwl_mvm_dqa_txq_info { enum iwl_mvm_queue_status status; }; +struct ptp_data { + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + + struct delayed_work dwork; + + /* The last GP2 reading from the hw */ + u32 last_gp2; + + /* number of wraparounds since scale_update_adj_time_ns */ + u32 wrap_counter; + + /* GP2 time when the scale was last updated */ + u32 scale_update_gp2; + + /* Adjusted time when the scale was last updated in nanoseconds */ + u64 scale_update_adj_time_ns; + + /* clock frequency offset, scaled to 65536000000 */ + u64 scaled_freq; + + /* Delta between hardware clock and ptp clock in nanoseconds */ + s64 delta; +}; + +struct iwl_time_sync_data { + struct sk_buff_head frame_list; + u8 peer_addr[ETH_ALEN]; + bool active; +}; + +struct iwl_mei_scan_filter { + bool 
is_mei_limited_scan; + struct sk_buff_head scan_res; + struct work_struct scan_work; +}; + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -827,6 +897,7 @@ struct iwl_mvm { struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; }; struct work_struct add_stream_wk; /* To add streams to queues */ + spinlock_t add_stream_lock; const char *nvm_file_name; struct iwl_nvm_data *nvm_data; @@ -853,6 +924,8 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; + struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX]; + unsigned long fw_link_ids_map; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -860,6 +933,7 @@ struct iwl_mvm { /* Scan status, cmd (pre-allocated) and auxiliary station */ unsigned int scan_status; + size_t scan_cmd_size; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; /* For CDB this is low band scan type, for non-CDB - type. */ @@ -934,9 +1008,10 @@ struct iwl_mvm { unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; u8 fw_key_deleted[STA_KEY_MAX_NUM]; - u8 vif_count; struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; + struct ieee80211_bss_conf __rcu *link_id_to_link_conf[IWL_MVM_FW_MAX_LINK_ID + 1]; + /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; u8 *error_recovery_buf; @@ -1079,8 +1154,9 @@ struct iwl_mvm { struct list_head resp_pasn_list; + struct ptp_data ptp_data; + struct { - u8 d0i3_resp; u8 range_resp; } cmd_ver; @@ -1097,15 +1173,31 @@ struct iwl_mvm { /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; + /* + * primary channel position relative to he whole bandwidth, + * in steps of 80 MHz + */ + u8 monitor_p80; /* sniffer data to include in radiotap */ __le16 cur_aid; u8 cur_bssid[ETH_ALEN]; +#ifdef CONFIG_ACPI + struct iwl_phy_specific_cfg phy_filters; +#endif + unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; bool sta_remove_requires_queue_remove; + bool mld_api_is_used; + + bool pldr_sync; + + struct iwl_time_sync_data time_sync; + + struct iwl_mei_scan_filter mei_scan_filter; }; /* Extract MVM priv from op_mode and _hw */ @@ -1223,6 +1315,19 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) lockdep_is_held(&mvm->mutex)); } +static inline struct ieee80211_bss_conf * +iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu) +{ + if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + return NULL; + + if (rcu) + return rcu_dereference(mvm->link_id_to_link_conf[link_id]); + + return rcu_dereference_protected(mvm->link_id_to_link_conf[link_id], + lockdep_is_held(&mvm->mutex)); +} + static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, @@ -1305,7 +1410,7 @@ static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) && - !IWL_MVM_HW_CSUM_DISABLE; + !IWL_MVM_HW_CSUM_DISABLE; } static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) @@ -1330,10 +1435,22 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } +static inline bool iwl_mvm_has_mld_api(const struct iwl_fw *fw) +{ + return fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MLD_API_SUPPORT); +} + +static inline bool iwl_mvm_has_new_station_api(const struct 
iwl_fw *fw) +{ + return iwl_mvm_has_mld_api(fw) || + iwl_fw_lookup_cmd_ver(fw, ADD_STA, 0) >= 12; +} + static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) { /* TODO - replace with TLV once defined */ - return mvm->trans->trans_cfg->use_tfh; + return mvm->trans->trans_cfg->gen2; } static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) @@ -1431,6 +1548,32 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); } +static inline bool iwl_mvm_is_esr_supported(struct iwl_trans *trans) +{ + if ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM) && + !CSR_HW_RFID_IS_CDB(trans->hw_rf_id)) + /* Step A doesn't support eSR */ + return CSR_HW_RFID_STEP(trans->hw_rf_id); + + return false; +} + +static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_trans *trans = mvm->fwrt.trans; + + if (vif->type == NL80211_IFTYPE_AP) + return mvm->fw->ucode_capa.num_beacons; + + if (iwl_mvm_is_esr_supported(trans) || + (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM && + CSR_HW_RFID_IS_CDB(trans->hw_rf_id))) + return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM; + + return 1; +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; @@ -1471,6 +1614,7 @@ void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, struct ieee80211_tx_rate *r); u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx); u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac); +bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif); static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) { @@ -1507,7 +1651,6 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); -u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); @@ -1518,6 +1661,17 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk); int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal); int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids); +/* Utils to extract sta related data */ +__le32 iwl_mvm_get_sta_htc_flags(struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta); +u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta); +u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link_conf, + u32 *_agg_size); +int iwl_mvm_set_sta_pkt_ext(struct iwl_mvm *mvm, + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext); + void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, @@ -1617,6 +1771,7 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ +struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm); int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic); @@ -1630,22 +1785,63 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm); u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef); u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); +int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + u8 chains_static, u8 
chains_dynamic); /* MAC (virtual interface) programming */ + +void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_set_fw_basic_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *cck_rates, __le32 *ofdm_rates); +void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le32 *protection_flags, u32 ht_flag, + u32 tgg_flag); +void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct iwl_ac_qos *ac, __le32 *qos_flags); +bool iwl_mvm_set_fw_mu_edca_params(struct iwl_mvm *mvm, + const struct iwl_mvm_vif_link_info *link_info, + struct iwl_he_backoff_conf *trig_based_txf); +void iwl_mvm_set_fw_dtim_tbtt(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + __le64 *dtim_tsf, __le32 *dtim_time, + __le32 *assoc_beacon_arrive_time); +__le32 iwl_mac_ctxt_p2p_dev_has_extended_disc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_mac_ctxt_cmd_ap_set_filter_flags(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + __le32 *filter_flags, + int accept_probe_req_flag, + int accept_beacon_flag); +int iwl_mvm_get_mac_type(struct ieee80211_vif *vif); +__le32 iwl_mvm_mac_ctxt_cmd_p2p_sta_get_oppps_ctwin(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +u32 iwl_mvm_mac_ctxt_cmd_sta_get_twt_policy(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +int iwl_mvm_mld_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mld_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool force_assoc_off); +int iwl_mvm_mld_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, struct sk_buff *beacon, void *data, int len); -u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, +u8 iwl_mvm_mac_ctxt_get_beacon_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_vif *vif); +u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, struct ieee80211_vif *vif); u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx); @@ -1676,6 +1872,94 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band); + +/* Links */ +int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u32 changes, bool active); +int iwl_mvm_remove_link(struct iwl_mvm 
*mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); + +/* AP and IBSS */ +bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int *ret); +void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); + +/* BSS Info */ +/** + * struct iwl_mvm_bss_info_changed_ops - callbacks for the bss_info_changed() + * + * Since the only difference between both MLD and + * non-MLD versions of bss_info_changed() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_bss_info_changed_common(). + * + * @bss_info_changed_sta: pointer to the function that handles changes + * in bss_info in sta mode + * @bss_info_changed_ap_ibss: pointer to the function that handles changes + * in bss_info in ap and ibss modes + */ +struct iwl_mvm_bss_info_changed_ops { + void (*bss_info_changed_sta)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u64 changes); + void (*bss_info_changed_ap_ibss)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u64 changes); +}; + +void +iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + const struct iwl_mvm_bss_info_changed_ops *callbacks, + u64 changes); +void +iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes); +void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u64 changes); + +/* ROC */ +/** + * struct iwl_mvm_roc_ops - callbacks for the remain_on_channel() + * + * Since the only difference between both MLD and + * non-MLD versions of remain_on_channel() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_roc_common(). 
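 *
 * (Editorial sketch, not part of this commit: each ieee80211_ops variant is
 *  expected to fill this struct with its own helpers and forward to the
 *  common code, along the lines of
 *
 *	static const struct iwl_mvm_roc_ops ops = {
 *		.add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
 *		.switch_phy_ctxt = iwl_mvm_roc_switch_binding,
 *	};
 *
 *	return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
 *
 *  the two callback names are illustrative and not taken from this diff;
 *  the same pattern applies to iwl_mvm_bss_info_changed_ops above.)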
+ * + * @add_aux_sta_for_hs20: pointer to the function that adds an aux sta + * for Hot Spot 2.0 + * @switch_phy_ctxt: pointer to the function that switches a vif from one + * phy_ctx to another + */ +struct iwl_mvm_roc_ops { + int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id); + int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_phy_ctxt *new_phy_ctxt); +}; + +int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type, + const struct iwl_mvm_roc_ops *ops); +int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +/*Session Protection */ +void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + u32 duration_override); /* Quota management */ static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm) @@ -1706,7 +1990,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies); -int iwl_mvm_scan_size(struct iwl_mvm *mvm); +size_t iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); @@ -1856,10 +2140,17 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, /* SMPS */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, - enum ieee80211_smps_mode smps_request); + enum ieee80211_smps_mode smps_request, + unsigned int link_id); +void +iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request); bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt); -void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif); +void iwl_mvm_update_link_smps(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); /* Low latency */ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -1960,7 +2251,7 @@ static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} #endif /* Location Aware Regulatory */ -struct iwl_mcc_update_resp * +struct iwl_mcc_update_resp_v8 * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); @@ -1980,9 +2271,11 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); /* FTM responder */ -int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, @@ -2068,18 +2361,48 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_sta *sta, u16 tid); +void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter); +void iwl_mvm_ptp_init(struct iwl_mvm *mvm); +void iwl_mvm_ptp_remove(struct iwl_mvm *mvm); +u64 
iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time); int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm); void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm); #ifdef CONFIG_IWLWIFI_DEBUGFS -void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - struct dentry *dir); +void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir); #endif +/* new MLD related APIs */ +int iwl_mvm_sec_key_add(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); +int iwl_mvm_sec_key_del(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); +void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_vif_link_info *link, + unsigned int link_id); +int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask); +int iwl_mvm_mld_send_key(struct iwl_mvm *mvm, u32 sta_mask, u32 key_flags, + struct ieee80211_key_conf *keyconf); +u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *keyconf); + +bool iwl_rfi_supported(struct iwl_mvm *mvm); int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm); @@ -2101,6 +2424,45 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) } } +/* Channel Switch */ +void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk); +int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +/* Channel Context */ +/** + * struct iwl_mvm_switch_vif_chanctx_ops - callbacks for switch_vif_chanctx() + * + * Since the only difference between both MLD and + * non-MLD versions of switch_vif_chanctx() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_switch_vif_chanctx_common(). 
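 *
 * (Editorial sketch, not part of this commit: the mac80211 switch_vif_chanctx
 *  callback of each variant would wrap the common helper, roughly
 *
 *	static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
 *					      struct ieee80211_vif_chanctx_switch *vifs,
 *					      int n_vifs,
 *					      enum ieee80211_chanctx_switch_mode mode)
 *	{
 *		return iwl_mvm_switch_vif_chanctx_common(hw, vifs, n_vifs,
 *							 mode, &ops);
 *	}
 *
 *  where "ops" is a static instance of this struct providing the two
 *  callbacks below; the wrapper shown here is illustrative.)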
+ * + * @__assign_vif_chanctx: pointer to the function that assigns a chanctx to + * a given vif + * @__unassign_vif_chanctx: pointer to the function that unassigns a chanctx to + * a given vif + */ +struct iwl_mvm_switch_vif_chanctx_ops { + int (*__assign_vif_chanctx)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx); + void (*__unassign_vif_chanctx)(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx, + bool switching_chanctx); +}; + +int +iwl_mvm_switch_vif_chanctx_common(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode, + const struct iwl_mvm_switch_vif_chanctx_ops *ops); + /* Channel info utils */ static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) { @@ -2202,10 +2564,10 @@ static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) iwl_mei_host_disassociated(); } -static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) +static inline void iwl_mvm_mei_device_state(struct iwl_mvm *mvm, bool up) { if (mvm->mei_registered) - iwl_mei_device_down(); + iwl_mei_device_state(up); } static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) @@ -2218,8 +2580,149 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) sw_rfkill); } +static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm, + struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (mvm->mei_scan_filter.is_mei_limited_scan && + (ieee80211_is_probe_resp(mgmt->frame_control) || + ieee80211_is_beacon(mgmt->frame_control))) { + skb_queue_tail(&mvm->mei_scan_filter.scan_res, skb); + schedule_work(&mvm->mei_scan_filter.scan_work); + return true; + } + + return false; +} + void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool forbidden); +bool iwl_mvm_is_vendor_in_approved_list(void); + +/* Callbacks for ieee80211_ops */ +void iwl_mvm_mac_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, struct sk_buff *skb); +void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq); + +int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params); +int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant); +int iwl_mvm_mac_start(struct ieee80211_hw *hw); +void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type); +void iwl_mvm_mac_stop(struct ieee80211_hw *hw); +static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + return 0; +} +u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list); + +void iwl_mvm_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, u64 multicast); +int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req); +void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + struct ieee80211_sta *sta); +void +iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw 
*hw, + struct ieee80211_sta *sta, u16 tids, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data); +void +iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u16 tids, + int num_frames, + enum ieee80211_frame_release_type reason, + bool more_data); +int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value); +void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u32 changed); +void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info); +void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_prep_tx_info *info); +void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop); +void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies); +int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); +int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx); +void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx); +void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, u32 changed); +int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw); +int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set); +void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw); +int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw); +void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw); +void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct ieee80211_event *event); +void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw); +int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len); +int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey); +void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo); +int +iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_ftm_responder_stats *stats); +int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); +void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); + +bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1, + struct iwl_mvm_vif *vif2); +bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif); +int 
iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + s16 tx_power); +int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_set_hw_timestamp *hwts); +int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif); #endif /* __IWL_MVM_H__ */ diff --git a/mvm/nvm.c b/mvm/nvm.c index 6d18a1fd649b..f67ab8ee18c2 100644 --- a/mvm/nvm.c +++ b/mvm/nvm.c @@ -404,7 +404,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) return ret < 0 ? ret : 0; } -struct iwl_mcc_update_resp * +struct iwl_mcc_update_resp_v8 * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id) { @@ -412,7 +412,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), .source_id = (u8)src_id, }; - struct iwl_mcc_update_resp *resp_cp; + struct iwl_mcc_update_resp_v8 *resp_cp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, @@ -420,7 +420,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, .data = { &mcc_update_cmd }, }; - int ret; + int ret, resp_ver; u32 status; int resp_len, n_channels; u16 mcc; @@ -439,25 +439,70 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, pkt = cmd.resp_pkt; + resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, + MCC_UPDATE_CMD, 0); + /* Extract MCC response */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { - struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; - - n_channels = __le32_to_cpu(mcc_resp->n_channels); - resp_len = sizeof(struct iwl_mcc_update_resp) + - n_channels * sizeof(__le32); - resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); + if (resp_ver >= 8) { + struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (void *)pkt->data; + + n_channels = __le32_to_cpu(mcc_resp_v8->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v8, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } + resp_len = struct_size(resp_cp, channels, n_channels); + resp_cp = kzalloc(resp_len, GFP_KERNEL); + if (!resp_cp) { + resp_cp = ERR_PTR(-ENOMEM); + goto exit; + } + resp_cp->status = mcc_resp_v8->status; + resp_cp->mcc = mcc_resp_v8->mcc; + resp_cp->cap = mcc_resp_v8->cap; + resp_cp->source_id = mcc_resp_v8->source_id; + resp_cp->time = mcc_resp_v8->time; + resp_cp->geo_info = mcc_resp_v8->geo_info; + resp_cp->n_channels = mcc_resp_v8->n_channels; + memcpy(resp_cp->channels, mcc_resp_v8->channels, + n_channels * sizeof(__le32)); + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) { + struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (void *)pkt->data; + + n_channels = __le32_to_cpu(mcc_resp_v4->n_channels); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v4, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } + resp_len = struct_size(resp_cp, channels, n_channels); + resp_cp = kzalloc(resp_len, GFP_KERNEL); if (!resp_cp) { resp_cp = ERR_PTR(-ENOMEM); goto exit; } + + resp_cp->status = mcc_resp_v4->status; + resp_cp->mcc = mcc_resp_v4->mcc; + resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap)); + resp_cp->source_id = mcc_resp_v4->source_id; + resp_cp->time = mcc_resp_v4->time; + resp_cp->geo_info = mcc_resp_v4->geo_info; + resp_cp->n_channels = mcc_resp_v4->n_channels; + memcpy(resp_cp->channels, mcc_resp_v4->channels, + n_channels * sizeof(__le32)); } else { struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data; n_channels = 
__le32_to_cpu(mcc_resp_v3->n_channels); - resp_len = sizeof(struct iwl_mcc_update_resp) + - n_channels * sizeof(__le32); + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v3, channels, n_channels)) { + resp_cp = ERR_PTR(-EINVAL); + goto exit; + } + resp_len = struct_size(resp_cp, channels, n_channels); resp_cp = kzalloc(resp_len, GFP_KERNEL); if (!resp_cp) { resp_cp = ERR_PTR(-ENOMEM); @@ -466,7 +511,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, resp_cp->status = mcc_resp_v3->status; resp_cp->mcc = mcc_resp_v3->mcc; - resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap); + resp_cp->cap = cpu_to_le32(mcc_resp_v3->cap); resp_cp->source_id = mcc_resp_v3->source_id; resp_cp->time = mcc_resp_v3->time; resp_cp->geo_info = mcc_resp_v3->geo_info; diff --git a/mvm/offloading.c b/mvm/offloading.c index c7dabc6b3765..dfb16ca5b438 100644 --- a/mvm/offloading.c +++ b/mvm/offloading.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2021-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -192,12 +192,16 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, size = sizeof(cmd.v1); } - if (vif->bss_conf.arp_addr_cnt) { + if (vif->cfg.arp_addr_cnt) { enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID; - common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0]; + common->host_ipv4_addr = vif->cfg.arp_addr_list[0]; memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN); } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT)) + enabled |= IWL_D3_PROTO_OFFLOAD_BTM; + if (!disable_offloading) common->enabled = cpu_to_le32(enabled); diff --git a/mvm/ops.c b/mvm/ops.c index b2f33ebdf485..5336a4afde4d 100644 --- a/mvm/ops.c +++ b/mvm/ops.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -29,6 +29,7 @@ #include "fw-api.h" #include "fw/acpi.h" #include "fw/uefi.h" +#include "time-sync.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -92,6 +93,12 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >> FW_PHY_CFG_RADIO_DASH_POS; + IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, + radio_cfg_step, radio_cfg_dash); + + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return; + /* SKU control */ reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); @@ -127,9 +134,6 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) CSR_HW_IF_CONFIG_REG_D3_DEBUG, reg_val); - IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type, - radio_cfg_step, radio_cfg_dash); - /* * W/A : NIC is stuck in a reset state after Early PCIe power off * (PCIe power is lost before PERST# is asserted), causing ME FW @@ -162,7 +166,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40) return; - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) return; /* this shouldn't happen *again*, ignore it */ @@ -188,8 +192,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, WARN_ON(!(sband->ht_cap.cap & 
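The reworked MCC_UPDATE_CMD handling in mvm/nvm.c above validates the notification before trusting it: the payload length must equal struct_size() of the firmware-claimed n_channels, and only then is the response copied into the unified v8 layout. A minimal userspace sketch of that flexible-array validation pattern, with struct_size() approximated by an explicit overflow check (struct layout and names are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mcc_resp {                /* stand-in for iwl_mcc_update_resp_v8 */
	uint32_t status;
	uint16_t mcc;
	uint32_t n_channels;
	uint32_t channels[];     /* flexible array, n_channels entries */
};

/* struct_size(resp, channels, n) without the kernel helper */
static int resp_size(size_t n, size_t *out)
{
	if (n > (SIZE_MAX - sizeof(struct mcc_resp)) / sizeof(uint32_t))
		return -1;                      /* would overflow */
	*out = sizeof(struct mcc_resp) + n * sizeof(uint32_t);
	return 0;
}

/* Copy a received payload only if its length matches the claimed count. */
static struct mcc_resp *copy_resp(const void *payload, size_t payload_len)
{
	const struct mcc_resp *rx = payload;
	struct mcc_resp *cp;
	size_t want;

	if (payload_len < sizeof(*rx))
		return NULL;
	if (resp_size(rx->n_channels, &want) || want != payload_len)
		return NULL;            /* length does not match n_channels */
	cp = malloc(want);
	if (cp)
		memcpy(cp, rx, want);
	return cp;
}

int main(void)
{
	size_t len;
	struct mcc_resp *rx, *cp;

	if (resp_size(3, &len) || !(rx = calloc(1, len)))
		return 1;
	rx->n_channels = 3;
	rx->channels[0] = 1; rx->channels[1] = 6; rx->channels[2] = 11;

	cp = copy_resp(rx, len);
	printf("exact length accepted:     %s\n", cp ? "yes" : "no");
	free(cp);

	cp = copy_resp(rx, len - sizeof(uint32_t));
	printf("truncated length accepted: %s\n", cp ? "yes" : "no");
	free(cp);
	free(rx);
	return 0;
}

The same check-then-copy shape is repeated for the v8, v4 and v3 response variants in the hunk above, differing only in which fields are widened during the copy.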
IEEE80211_HT_CAP_SUP_WIDTH_20_40)); sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (he_cap) { /* we know that ours is writable */ @@ -205,24 +208,37 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, ieee80211_disconnect(vif, true); } -void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif) +void iwl_mvm_update_link_smps(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC; + if (!link_conf) + return; + if (mvm->fw_static_smps_request && - vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 && - vif->bss_conf.he_support) + link_conf->chandef.width == NL80211_CHAN_WIDTH_160 && + link_conf->he_support) mode = IEEE80211_SMPS_STATIC; - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode, + link_conf->link_id); } static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac, struct ieee80211_vif *vif) { - iwl_mvm_apply_fw_smps_request(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + rcu_read_lock(); + + for_each_vif_active_link(vif, link_conf, link_id) + iwl_mvm_update_link_smps(vif, link_conf); + + rcu_read_unlock(); } static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, @@ -401,6 +417,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF, iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED, struct iwl_rfi_deactivate_notif), + + RX_HANDLER_GRP(LEGACY_GROUP, + WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION, + iwl_mvm_time_sync_msmt_event, RX_HANDLER_SYNC, + struct iwl_time_msmt_notify), + RX_HANDLER_GRP(LEGACY_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, + iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, + struct iwl_time_msmt_cfm_notify), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -423,7 +448,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), - HCMD_NAME(FW_GET_ITEM_CMD), HCMD_NAME(TX_CMD), HCMD_NAME(SCD_QUEUE_CFG), HCMD_NAME(TXPATH_FLUSH), @@ -446,6 +470,8 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), HCMD_NAME(BT_COEX_CI), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION), HCMD_NAME(PHY_CONFIGURATION_CMD), HCMD_NAME(CALIB_RES_NOTIF_PHY_DB), HCMD_NAME(PHY_DB_CMD), @@ -484,7 +510,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(REPLY_BEACON_FILTERING_CMD), HCMD_NAME(D3_CONFIG_CMD), HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), - HCMD_NAME(OFFLOADS_QUERY_CMD), HCMD_NAME(MATCH_FOUND_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION), HCMD_NAME(WOWLAN_PATTERNS), @@ -518,6 +543,12 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(SESSION_PROTECTION_CMD), + HCMD_NAME(MAC_CONFIG_CMD), + HCMD_NAME(LINK_CONFIG_CMD), + HCMD_NAME(STA_CONFIG_CMD), + HCMD_NAME(AUX_STA_CMD), + HCMD_NAME(STA_REMOVE_CMD), + HCMD_NAME(STA_DISABLE_TX_CMD), HCMD_NAME(SESSION_PROTECTION_NOTIF), 
HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; @@ -547,6 +578,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(TLC_MNG_CONFIG_CMD), HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(SCD_QUEUE_CONFIG_CMD), + HCMD_NAME(SEC_KEY_CMD), HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), HCMD_NAME(STA_PM_NOTIF), @@ -557,6 +589,13 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ +static const struct iwl_hcmd_names iwl_mvm_scan_names[] = { + HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ static const struct iwl_hcmd_names iwl_mvm_location_names[] = { HCMD_NAME(TOF_RANGE_REQ_CMD), HCMD_NAME(TOF_CONFIG_CMD), @@ -574,6 +613,9 @@ static const struct iwl_hcmd_names iwl_mvm_location_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { + HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION), + HCMD_NAME(WOWLAN_INFO_NOTIFICATION), + HCMD_NAME(D3_END_NOTIFICATION), HCMD_NAME(STORED_BEACON_NTF), }; @@ -593,6 +635,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), + [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names), [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = @@ -974,10 +1017,14 @@ static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *c kfree_rcu(prev_conn_info, rcu_head); } -static void iwl_mvm_mei_rfkill(void *priv, bool blocked) +static void iwl_mvm_mei_rfkill(void *priv, bool blocked, + bool csme_taking_ownership) { struct iwl_mvm *mvm = priv; + if (blocked && !csme_taking_ownership) + return; + mvm->mei_rfkill_blocked = blocked; if (!mvm->hw_registered) return; @@ -1065,7 +1112,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, static const u8 no_reclaim_cmds[] = { TX_CMD, }; - int scan_size; + u32 max_agg; + size_t scan_size; u32 min_backoff; struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; @@ -1082,16 +1130,22 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ********************************/ hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + sizeof(struct iwl_mvm), + iwl_mvm_has_mld_api(fw) ? 
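Several of the iwl_mvm_*_names[] tables touched above (the new SCAN_GROUP table among them) carry the reminder that they must stay sorted by hex value because the command-to-name lookup is a binary search. A tiny standalone illustration of that lookup over {id, name} pairs (the IDs here are made up):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct hcmd_name {
	uint8_t cmd_id;
	const char *name;
};

/* Must be kept sorted by cmd_id, exactly like iwl_mvm_*_names[]. */
static const struct hcmd_name scan_names[] = {
	{ 0x0c, "SCAN_CFG_CMD" },          /* illustrative IDs only */
	{ 0x0d, "SCAN_REQ_UMAC" },
	{ 0x54, "OFFLOAD_MATCH_INFO_NOTIF" },
};

static int cmp_hcmd(const void *key, const void *elem)
{
	uint8_t id = *(const uint8_t *)key;
	const struct hcmd_name *e = elem;

	return (int)id - (int)e->cmd_id;
}

static const char *hcmd_name(uint8_t id)
{
	const struct hcmd_name *e =
		bsearch(&id, scan_names,
			sizeof(scan_names) / sizeof(scan_names[0]),
			sizeof(scan_names[0]), cmp_hcmd);

	return e ? e->name : "UNKNOWN";
}

int main(void)
{
	printf("0x54 -> %s\n", hcmd_name(0x54));
	printf("0x55 -> %s\n", hcmd_name(0x55));
	return 0;
}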
&iwl_mvm_mld_hw_ops : &iwl_mvm_hw_ops); if (!hw) return NULL; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + max_agg = IEEE80211_MAX_AMPDU_BUF_EHT; + else + max_agg = IEEE80211_MAX_AMPDU_BUF_HE; + + hw->max_rx_aggregation_subframes = max_agg; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; else - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; + hw->max_tx_aggregation_subframes = max_agg; op_mode = hw->priv; @@ -1107,6 +1161,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_get_acpi_tables(mvm); iwl_uefi_get_sgom_table(trans, &mvm->fwrt); + iwl_uefi_get_step_table(trans); mvm->init_status = 0; @@ -1173,6 +1228,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); INIT_LIST_HEAD(&mvm->add_stream_txqs); + spin_lock_init(&mvm->add_stream_lock); init_waitqueue_head(&mvm->rx_sync_waitq); @@ -1188,13 +1244,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); - mvm->cmd_ver.d0i3_resp = - iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, D0I3_END_CMD, - 0); - /* we only support version 1 */ - if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1)) - goto out_free; - mvm->cmd_ver.range_resp = iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, 5); @@ -1262,6 +1311,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->sta_remove_requires_queue_remove = trans_cfg.queue_alloc_cmd_ver > 0; + mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw); + /* Configure transport layer */ iwl_trans_configure(mvm->trans, &trans_cfg); @@ -1299,6 +1350,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); if (!mvm->scan_cmd) goto out_free; + mvm->scan_cmd_size = scan_size; /* invalidate ids to prevent accidental removal of sta_id 0 */ mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; @@ -1316,10 +1368,16 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, else memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); + iwl_mvm_ftm_initiator_smooth_config(mvm); + + iwl_mvm_init_time_sync(&mvm->time_sync); + mvm->debugfs_dir = dbgfs_dir; mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); + iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter); + if (iwl_mvm_start_get_nvm(mvm)) { /* * Getting NVM failed while CSME is the owner, but we are @@ -1370,7 +1428,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); - iwl_mvm_mei_device_down(mvm); + iwl_mvm_mei_device_state(mvm, false); } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) @@ -1413,6 +1471,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) kfree(mvm->error_recovery_buf); mvm->error_recovery_buf = NULL; + iwl_mvm_ptp_remove(mvm); + iwl_trans_op_mode_leave(mvm->trans); iwl_phy_db_free(mvm->phy_db); @@ -1541,7 +1601,9 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; - if (unlikely(pkt_len < rx_h->min_size)) + if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size, + "unexpected notification 0x%04x size %d, need %d\n", + 
rx_h->cmd_id, pkt_len, rx_h->min_size)) return; if (rx_h->context == RX_HANDLER_SYNC) { @@ -1675,10 +1737,16 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, txq = sta->txq[tid]; mvmtxq = iwl_mvm_txq_from_mac80211(txq); - mvmtxq->stopped = !start; + if (start) + clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); + else + set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state); - if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) + if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) { + local_bh_disable(); iwl_mvm_mac_itxq_xmit(mvm->hw, txq); + local_bh_enable(); + } } out: @@ -1876,6 +1944,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + if (mvm->pldr_sync) + return; + if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status)) diff --git a/mvm/phy-ctxt.c b/mvm/phy-ctxt.c index a3cefbc43e80..a5b432bc9e2f 100644 --- a/mvm/phy-ctxt.c +++ b/mvm/phy-ctxt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -14,16 +14,18 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: - return PHY_VHT_CHANNEL_MODE20; + return IWL_PHY_CHANNEL_MODE20; case NL80211_CHAN_WIDTH_40: - return PHY_VHT_CHANNEL_MODE40; + return IWL_PHY_CHANNEL_MODE40; case NL80211_CHAN_WIDTH_80: - return PHY_VHT_CHANNEL_MODE80; + return IWL_PHY_CHANNEL_MODE80; case NL80211_CHAN_WIDTH_160: - return PHY_VHT_CHANNEL_MODE160; + return IWL_PHY_CHANNEL_MODE160; + case NL80211_CHAN_WIDTH_320: + return IWL_PHY_CHANNEL_MODE320; default: WARN(1, "Invalid channel width=%u", chandef->width); - return PHY_VHT_CHANNEL_MODE20; + return IWL_PHY_CHANNEL_MODE20; } } @@ -33,34 +35,32 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef) */ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) { - switch (chandef->chan->center_freq - chandef->center_freq1) { - case -70: - return PHY_VHT_CTRL_POS_4_BELOW; - case -50: - return PHY_VHT_CTRL_POS_3_BELOW; - case -30: - return PHY_VHT_CTRL_POS_2_BELOW; - case -10: - return PHY_VHT_CTRL_POS_1_BELOW; - case 10: - return PHY_VHT_CTRL_POS_1_ABOVE; - case 30: - return PHY_VHT_CTRL_POS_2_ABOVE; - case 50: - return PHY_VHT_CTRL_POS_3_ABOVE; - case 70: - return PHY_VHT_CTRL_POS_4_ABOVE; - default: - WARN(1, "Invalid channel definition"); - fallthrough; - case 0: + int offs = chandef->chan->center_freq - chandef->center_freq1; + int abs_offs = abs(offs); + u8 ret; + + if (offs == 0) { /* * The FW is expected to check the control channel position only * when in HT/VHT and the channel width is not 20MHz. Return * this value as the default one. */ - return PHY_VHT_CTRL_POS_1_BELOW; + return 0; } + + /* this results in a value 0-7, i.e. 
fitting into 0b0111 */ + ret = (abs_offs - 10) / 20; + /* + * But we need the value to be in 0b1011 because 0b0100 is + * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in + * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000) + */ + ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) | + ((ret & BIT(2)) << 1); + /* and add the above bit */ + ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE; + + return ret; } /* @@ -151,7 +151,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw, + cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm, chandef->chan->band)); /* Set the channel info data */ @@ -163,15 +163,18 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, chains_static, chains_dynamic); } -static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, - struct iwl_mvm_phy_ctxt *ctxt, - u8 chains_static, u8 chains_dynamic) +int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, + u8 chains_static, u8 chains_dynamic) { struct iwl_rlc_config_cmd cmd = { .phy_id = cpu_to_le32(ctxt->id), }; - if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2) + if (ctxt->rlc_disabled) + return 0; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, + RLC_CONFIG_CMD), 0) < 2) return 0; BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE != @@ -382,12 +385,12 @@ static void iwl_mvm_binding_iterator(void *_data, u8 *mac, unsigned long *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; if (vif->type == NL80211_IFTYPE_STATION || vif->type == NL80211_IFTYPE_AP) - __set_bit(mvmvif->phy_ctxt->id, data); + __set_bit(mvmvif->deflink.phy_ctxt->id, data); } int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm) diff --git a/mvm/power.c b/mvm/power.c index b2ea2fca5376..9131b5f1bc76 100644 --- a/mvm/power.c +++ b/mvm/power.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -150,7 +150,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, #endif for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) { - if (!mvmvif->queue_params[ac].uapsd) + if (!mvmvif->deflink.queue_params[ac].uapsd) continue; if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) @@ -160,7 +160,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, cmd->uapsd_ac_flags |= BIT(ac); /* QNDP TID - the highest TID with no admission control */ - if (!tid_found && !mvmvif->queue_params[ac].acm) { + if (!tid_found && !mvmvif->deflink.queue_params[ac].acm) { tid_found = true; switch (ac) { case IEEE80211_AC_VO: @@ -223,7 +223,7 @@ static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, *is_p2p_standalone = false; break; case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) *is_p2p_standalone = false; break; @@ -237,8 +237,8 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, - ETH_ALEN)) + if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr, + vif->cfg.ap_addr)) return false; /* @@ -279,18 +279,25 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm 
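The rewritten iwl_mvm_get_ctrl_pos() in mvm/phy-ctxt.c above replaces the eight-way switch with arithmetic: the offset index (abs(offset) - 10) / 20 lands in bits 0-1 and 3 of the result, and bit 2 flags a control channel above the center frequency. A standalone sketch of that encoding so the resulting values can be eyeballed; the three mask values are inferred from the in-code comment, not copied from the firmware API header:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Inferred from the comment in phy-ctxt.c; check fw/api/phy-ctxt.h. */
#define PHY_CTRL_POS_OFFS_MSK 0x3	/* low two bits of the offset index */
#define PHY_CTRL_POS_ABOVE    0x4	/* control channel above the center */
#define PHY_CTRL_POS_OFFS_EXT 0x8	/* bit 2 of the offset index        */

static unsigned int ctrl_pos(int ctrl_freq, int center_freq1)
{
	int offs = ctrl_freq - center_freq1;
	int abs_offs = abs(offs);
	unsigned int ret;

	if (!offs)
		return 0;			/* 20 MHz: FW ignores the field */

	ret = (abs_offs - 10) / 20;		/* 0..7 for +/-10..150 MHz */
	/* move bit 2 of the index up into PHY_CTRL_POS_OFFS_EXT */
	ret = (ret & PHY_CTRL_POS_OFFS_MSK) | ((ret & 0x4) << 1);
	if (offs > 0)
		ret |= PHY_CTRL_POS_ABOVE;
	return ret;
}

int main(void)
{
	int offsets[] = { -70, -50, -30, -10, 10, 30, 50, 70, 150 };

	for (size_t i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("offset %+4d MHz -> 0x%x\n", offsets[i],
		       ctrl_pos(5000 + offsets[i], 5000));
	return 0;
}

For the legacy +/-10..70 MHz offsets the result stays within 0-7; the extra bit only comes into play for the wider offsets introduced with 320 MHz channels.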
*mvm, static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_channel *chan; + struct ieee80211_bss_conf *link_conf; bool radar_detect = false; + unsigned int link_id; rcu_read_lock(); - chanctx_conf = rcu_dereference(vif->chanctx_conf); - WARN_ON(!chanctx_conf); - if (chanctx_conf) { - chan = chanctx_conf->def.chan; - radar_detect = chan->flags & IEEE80211_CHAN_RADAR; + for_each_vif_active_link(vif, link_conf, link_id) { + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); + /* this happens on link switching, just ignore inactive ones */ + if (!chanctx_conf) + continue; + + radar_detect = !!(chanctx_conf->def.chan->flags & + IEEE80211_CHAN_RADAR); + if (radar_detect) + goto out; } - rcu_read_unlock(); +out: + rcu_read_unlock(); return radar_detect; } @@ -320,12 +327,11 @@ static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, if (WARN_ON(!dtimper_tu)) return; - /* configure skip over dtim up to 306TU - 314 msec */ - skip = max_t(u8, 1, 306 / dtimper_tu); + /* configure skip over dtim up to 900 TU DTIM interval */ + skip = max_t(u8, 1, 900 / dtimper_tu); } - /* the firmware really expects "look at every X DTIMs", so add 1 */ - cmd->skip_dtim_periods = 1 + skip; + cmd->skip_dtim_periods = skip; cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); } @@ -359,7 +365,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); - if (!vif->bss_conf.ps || !mvmvif->pm_enabled) + if (!vif->cfg.ps || !mvmvif->pm_enabled) return; if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p && @@ -495,9 +501,9 @@ void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid, - ETH_ALEN)) - eth_zero_addr(mvmvif->uapsd_misbehaving_bssid); + if (!ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr, + vif->cfg.ap_addr)) + eth_zero_addr(mvmvif->uapsd_misbehaving_ap_addr); } static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, @@ -505,13 +511,23 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, { u8 *ap_sta_id = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; - /* The ap_sta_id is not expected to change during current association - * so no explicit protection is needed - */ - if (mvmvif->ap_sta_id == *ap_sta_id) - memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid, - ETH_ALEN); + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; + + /* The ap_sta_id is not expected to change during current + * association so no explicit protection is needed + */ + if (link_info->ap_sta_id == *ap_sta_id) { + ether_addr_copy(mvmvif->uapsd_misbehaving_ap_addr, + vif->cfg.ap_addr); + break; + } + } + rcu_read_unlock(); } void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, @@ -552,7 +568,7 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool *disable_ps = _data; - if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX) + if (iwl_mvm_vif_is_active(mvmvif)) *disable_ps |= mvmvif->ps_disabled; } @@ -561,7 +577,12 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, { struct iwl_mvm_vif *mvmvif = 
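iwl_mvm_power_config_skip_dtim() in mvm/power.c above now budgets roughly 900 TU of sleep, computing skip = max(1, 900 / dtim_interval) and dropping the old "+1 period" adjustment. A quick arithmetic check of the resulting skip counts (a 100 TU beacon interval is assumed for the sample intervals):

#include <stdio.h>

/* skip = max(1, 900 / dtim_interval_tu), as in iwl_mvm_power_config_skip_dtim() */
static unsigned int skip_dtim_periods(unsigned int dtim_interval_tu)
{
	unsigned int skip = 900 / dtim_interval_tu;

	return skip ? skip : 1;
}

int main(void)
{
	/* DTIM interval in TU = DTIM period * beacon interval (100 TU here) */
	unsigned int dtim_tu[] = { 100, 200, 300, 500, 1000 };

	for (unsigned int i = 0; i < sizeof(dtim_tu) / sizeof(dtim_tu[0]); i++)
		printf("DTIM interval %4u TU -> skip %u periods (~%u TU asleep)\n",
		       dtim_tu[i], skip_dtim_periods(dtim_tu[i]),
		       skip_dtim_periods(dtim_tu[i]) * dtim_tu[i]);
	return 0;
}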
iwl_mvm_vif_from_mac80211(vif); struct iwl_power_vifs *power_iterator = _data; - bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX; + bool active; + + if (!mvmvif->uploaded) + return; + + active = iwl_mvm_vif_is_active(mvmvif); switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: @@ -634,29 +655,32 @@ static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, return; /* enable PM on bss if bss stand alone */ - if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) { + if (bss_mvmvif && vifs->bss_active && !vifs->p2p_active && + !vifs->ap_active) { bss_mvmvif->pm_enabled = true; return; } /* enable PM on p2p if p2p stand alone */ - if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) { + if (p2p_mvmvif && vifs->p2p_active && !vifs->bss_active && + !vifs->ap_active) { p2p_mvmvif->pm_enabled = true; return; } - if (vifs->bss_active && vifs->p2p_active) - client_same_channel = (bss_mvmvif->phy_ctxt->id == - p2p_mvmvif->phy_ctxt->id); - if (vifs->bss_active && vifs->ap_active) - ap_same_channel = (bss_mvmvif->phy_ctxt->id == - ap_mvmvif->phy_ctxt->id); + if (p2p_mvmvif && bss_mvmvif && vifs->bss_active && vifs->p2p_active) + client_same_channel = + iwl_mvm_have_links_same_channel(bss_mvmvif, p2p_mvmvif); + + if (bss_mvmvif && ap_mvmvif && vifs->bss_active && vifs->ap_active) + ap_same_channel = + iwl_mvm_have_links_same_channel(bss_mvmvif, ap_mvmvif); /* clients are not stand alone: enable PM if DCM */ if (!(client_same_channel || ap_same_channel)) { - if (vifs->bss_active) + if (bss_mvmvif && vifs->bss_active) bss_mvmvif->pm_enabled = true; - if (vifs->p2p_active) + if (p2p_mvmvif && vifs->p2p_active) p2p_mvmvif->pm_enabled = true; return; } @@ -887,7 +911,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vif->bss_conf.ps || + !vif->cfg.ps || iwl_mvm_vif_low_latency(mvmvif)); return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0); diff --git a/mvm/ptp.c b/mvm/ptp.c new file mode 100644 index 000000000000..e89259de6f4c --- /dev/null +++ b/mvm/ptp.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2021 - 2023 Intel Corporation + */ + +#include "mvm.h" +#include "iwl-debug.h" +#include <linux/timekeeping.h> +#include <linux/math64.h> + +#define IWL_PTP_GP2_WRAP 0x100000000ULL +#define IWL_PTP_WRAP_TIME (3600 * HZ) + +/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional + * part, which means that a value of 1 in one of those fields actually means + * 2^-16 ppm, and 2^16=65536 is 1 ppm. + */ +#define SCALE_FACTOR 65536000000ULL +#define IWL_PTP_WRAP_THRESHOLD_USEC (5000) + +#define IWL_PTP_GET_CROSS_TS_NUM 5 + +static void iwl_mvm_ptp_update_new_read(struct iwl_mvm *mvm, u32 gp2) +{ + /* If the difference is above the threshold, assume it's a wraparound. + * Otherwise assume it's an old read and ignore it. 
+ */ + if (gp2 < mvm->ptp_data.last_gp2 && + mvm->ptp_data.last_gp2 - gp2 < IWL_PTP_WRAP_THRESHOLD_USEC) { + IWL_DEBUG_INFO(mvm, + "PTP: ignore old read (gp2=%u, last_gp2=%u)\n", + gp2, mvm->ptp_data.last_gp2); + return; + } + + if (gp2 < mvm->ptp_data.last_gp2) { + mvm->ptp_data.wrap_counter++; + IWL_DEBUG_INFO(mvm, + "PTP: wraparound detected (new counter=%u)\n", + mvm->ptp_data.wrap_counter); + } + + mvm->ptp_data.last_gp2 = gp2; + schedule_delayed_work(&mvm->ptp_data.dwork, IWL_PTP_WRAP_TIME); +} + +u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time_ns) +{ + struct ptp_data *data = &mvm->ptp_data; + u64 last_gp2_ns = mvm->ptp_data.scale_update_gp2 * NSEC_PER_USEC; + u64 res; + u64 diff; + + iwl_mvm_ptp_update_new_read(mvm, + div64_u64(base_time_ns, NSEC_PER_USEC)); + + IWL_DEBUG_INFO(mvm, "base_time_ns=%llu, wrap_counter=%u\n", + (unsigned long long)base_time_ns, data->wrap_counter); + + base_time_ns = base_time_ns + + (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC); + + /* It is possible that a GP2 timestamp was received from fw before the + * last scale update. Since we don't know how to scale - ignore it. + */ + if (base_time_ns < last_gp2_ns) { + IWL_DEBUG_INFO(mvm, "Time before scale update - ignore\n"); + return 0; + } + + diff = base_time_ns - last_gp2_ns; + IWL_DEBUG_INFO(mvm, "diff ns=%llu\n", (unsigned long long)diff); + + diff = mul_u64_u64_div_u64(diff, data->scaled_freq, + SCALE_FACTOR); + IWL_DEBUG_INFO(mvm, "scaled diff ns=%llu\n", (unsigned long long)diff); + + res = data->scale_update_adj_time_ns + data->delta + diff; + + IWL_DEBUG_INFO(mvm, "base=%llu delta=%lld adj=%llu\n", + (unsigned long long)base_time_ns, (long long)data->delta, + (unsigned long long)res); + return res; +} + +static int +iwl_mvm_get_crosstimestamp_fw(struct iwl_mvm *mvm, u32 *gp2, u64 *sys_time) +{ + struct iwl_synced_time_cmd synced_time_cmd = { + .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH) + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD), + .flags = CMD_WANT_SKB, + .data[0] = &synced_time_cmd, + .len[0] = sizeof(synced_time_cmd), + }; + struct iwl_synced_time_rsp *resp; + struct iwl_rx_packet *pkt; + int ret; + u64 gp2_10ns; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + pkt = cmd.resp_pkt; + + if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) { + IWL_ERR(mvm, "PTP: Invalid command response\n"); + iwl_free_resp(&cmd); + return -EIO; + } + + resp = (void *)pkt->data; + + gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 | + le32_to_cpu(resp->gp2_timestamp_lo); + *gp2 = div_u64(gp2_10ns, 100); + + *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 | + le32_to_cpu(resp->platform_timestamp_lo); + + return ret; +} + +static void iwl_mvm_phc_get_crosstimestamp_loop(struct iwl_mvm *mvm, + ktime_t *sys_time, u32 *gp2) +{ + u64 diff = 0, new_diff; + u64 tmp_sys_time; + u32 tmp_gp2; + int i; + + for (i = 0; i < IWL_PTP_GET_CROSS_TS_NUM; i++) { + iwl_mvm_get_sync_time(mvm, CLOCK_REALTIME, &tmp_gp2, NULL, + &tmp_sys_time); + new_diff = tmp_sys_time - ((u64)tmp_gp2 * NSEC_PER_USEC); + if (!diff || new_diff < diff) { + *sys_time = tmp_sys_time; + *gp2 = tmp_gp2; + diff = new_diff; + IWL_DEBUG_INFO(mvm, "PTP: new times: gp2=%u sys=%lld\n", + *gp2, *sys_time); + } + } +} + +static int +iwl_mvm_phc_get_crosstimestamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + 
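iwl_mvm_ptp_update_new_read() in the new mvm/ptp.c above extends the 32-bit GP2 microsecond counter into a monotonic 64-bit timeline: a reading slightly below the previous one (within IWL_PTP_WRAP_THRESHOLD_USEC) is treated as a stale read, anything lower than that as a wraparound, and the delayed work re-reads GP2 often enough that no wrap can be missed. A self-contained sketch of that extension logic with the same constants and simplified state:

#include <stdio.h>
#include <stdint.h>

#define GP2_WRAP           0x100000000ULL   /* 2^32 us, ~71.6 minutes */
#define WRAP_THRESHOLD_US  5000ULL

struct gp2_state {
	uint32_t last_gp2;
	uint32_t wrap_counter;
};

/* Returns the 64-bit extended timestamp in microseconds. */
static uint64_t gp2_extend(struct gp2_state *s, uint32_t gp2)
{
	if (gp2 < s->last_gp2 && s->last_gp2 - gp2 < WRAP_THRESHOLD_US) {
		/* slightly older read: don't touch the wrap state */
		return gp2 + (uint64_t)s->wrap_counter * GP2_WRAP;
	}

	if (gp2 < s->last_gp2)
		s->wrap_counter++;           /* counter wrapped around */

	s->last_gp2 = gp2;
	return gp2 + (uint64_t)s->wrap_counter * GP2_WRAP;
}

int main(void)
{
	struct gp2_state s = { 0, 0 };
	uint32_t reads[] = { 100, 4294967000u, 4294966500u /* stale */, 200 /* wrap */ };

	for (unsigned int i = 0; i < sizeof(reads) / sizeof(reads[0]); i++)
		printf("gp2=%10u -> extended=%llu us (wraps=%u)\n", reads[i],
		       (unsigned long long)gp2_extend(&s, reads[i]),
		       s.wrap_counter);
	return 0;
}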
int ret = 0; + /* Raw value read from GP2 register in usec */ + u32 gp2; + /* GP2 value in ns*/ + s64 gp2_ns; + /* System (wall) time */ + ktime_t sys_time; + + memset(xtstamp, 0, sizeof(struct system_device_crosststamp)); + + if (!mvm->ptp_data.ptp_clock) { + IWL_ERR(mvm, "No PHC clock registered\n"); + return -ENODEV; + } + + mutex_lock(&mvm->mutex); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SYNCED_TIME)) { + ret = iwl_mvm_get_crosstimestamp_fw(mvm, &gp2, &sys_time); + + if (ret) + goto out; + } else { + iwl_mvm_phc_get_crosstimestamp_loop(mvm, &sys_time, &gp2); + } + + gp2_ns = iwl_mvm_ptp_get_adj_time(mvm, (u64)gp2 * NSEC_PER_USEC); + + IWL_INFO(mvm, "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n", + gp2, mvm->ptp_data.last_gp2, gp2_ns, (s64)sys_time); + + /* System monotonic raw time is not used */ + xtstamp->device = (ktime_t)gp2_ns; + xtstamp->sys_realtime = sys_time; + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static void iwl_mvm_ptp_work(struct work_struct *wk) +{ + struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, + ptp_data.dwork.work); + u32 gp2; + + mutex_lock(&mvm->mutex); + gp2 = iwl_mvm_get_systime(mvm); + iwl_mvm_ptp_update_new_read(mvm, gp2); + mutex_unlock(&mvm->mutex); +} + +static int iwl_mvm_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + u64 gp2; + u64 ns; + + mutex_lock(&mvm->mutex); + gp2 = iwl_mvm_get_systime(mvm); + ns = iwl_mvm_ptp_get_adj_time(mvm, gp2 * NSEC_PER_USEC); + mutex_unlock(&mvm->mutex); + + *ts = ns_to_timespec64(ns); + return 0; +} + +static int iwl_mvm_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + struct ptp_data *data = container_of(ptp, struct ptp_data, + ptp_clock_info); + + mutex_lock(&mvm->mutex); + data->delta += delta; + IWL_DEBUG_INFO(mvm, "delta=%lld, new delta=%lld\n", (long long)delta, + (long long)data->delta); + mutex_unlock(&mvm->mutex); + return 0; +} + +static int iwl_mvm_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct iwl_mvm *mvm = container_of(ptp, struct iwl_mvm, + ptp_data.ptp_clock_info); + struct ptp_data *data = &mvm->ptp_data; + u32 gp2; + + mutex_lock(&mvm->mutex); + + /* Must call _iwl_mvm_ptp_get_adj_time() before updating + * data->scale_update_gp2 or data->scaled_freq since + * scale_update_adj_time_ns should reflect the previous scaled_freq. + */ + gp2 = iwl_mvm_get_systime(mvm); + data->scale_update_adj_time_ns = + iwl_mvm_ptp_get_adj_time(mvm, gp2 * NSEC_PER_USEC); + data->scale_update_gp2 = gp2; + data->wrap_counter = 0; + data->delta = 0; + + data->scaled_freq = SCALE_FACTOR + scaled_ppm; + IWL_DEBUG_INFO(mvm, "adjfine: scaled_ppm=%ld new=%llu\n", + scaled_ppm, (unsigned long long)data->scaled_freq); + + mutex_unlock(&mvm->mutex); + return 0; +} + +/* iwl_mvm_ptp_init - initialize PTP for devices which support it. + * @mvm: internal mvm structure, see &struct iwl_mvm. + * + * Performs the required steps for enabling PTP support. 
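iwl_mvm_ptp_adjfine() above stores scaled_freq = SCALE_FACTOR + scaled_ppm (ppm with a 16-bit fractional part, so 65536 equals 1 ppm) and iwl_mvm_ptp_get_adj_time() later rescales every GP2 delta with mul_u64_u64_div_u64(diff, scaled_freq, SCALE_FACTOR). A small sketch of that scaling, using the GCC/Clang __int128 extension in place of the kernel helper:

#include <stdio.h>
#include <stdint.h>

#define SCALE_FACTOR 65536000000ULL	/* 1e6 ppm in scaled-ppm units (1 ppm == 65536) */

/* diff_ns * (SCALE_FACTOR + scaled_ppm) / SCALE_FACTOR */
static uint64_t scale_delta(uint64_t diff_ns, long scaled_ppm)
{
	/* scaled_freq is what adjfine stores: SCALE_FACTOR +/- the adjustment */
	uint64_t scaled_freq = (uint64_t)((long long)SCALE_FACTOR + scaled_ppm);
	unsigned __int128 num = (unsigned __int128)diff_ns * scaled_freq;

	return (uint64_t)(num / SCALE_FACTOR);
}

int main(void)
{
	uint64_t one_sec = 1000000000ULL;

	/* +1 ppm is scaled_ppm = 65536: one second grows by 1000 ns */
	printf("+1 ppm  : %llu ns\n",
	       (unsigned long long)scale_delta(one_sec, 65536));
	/* half a ppm, using the 16-bit fractional part */
	printf("+0.5 ppm: %llu ns\n",
	       (unsigned long long)scale_delta(one_sec, 32768));
	/* negative adjustment shrinks the interval */
	printf("-2 ppm  : %llu ns\n",
	       (unsigned long long)scale_delta(one_sec, -2 * 65536));
	return 0;
}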
+ */ +void iwl_mvm_ptp_init(struct iwl_mvm *mvm) +{ + /* Warn if the interface already has a ptp_clock defined */ + if (WARN_ON(mvm->ptp_data.ptp_clock)) + return; + + mvm->ptp_data.ptp_clock_info.owner = THIS_MODULE; + mvm->ptp_data.ptp_clock_info.max_adj = 0x7fffffff; + mvm->ptp_data.ptp_clock_info.getcrosststamp = + iwl_mvm_phc_get_crosstimestamp; + mvm->ptp_data.ptp_clock_info.adjfine = iwl_mvm_ptp_adjfine; + mvm->ptp_data.ptp_clock_info.adjtime = iwl_mvm_ptp_adjtime; + mvm->ptp_data.ptp_clock_info.gettime64 = iwl_mvm_ptp_gettime; + mvm->ptp_data.scaled_freq = SCALE_FACTOR; + + /* Give a short 'friendly name' to identify the PHC clock */ + snprintf(mvm->ptp_data.ptp_clock_info.name, + sizeof(mvm->ptp_data.ptp_clock_info.name), + "%s", "iwlwifi-PTP"); + + INIT_DELAYED_WORK(&mvm->ptp_data.dwork, iwl_mvm_ptp_work); + + mvm->ptp_data.ptp_clock = + ptp_clock_register(&mvm->ptp_data.ptp_clock_info, mvm->dev); + + if (IS_ERR(mvm->ptp_data.ptp_clock)) { + IWL_ERR(mvm, "Failed to register PHC clock (%ld)\n", + PTR_ERR(mvm->ptp_data.ptp_clock)); + mvm->ptp_data.ptp_clock = NULL; + } else if (mvm->ptp_data.ptp_clock) { + IWL_INFO(mvm, "Registered PHC clock: %s, with index: %d\n", + mvm->ptp_data.ptp_clock_info.name, + ptp_clock_index(mvm->ptp_data.ptp_clock)); + } +} + +/* iwl_mvm_ptp_remove - disable PTP device. + * @mvm: internal mvm structure, see &struct iwl_mvm. + * + * Disable PTP support. + */ +void iwl_mvm_ptp_remove(struct iwl_mvm *mvm) +{ + if (mvm->ptp_data.ptp_clock) { + IWL_INFO(mvm, "Unregistering PHC clock: %s, with index: %d\n", + mvm->ptp_data.ptp_clock_info.name, + ptp_clock_index(mvm->ptp_data.ptp_clock)); + + ptp_clock_unregister(mvm->ptp_data.ptp_clock); + mvm->ptp_data.ptp_clock = NULL; + memset(&mvm->ptp_data.ptp_clock_info, 0, + sizeof(mvm->ptp_data.ptp_clock_info)); + mvm->ptp_data.last_gp2 = 0; + cancel_delayed_work_sync(&mvm->ptp_data.dwork); + } +} diff --git a/mvm/quota.c b/mvm/quota.c index c862bd243b55..aad2614af9ad 100644 --- a/mvm/quota.c +++ b/mvm/quota.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2021-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -33,11 +33,11 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, if (vif == data->disabled_vif) return; - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; /* currently, PHY ID == binding ID */ - id = mvmvif->phy_ctxt->id; + id = mvmvif->deflink.phy_ctxt->id; /* need at least one binding per PHY */ BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS); @@ -47,7 +47,7 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, switch (vif->type) { case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) break; return; case NL80211_IFTYPE_AP: @@ -67,9 +67,10 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, } if (data->colors[id] < 0) - data->colors[id] = mvmvif->phy_ctxt->color; + data->colors[id] = mvmvif->deflink.phy_ctxt->color; else - WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); + WARN_ON_ONCE(data->colors[id] != + mvmvif->deflink.phy_ctxt->color); data->n_interfaces[id]++; @@ -99,7 +100,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, if (!mvmvif->ap_ibss_active) return; - phy_id = mvmvif->phy_ctxt->id; + phy_id = mvmvif->deflink.phy_ctxt->id; beacon_int = mvm->noa_vif->bss_conf.beacon_int; for (i = 0; i < MAX_BINDINGS; i++) { diff 
--git a/mvm/rfi.c b/mvm/rfi.c index bb77bc9aa821..2ecd32bed752 100644 --- a/mvm/rfi.c +++ b/mvm/rfi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020 - 2021 Intel Corporation + * Copyright (C) 2020 - 2022 Intel Corporation */ #include "mvm.h" @@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { PHY_BAND_6, PHY_BAND_6,}}, }; +bool iwl_rfi_supported(struct iwl_mvm *mvm) +{ + /* The feature depends on a platform bugfix, so for now + * it's always disabled. + * When the platform support detection is implemented we should + * check FW TLV and platform support instead. + */ + return false; +} + int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table) { int ret; @@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t .len[0] = sizeof(cmd), }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return -EOPNOTSUPP; lockdep_assert_held(&mvm->mutex); @@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) .flags = CMD_WANT_SKB, }; - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) + if (!iwl_rfi_supported(mvm)) return ERR_PTR(-EOPNOTSUPP); mutex_lock(&mvm->mutex); diff --git a/mvm/rs-fw.c b/mvm/rs-fw.c index 9830d2663689..6cba8a353b53 100644 --- a/mvm/rs-fw.c +++ b/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -9,9 +9,11 @@ #include "iwl-op-mode.h" #include "mvm.h" -static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) +static u8 rs_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta) { - switch (sta->bandwidth) { + switch (link_sta->bandwidth) { + case IEEE80211_STA_RX_BW_320: + return IWL_TLC_MNG_CH_WIDTH_320MHZ; case IEEE80211_STA_RX_BW_160: return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: @@ -36,11 +38,11 @@ static u8 rs_fw_set_active_chains(u8 chains) return fw_chains; } -static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) +static u8 rs_fw_sgi_cw_support(struct ieee80211_link_sta *link_sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; u8 supp = 0; if (he_cap->has_he) @@ -59,12 +61,13 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) } static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - struct ieee80211_supported_band *sband) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *sband_he_cap) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; bool vht_ena = vht_cap->vht_supported; u16 flags = 0; @@ -90,17 +93,17 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, 
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - if (sband->iftype_data && sband->iftype_data->he_cap.has_he && - !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] & - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + if (sband_he_cap && + !(sband_he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK && - sband->iftype_data && - sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) + sband_he_cap && + sband_he_cap->he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; return flags; @@ -130,20 +133,20 @@ int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, } static void -rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, +rs_fw_vht_set_enabled_rates(const struct ieee80211_link_sta *link_sta, const struct ieee80211_sta_vht_cap *vht_cap, struct iwl_tlc_config_cmd_v4 *cmd) { u16 supp; int i, highest_mcs; - u8 max_nss = sta->rx_nss; + u8 max_nss = link_sta->rx_nss; struct ieee80211_vht_cap ieee_vht_cap = { .vht_cap_info = cpu_to_le32(vht_cap->cap), .supp_mcs = vht_cap->vht_mcs, }; /* the station support only a single receive chain */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC) + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC) max_nss = 1; for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) { @@ -154,7 +157,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, continue; supp = BIT(highest_mcs + 1) - 1; - if (sta->bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp); @@ -163,7 +166,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, * configuration is supported - only for MCS 0 since we already * decoded the MCS bits anyway ourselves. 
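rs_fw_vht_set_enabled_rates() above (now operating on a link_sta) builds the per-NSS rate mask as BIT(highest_mcs + 1) - 1 and strips MCS 9 when the peer is limited to 20 MHz, where that rate is not valid. A short sketch of that bitmask construction, with the highest-MCS lookup stubbed out (the real code derives it from the VHT MCS map):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BIT(n) (1U << (n))
#define MCS9   9

/* supp = all MCS 0..highest_mcs enabled; MCS 9 is invalid at 20 MHz */
static uint16_t vht_supp_mask(int highest_mcs, bool is_20mhz)
{
	uint16_t supp = BIT(highest_mcs + 1) - 1;

	if (is_20mhz)
		supp &= ~BIT(MCS9);
	return supp;
}

int main(void)
{
	printf("MCS0-9, 80 MHz: 0x%03x\n", vht_supp_mask(9, false)); /* 0x3ff */
	printf("MCS0-9, 20 MHz: 0x%03x\n", vht_supp_mask(9, true));  /* 0x1ff */
	printf("MCS0-8, 80 MHz: 0x%03x\n", vht_supp_mask(8, false)); /* 0x1ff */
	return 0;
}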
*/ - if (sta->bandwidth == IEEE80211_STA_RX_BW_160 && + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 && ieee80211_get_vht_max_nss(&ieee_vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, nss) >= nss) @@ -190,22 +193,20 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) } static void -rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, - struct ieee80211_supported_band *sband, +rs_fw_he_set_enabled_rates(const struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *sband_he_cap, struct iwl_tlc_config_cmd_v4 *cmd) { - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); - u16 tx_mcs_80 = - le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80); - u16 tx_mcs_160 = - le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160); + u16 tx_mcs_80 = le16_to_cpu(sband_he_cap->he_mcs_nss_supp.tx_mcs_80); + u16 tx_mcs_160 = le16_to_cpu(sband_he_cap->he_mcs_nss_supp.tx_mcs_160); int i; - u8 nss = sta->rx_nss; + u8 nss = link_sta->rx_nss; /* the station support only a single receive chain */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC) + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC) nss = 1; for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) { @@ -238,19 +239,146 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } } -static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, +static u8 rs_fw_eht_max_nss(u8 rx_nss, u8 tx_nss) +{ + u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX); + u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX); + /* the max nss that can be used, + * is the min with our tx capa and the peer rx capa. 
+ */ + return min(tx, rx); +} + +#define MAX_NSS_MCS(mcs_num, rx, tx) \ + rs_fw_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss, \ + (tx)->rx_tx_mcs ##mcs_num## _max_nss) + +static void rs_fw_set_eht_mcs_nss(__le16 ht_rates[][3], + enum IWL_TLC_MCS_PER_BW bw, + u8 max_nss, u16 mcs_msk) +{ + if (max_nss >= 2) + ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk); + + if (max_nss >= 1) + ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk); +} + +static const +struct ieee80211_eht_mcs_nss_supp_bw * +rs_fw_rs_mcs2eht_mcs(enum IWL_TLC_MCS_PER_BW bw, + const struct ieee80211_eht_mcs_nss_supp *eht_mcs) +{ + switch (bw) { + case IWL_TLC_MCS_PER_BW_80: + return &eht_mcs->bw._80; + case IWL_TLC_MCS_PER_BW_160: + return &eht_mcs->bw._160; + case IWL_TLC_MCS_PER_BW_320: + return &eht_mcs->bw._320; + default: + return NULL; + } +} + +static void +rs_fw_eht_set_enabled_rates(struct ieee80211_vif *vif, + const struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *sband_he_cap, + const struct ieee80211_sta_eht_cap *sband_eht_cap, + struct iwl_tlc_config_cmd_v4 *cmd) +{ + /* peer RX mcs capa */ + const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs = + &link_sta->eht_cap.eht_mcs_nss_supp; + /* our TX mcs capa */ + const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs = + &sband_eht_cap->eht_mcs_nss_supp; + + enum IWL_TLC_MCS_PER_BW bw; + struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20; + struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20; + + /* peer is 20Mhz only */ + if (vif->type == NL80211_IFTYPE_AP && + !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + mcs_rx_20 = eht_rx_mcs->only_20mhz; + } else { + mcs_rx_20.rx_tx_mcs7_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_rx_20.rx_tx_mcs9_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_rx_20.rx_tx_mcs11_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss; + mcs_rx_20.rx_tx_mcs13_max_nss = eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss; + } + + /* nic is 20Mhz only */ + if (!(sband_he_cap->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + mcs_tx_20 = eht_tx_mcs->only_20mhz; + } else { + mcs_tx_20.rx_tx_mcs7_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_tx_20.rx_tx_mcs9_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss; + mcs_tx_20.rx_tx_mcs11_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss; + mcs_tx_20.rx_tx_mcs13_max_nss = eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss; + } + + /* rates for 20/40/80 bw */ + bw = IWL_TLC_MCS_PER_BW_80; + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20), GENMASK(7, 0)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20), GENMASK(9, 8)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20), GENMASK(11, 10)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20), GENMASK(13, 12)); + + /* rate for 160/320 bw */ + for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) { + const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx = + rs_fw_rs_mcs2eht_mcs(bw, eht_rx_mcs); + const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx = + rs_fw_rs_mcs2eht_mcs(bw, eht_tx_mcs); + + /* got unsupported index for bw */ + if (!mcs_rx || !mcs_tx) + continue; + + /* break out if we don't support the bandwidth */ + if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ)) + break; + + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(9, mcs_rx, mcs_tx), GENMASK(9, 0)); + 
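rs_fw_eht_max_nss() above caps each EHT MCS range at the minimum of our TX capability and the peer's RX capability, both packed as nibbles of the EHT MCS/NSS set (RX in the low nibble, TX in the high one; the mask values below assume that layout rather than quoting the mac80211 definitions). A tiny sketch of that per-range min():

#include <stdio.h>
#include <stdint.h>

#define EHT_MCS_NSS_RX 0x0f	/* low nibble: max RX NSS for this MCS range */
#define EHT_MCS_NSS_TX 0xf0	/* high nibble: max TX NSS for this MCS range */

/* min(our TX capability, peer RX capability), as in rs_fw_eht_max_nss() */
static uint8_t eht_max_nss(uint8_t peer_rx_byte, uint8_t our_tx_byte)
{
	uint8_t rx = peer_rx_byte & EHT_MCS_NSS_RX;
	uint8_t tx = (our_tx_byte & EHT_MCS_NSS_TX) >> 4;

	return tx < rx ? tx : rx;
}

int main(void)
{
	/* peer advertises 2x2 RX, we advertise 4x4 TX for MCS 8-9 */
	uint8_t peer_mcs9 = 0x22, ours_mcs9 = 0x44;
	/* peer advertises 1x1 RX for MCS 12-13, we advertise 2x2 TX */
	uint8_t peer_mcs13 = 0x11, ours_mcs13 = 0x22;

	printf("MCS 8-9  : usable NSS = %u\n", eht_max_nss(peer_mcs9, ours_mcs9));
	printf("MCS 12-13: usable NSS = %u\n", eht_max_nss(peer_mcs13, ours_mcs13));
	return 0;
}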
rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(11, mcs_rx, mcs_tx), GENMASK(11, 10)); + rs_fw_set_eht_mcs_nss(cmd->ht_rates, bw, + MAX_NSS_MCS(13, mcs_rx, mcs_tx), GENMASK(13, 12)); + } + + /* the station support only a single receive chain */ + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC || + link_sta->rx_nss < 2) + memset(cmd->ht_rates[IWL_TLC_NSS_2], 0, + sizeof(cmd->ht_rates[IWL_TLC_NSS_2])); +} + +static void rs_fw_set_supp_rates(struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, struct ieee80211_supported_band *sband, + const struct ieee80211_sta_he_cap *sband_he_cap, + const struct ieee80211_sta_eht_cap *sband_eht_cap, struct iwl_tlc_config_cmd_v4 *cmd) { int i; u16 supp = 0; unsigned long tmp; /* must be unsigned long for for_each_set_bit */ - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; /* non HT rates */ - tmp = sta->supp_rates[sband->band]; + tmp = link_sta->supp_rates[sband->band]; for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); @@ -258,19 +386,23 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, cmd->mode = IWL_TLC_MNG_MODE_NON_HT; /* HT/VHT rates */ - if (he_cap->has_he) { + if (link_sta->eht_cap.has_eht && sband_he_cap && sband_eht_cap) { + cmd->mode = IWL_TLC_MNG_MODE_EHT; + rs_fw_eht_set_enabled_rates(vif, link_sta, sband_he_cap, + sband_eht_cap, cmd); + } else if (he_cap->has_he && sband_he_cap) { cmd->mode = IWL_TLC_MNG_MODE_HE; - rs_fw_he_set_enabled_rates(sta, sband, cmd); + rs_fw_he_set_enabled_rates(link_sta, sband_he_cap, cmd); } else if (vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; - rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); + rs_fw_vht_set_enabled_rates(link_sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); /* the station support only a single receive chain */ - if (sta->smps_mode == IEEE80211_SMPS_STATIC) + if (link_sta->smps_mode == IEEE80211_SMPS_STATIC) cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = 0; else @@ -285,15 +417,18 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tlc_update_notif *notif; struct ieee80211_sta *sta; + struct ieee80211_link_sta *link_sta; struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_link_sta *mvm_link_sta; struct iwl_lq_sta_rs_fw *lq_sta; u32 flags; rcu_read_lock(); notif = (void *)pkt->data; + link_sta = rcu_dereference(mvm->fw_id_to_link_sta[notif->sta_id]); sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); - if (IS_ERR_OR_NULL(sta)) { + if (IS_ERR_OR_NULL(sta) || !link_sta) { /* can happen in remove station flow where mvm removed internally * the station before removing from FW */ @@ -313,7 +448,14 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, flags = le32_to_cpu(notif->flags); - lq_sta = &mvmsta->lq_sta.rs_fw; + mvm_link_sta = rcu_dereference(mvmsta->link[link_sta->link_id]); + if (!mvm_link_sta) { + IWL_DEBUG_RATE(mvm, + "Invalid mvmsta RCU pointer for link (%d) of sta id (%d) in TLC notification\n", + link_sta->link_id, notif->sta_id); + goto out; + } + lq_sta = &mvm_link_sta->lq_sta.rs_fw; if 
(flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; @@ -336,34 +478,34 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); } - if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvmsta->orig_amsdu_len) { + if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) { u16 size = le32_to_cpu(notif->amsdu_size); int i; - if (sta->max_amsdu_len < size) { + if (link_sta->agg.max_amsdu_len < size) { /* - * In debug sta->max_amsdu_len < size + * In debug link_sta->agg.max_amsdu_len < size * so also check with orig_amsdu_len which holds the * original data before debugfs changed the value */ - WARN_ON(mvmsta->orig_amsdu_len < size); + WARN_ON(mvm_link_sta->orig_amsdu_len < size); goto out; } mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; - sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; + link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len; for (i = 0; i < IWL_MAX_TID_COUNT; i++) { if (mvmsta->amsdu_enabled & BIT(i)) - sta->max_tid_amsdu_len[i] = + link_sta->agg.max_tid_amsdu_len[i] = iwl_mvm_max_amsdu_size(mvm, sta, i); else /* * Not so elegant, but this will effectively * prevent AMSDU on this TID */ - sta->max_tid_amsdu_len[i] = 1; + link_sta->agg.max_tid_amsdu_len[i] = 1; } IWL_DEBUG_RATE(mvm, @@ -375,14 +517,19 @@ out: rcu_read_unlock(); } -u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) +u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap; + + if (WARN_ON_ONCE(!link_conf->chandef.chan)) + return IEEE80211_MAX_MPDU_LEN_VHT_3895; - if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { - switch (le16_get_bits(sta->he_6ghz_capa.capa, + if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + switch (le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; @@ -391,8 +538,18 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) default: return IEEE80211_MAX_MPDU_LEN_VHT_3895; } - } else - if (vht_cap->vht_supported) { + } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ && + eht_cap->has_eht) { + switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0], + IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) { + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454: + return IEEE80211_MAX_MPDU_LEN_VHT_11454; + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991: + return IEEE80211_MAX_MPDU_LEN_VHT_7991; + default: + return IEEE80211_MAX_MPDU_LEN_VHT_3895; + } + } else if (vht_cap->vht_supported) { switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: return IEEE80211_MAX_MPDU_LEN_VHT_11454; @@ -417,40 +574,79 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) return 0; } -void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool update) +void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct 
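rs_fw_get_max_amsdu_len() above picks the maximum MPDU length from the HE 6 GHz capability on 6 GHz, from the EHT MAC capability for 2.4 GHz EHT peers, and from the VHT capability otherwise. A condensed sketch of that precedence; the two-bit length codes follow the 802.11 3895/7991/11454 convention, and the capability field layouts are deliberately flattened into plain parameters rather than mirroring the real structs:

#include <stdio.h>
#include <stdbool.h>

enum max_mpdu { MPDU_3895 = 3895, MPDU_7991 = 7991, MPDU_11454 = 11454 };

/* cap2len: the 2-bit "maximum MPDU length" encoding */
static enum max_mpdu cap2len(unsigned int code)
{
	switch (code) {
	case 2: return MPDU_11454;
	case 1: return MPDU_7991;
	default: return MPDU_3895;
	}
}

static enum max_mpdu max_amsdu_len(bool band_6ghz, bool band_2ghz_eht,
				   bool vht_supported,
				   unsigned int he_6ghz_code,
				   unsigned int eht_code,
				   unsigned int vht_code)
{
	if (band_6ghz)
		return cap2len(he_6ghz_code);
	if (band_2ghz_eht)
		return cap2len(eht_code);
	if (vht_supported)
		return cap2len(vht_code);
	return MPDU_3895;	/* HT/legacy fallback, simplified here */
}

int main(void)
{
	printf("6 GHz, code 2      -> %d\n", max_amsdu_len(true, false, false, 2, 0, 0));
	printf("2.4 GHz EHT, code 1 -> %d\n", max_amsdu_len(false, true, false, 0, 1, 0));
	printf("5 GHz VHT, code 0   -> %d\n", max_amsdu_len(false, false, true, 0, 0, 0));
	return 0;
}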
ieee80211_link_sta *link_sta, + enum nl80211_band band) { struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD); struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; - u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); + u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta, link_conf, link_sta); + const struct ieee80211_sta_he_cap *sband_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, vif); + const struct ieee80211_sta_eht_cap *sband_eht_cap = + ieee80211_get_eht_iftype_cap_vif(sband, vif); + struct iwl_mvm_link_sta *mvm_link_sta; + struct iwl_lq_sta_rs_fw *lq_sta; struct iwl_tlc_config_cmd_v4 cfg_cmd = { - .sta_id = mvmsta->sta_id, - .max_ch_width = update ? - rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, - .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)), + .max_ch_width = mvmsta->authorized ? + rs_fw_bw_from_sta_bw(link_sta) : IWL_TLC_MNG_CH_WIDTH_20MHZ, + .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, vif, link_sta, + sband_he_cap)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), - .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), + .sgi_ch_width_supp = rs_fw_sgi_cw_support(link_sta), .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ? cpu_to_le16(max_amsdu_len) : 0, }; - int ret; + unsigned int link_id = link_conf->link_id; int cmd_ver; + int ret; + + /* Enable external EHT LTF only for GL device and if there's + * mutual support by AP and client + */ + if (CSR_HW_REV_TYPE(mvm->trans->hw_rev) == IWL_CFG_MAC_TYPE_GL && + sband_eht_cap && + sband_eht_cap->eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF && + link_sta->eht_cap.has_eht && + link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) { + IWL_DEBUG_RATE(mvm, "Set support for Extra EHT LTF\n"); + cfg_cmd.flags |= + cpu_to_le16(IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK); + } + + rcu_read_lock(); + mvm_link_sta = rcu_dereference(mvmsta->link[link_id]); + if (WARN_ON_ONCE(!mvm_link_sta)) { + rcu_read_unlock(); + return; + } + cfg_cmd.sta_id = mvm_link_sta->sta_id; + + lq_sta = &mvm_link_sta->lq_sta.rs_fw; memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); + rcu_read_unlock(); + #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_fw_set_supp_rates(sta, sband, &cfg_cmd); + rs_fw_set_supp_rates(vif, link_sta, sband, + sband_he_cap, sband_eht_cap, + &cfg_cmd); /* * since TLC offload works with one mode we can assume * that only vht/ht is used and also set it as station max amsdu */ - sta->max_amsdu_len = max_amsdu_len; + sta->deflink.agg.max_amsdu_len = max_amsdu_len; cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, @@ -514,16 +710,18 @@ int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, return 0; } -void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) +void iwl_mvm_rs_add_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_link_sta *link_sta) { - struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + struct iwl_lq_sta_rs_fw *lq_sta; - IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + lq_sta = &link_sta->lq_sta.rs_fw; lq_sta->pers.drv = mvm; - lq_sta->pers.sta_id = mvmsta->sta_id; + lq_sta->pers.sta_id = link_sta->sta_id; lq_sta->pers.chains = 0; - memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); + memset(lq_sta->pers.chain_signal, 0, + 
sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; lq_sta->last_rate_n_flags = 0; @@ -531,3 +729,20 @@ void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) lq_sta->pers.dbg_fixed_rate = 0; #endif } + +void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta) +{ + unsigned int link_id; + + IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); + + for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) { + struct iwl_mvm_link_sta *link = + rcu_dereference_protected(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (!link) + continue; + + iwl_mvm_rs_add_sta_link(mvm, link); + } +} @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -190,7 +190,7 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, default: /* Expected in monitor (not having the keys) */ if (!mvm->monitor_on) - IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); + IWL_WARN(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); } return 0; @@ -213,8 +213,12 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, }; u16 thr; - if (ieee80211_is_data_qos(hdr->frame_control)) - ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)]; + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 tid = ieee80211_get_tid(hdr); + + if (tid < IWL_MAX_TID_COUNT) + ac = tid_to_mac80211_ac[tid]; + } mvmsta = iwl_mvm_sta_from_mac80211(sta); mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; @@ -237,11 +241,11 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, if (mdata->opened_rx_ba_sessions || mdata->uapsd_nonagg_detect.detected || - (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && - !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) || - mvmsta->sta_id != mvmvif->ap_sta_id) + (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd) || + mvmsta->deflink.sta_id != mvmvif->deflink.ap_sta_id) return; if (rate_n_flags & RATE_MCS_HT_MSK_V1) { @@ -253,8 +257,7 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, ARRAY_SIZE(thresh_tpt))) return; thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK]; - thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS); + thr *= 1 + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags); } thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) >> @@ -327,17 +330,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); /* - * drop the packet if it has failed being decrypted by HW - */ - if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, - &crypt_len)) { - IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", - rx_pkt_status); - kfree_skb(skb); - return; - } - - /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. 
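The TID hunk above now range-checks the QoS TID, which comes straight from the received frame header, before it indexes tid_to_mac80211_ac. A minimal standalone sketch of the same guard; the table contents, size name, and fallback AC below are assumptions for illustration, not the driver's real mapping:

/*
 * Illustrative sketch only (not driver code): bound the frame-supplied TID
 * before using it as an array index, and fall back to a default AC.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MAX_TID_COUNT 8            /* QoS TIDs 0..7 (assumed, mirrors the guard's intent) */
enum ex_ac { AC_VO, AC_VI, AC_BE, AC_BK };

static const enum ex_ac ex_tid_to_ac[EX_MAX_TID_COUNT] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
};

static enum ex_ac ex_classify(uint8_t tid)
{
	enum ex_ac ac = AC_BE;        /* default when the TID is out of range */

	if (tid < EX_MAX_TID_COUNT)   /* the guard added by the patch */
		ac = ex_tid_to_ac[tid];
	return ac;
}

int main(void)
{
	printf("tid 5 -> AC %d, tid 15 -> AC %d\n", ex_classify(5), ex_classify(15));
	return 0;
}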
*/ @@ -388,6 +380,38 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *vif = mvmsta->vif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* + * Don't even try to decrypt a MCAST frame that was received + * before the managed vif is authorized, we'd fail anyway. + */ + if (is_multicast_ether_addr(hdr->addr1) && + vif->type == NL80211_IFTYPE_STATION && + !mvmvif->authorized && + ieee80211_has_protected(hdr->frame_control)) { + IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n"); + kfree_skb(skb); + rcu_read_unlock(); + return; + } + } + + /* + * drop the packet if it has failed being decrypted by HW + */ + if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status, + &crypt_len)) { + IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", + rx_pkt_status); + kfree_skb(skb); + rcu_read_unlock(); + return; + } + + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = rcu_dereference(mvm->csa_tx_blocked_vif); struct iwl_fw_dbg_trigger_tlv *trig; @@ -480,8 +504,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; @@ -610,9 +633,9 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, * data copied into the "data" struct, but rather the data from * the notification directly. 
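The NSS changes in these hunks swap the open-coded RATE_VHT_MCS_NSS_MSK shift for FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags). A small userspace sketch of that mask-driven extraction, using a made-up mask value and a FIELD_GET-like helper of my own (the real macro lives in the kernel's bitfield helpers):

/*
 * Standalone sketch: extract a bitfield by mask only, letting the shift be
 * derived from the mask's lowest set bit, as FIELD_GET() does.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_FIELD_GET(mask, val)  (((val) & (mask)) >> __builtin_ctzl(mask))

#define EX_RATE_MCS_NSS_MSK 0x00000030UL   /* assumed 2-bit NSS field, for illustration */

int main(void)
{
	uint32_t rate_n_flags = 0x00000021;    /* NSS field = 2 -> 3 spatial streams */
	uint32_t nss = EX_FIELD_GET(EX_RATE_MCS_NSS_MSK, rate_n_flags) + 1;
	uint32_t thr = 100;

	/* same shape as the TCM hunk: thr *= 1 + FIELD_GET(NSS, rate_n_flags) */
	thr *= 1 + EX_FIELD_GET(EX_RATE_MCS_NSS_MSK, rate_n_flags);
	printf("nss=%u scaled thr=%u\n", nss, thr);
	return 0;
}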
*/ - mvmvif->beacon_stats.num_beacons = + mvmvif->deflink.beacon_stats.num_beacons = le32_to_cpu(data->beacon_counter[vif_id]); - mvmvif->beacon_stats.avg_signal = + mvmvif->deflink.beacon_stats.avg_signal = -data->beacon_average_energy[vif_id]; if (mvmvif->id != id) @@ -625,8 +648,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, * request to clear statistics */ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; iwl_mvm_update_vif_sig(vif, sig); } @@ -648,17 +671,17 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, mac_stats = &data->per_mac_stats[vif_id]; - mvmvif->beacon_stats.num_beacons = + mvmvif->deflink.beacon_stats.num_beacons = le32_to_cpu(mac_stats->beacon_counter); - mvmvif->beacon_stats.avg_signal = + mvmvif->deflink.beacon_stats.avg_signal = -le32_to_cpu(mac_stats->beacon_average_energy); /* make sure that beacon statistics don't go backwards with TCM * request to clear statistics */ if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; + mvmvif->deflink.beacon_stats.accu_num_beacons += + mvmvif->deflink.beacon_stats.num_beacons; sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); iwl_mvm_update_vif_sig(vif, sig); @@ -694,14 +717,14 @@ static void iwl_mvm_stats_energy_iter(void *_data, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); u8 *energy = _data; - u32 sta_id = mvmsta->sta_id; + u32 sta_id = mvmsta->deflink.sta_id; if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d", sta_id, IWL_MVM_STATION_COUNT_MAX)) return; if (energy[sta_id]) - mvmsta->avg_energy = energy[sta_id]; + mvmsta->deflink.avg_energy = energy[sta_id]; } diff --git a/mvm/rxmq.c b/mvm/rxmq.c index 2c43a9989783..8d1e44fd9de7 100644 --- a/mvm/rxmq.c +++ b/mvm/rxmq.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -9,38 +9,13 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" - -static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb) -{ - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - u8 *data = skb->data; - - /* Alignment concerns */ - BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4); - BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4); - BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4); - BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4); - - if (rx_status->flag & RX_FLAG_RADIOTAP_HE) - data += sizeof(struct ieee80211_radiotap_he); - if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) - data += sizeof(struct ieee80211_radiotap_he_mu); - if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG) - data += sizeof(struct ieee80211_radiotap_lsig); - if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { - struct ieee80211_vendor_radiotap *radiotap = (void *)data; - - data += sizeof(*radiotap) + radiotap->len + radiotap->pad; - } - - return data; -} +#include "time-sync.h" static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta; - struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); + struct ieee80211_hdr 
*hdr = (void *)skb_mac_header(skb); struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; int res; @@ -131,28 +106,12 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, /* * For non monitor interface strip the bytes the RADA might not have - * removed. As monitor interface cannot exist with other interfaces - * this removal is safe. + * removed (it might be disabled, e.g. for mgmt frames). As a monitor + * interface cannot exist with other interfaces, this removal is safe + * and sufficient, in monitor mode there's no decryption being done. */ - if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) { - u32 pkt_flags = le32_to_cpu(pkt->len_n_flags); - - /* - * If RADA was not enabled then decryption was not performed so - * the MIC cannot be removed. - */ - if (!(pkt_flags & FH_RSCSR_RADA_EN)) { - if (WARN_ON(crypt_len > mic_crc_len)) - return -EINVAL; - - mic_crc_len -= crypt_len; - } - - if (WARN_ON(mic_crc_len > len)) - return -EINVAL; - + if (len > mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) len -= mic_crc_len; - } /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and @@ -179,6 +138,10 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (unlikely(headlen < hdrlen)) return -EINVAL; + /* Since data doesn't move data while putting data on skb and that is + * the only way we use, data + len is the next place that hdr would be put + */ + skb_set_mac_header(skb, skb->len); skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); @@ -193,8 +156,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, * Starting from Bz hardware, it calculates starting directly after * the MAC header, so that matches mac80211's expectation. 
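The skb_set_mac_header(skb, skb->len) hunk records where the 802.11 header lands while the skb is being built, so later code (iwl_mvm_check_pn, the reorder path) can call skb_mac_header() instead of re-deriving the offset from radiotap flags as the removed iwl_mvm_skb_get_hdr() did. A rough sketch of that record-the-offset-at-append-time idea, using an invented buffer type that only mimics the pattern:

/*
 * Illustrative only: remember the header offset when appending, retrieve it
 * later instead of recomputing it from whatever metadata precedes it.
 */
#include <stdio.h>
#include <string.h>

struct ex_buf {
	unsigned char data[256];
	size_t len;
	size_t mac_hdr_off;
};

static void ex_put(struct ex_buf *b, const void *src, size_t n)
{
	memcpy(b->data + b->len, src, n);
	b->len += n;
}

int main(void)
{
	struct ex_buf b = { .len = 0 };
	unsigned char radiotap[16] = { 0 };     /* variable-length metadata in front */
	unsigned char hdr80211[24] = { 0x88 };  /* QoS data frame header */

	ex_put(&b, radiotap, sizeof(radiotap));
	b.mac_hdr_off = b.len;                  /* like skb_set_mac_header(skb, skb->len) */
	ex_put(&b, hdr80211, sizeof(hdr80211));

	printf("mac header starts at offset %zu\n", b.mac_hdr_off);
	return 0;
}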
*/ - if (skb->ip_summed == CHECKSUM_COMPLETE && - mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) { + if (skb->ip_summed == CHECKSUM_COMPLETE) { struct { u8 hdr[6]; __be16 type; @@ -209,7 +171,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, shdr->type != htons(ETH_P_PAE) && shdr->type != htons(ETH_P_TDLS)))) skb->ip_summed = CHECKSUM_NONE; - else + else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) /* mac80211 assumes full CSUM including SNAP header */ skb_postpush_rcsum(skb, shdr, sizeof(*shdr)); } @@ -227,49 +189,69 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, return 0; } +/* put a TLV on the skb and return data pointer + * + * Also pad to 4 the len and zero out all data part + */ +static void * +iwl_mvm_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len) +{ + struct ieee80211_radiotap_tlv *tlv; + + tlv = skb_put(skb, sizeof(*tlv)); + tlv->type = cpu_to_le16(type); + tlv->len = cpu_to_le16(len); + return skb_put_zero(skb, ALIGN(len, 4)); +} + static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_vendor_radiotap *radiotap; - const int size = sizeof(*radiotap) + sizeof(__le16); + struct ieee80211_radiotap_vendor_content *radiotap; + const u16 vendor_data_len = sizeof(mvm->cur_aid); if (!mvm->cur_aid) return; - /* ensure alignment */ - BUILD_BUG_ON((size + 2) % 4); + radiotap = iwl_mvm_radiotap_put_tlv(skb, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE, + sizeof(*radiotap) + vendor_data_len); - radiotap = skb_put(skb, size + 2); - radiotap->align = 1; /* Intel OUI */ radiotap->oui[0] = 0xf6; radiotap->oui[1] = 0x54; radiotap->oui[2] = 0x25; /* radiotap sniffer config sub-namespace */ - radiotap->subns = 1; - radiotap->present = 0x1; - radiotap->len = size - sizeof(*radiotap); - radiotap->pad = 2; + radiotap->oui_subtype = 1; + radiotap->vendor_type = 0; /* fill the data now */ memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid)); - /* and clear the padding */ - memset(radiotap->data + sizeof(__le16), 0, radiotap->pad); - rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; } /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta) { - if (iwl_mvm_check_pn(mvm, skb, queue, sta)) + if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) { kfree_skb(skb); - else - ieee80211_rx_napi(mvm->hw, sta, skb, napi); + return; + } + + if (sta && sta->valid_links && link_sta) { + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + + rx_status->link_valid = 1; + rx_status->link_id = link_sta->link_id; + } + + ieee80211_rx_napi(mvm->hw, sta, skb, napi); } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, @@ -297,7 +279,8 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, struct iwl_rx_mpdu_desc *desc, - u32 status) + u32 status, + struct ieee80211_rx_status *stats) { struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; @@ -326,8 +309,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta, /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && - !(status & 
IWL_RX_MPDU_STATUS_REPLAY_ERROR))) + !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) { + stats->flag |= RX_FLAG_DECRYPTED; return 0; + } if (!sta) return -1; @@ -396,7 +381,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (unlikely(ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_has_protected(hdr->frame_control))) - return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status); + return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status, stats); if (!ieee80211_has_protected(hdr->frame_control) || (status & IWL_RX_MPDU_STATUS_SEC_MASK) == @@ -413,9 +398,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) return -1; - stats->flag |= RX_FLAG_DECRYPTED; - if (pkt_flags & FH_RSCSR_RADA_EN) - stats->flag |= RX_FLAG_MIC_STRIPPED; + stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; *crypt_len = IEEE80211_CCMP_HDR_LEN; return 0; case IWL_RX_MPDU_STATUS_SEC_TKIP: @@ -465,7 +448,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ if (!is_multicast_ether_addr(hdr->addr1) && !mvm->monitor_on && net_ratelimit()) - IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); + IWL_WARN(mvm, "Unhandled alg: 0x%x\n", status); } return 0; @@ -504,7 +487,7 @@ static void iwl_mvm_rx_csum(struct iwl_mvm *mvm, } /* - * returns true if a packet is a duplicate and should be dropped. + * returns true if a packet is a duplicate or invalid tid and should be dropped. * Updates AMSDU PN tracking info */ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, @@ -533,11 +516,14 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, return false; } - if (ieee80211_is_data_qos(hdr->frame_control)) + if (ieee80211_is_data_qos(hdr->frame_control)) { /* frame has qos control */ tid = ieee80211_get_tid(hdr); - else + if (tid >= IWL_MAX_TID_COUNT) + return true; + } else { tid = IWL_MAX_TID_COUNT; + } /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ sub_frame_idx = desc->amsdu_info & @@ -642,7 +628,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, - sta); + sta, NULL /* FIXME */); reorder_buf->num_stored--; } } @@ -707,10 +693,15 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) if (expired) { struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; - u8 sta_id = baid_data->sta_id; + u8 sta_id = ffs(baid_data->sta_mask) - 1; rcu_read_lock(); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + goto out; + } + mvmsta = iwl_mvm_sta_from_mac80211(sta); /* SN is set to the last expired frame + 1 */ @@ -732,6 +723,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) entries[index].e.reorder_time + 1 + RX_REORDER_BUF_TIMEOUT_MQ); } + +out: spin_unlock(&buf->lock); } @@ -742,6 +735,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; u8 baid = data->baid; + u32 sta_id; if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid)) return; @@ -752,7 +746,9 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, if (WARN_ON_ONCE(!ba_data)) goto out; - sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + /* pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if 
(WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) goto out; @@ -779,6 +775,7 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, struct ieee80211_sta *sta; struct iwl_mvm_reorder_buffer *reorder_buf; struct iwl_mvm_baid_data *ba_data; + u32 sta_id; IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", baid, nssn); @@ -796,7 +793,9 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, goto out; } - sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); + /* pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) goto out; @@ -936,8 +935,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct iwl_rx_mpdu_desc *desc) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); - struct iwl_mvm_sta *mvm_sta; + struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); struct iwl_mvm_baid_data *baid_data; struct iwl_mvm_reorder_buffer *buffer; struct sk_buff *tail; @@ -949,6 +947,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; + u32 sta_mask; int index; u16 nssn, sn; u8 baid; @@ -971,8 +970,6 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, "Got valid BAID without a valid station assigned\n")) return false; - mvm_sta = iwl_mvm_sta_from_mac80211(sta); - /* not a data packet or a bar */ if (!ieee80211_is_back_req(hdr->frame_control) && (!ieee80211_is_data_qos(hdr->frame_control) || @@ -990,10 +987,16 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, return false; } - if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id, - "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n", - baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id, - tid)) + rcu_read_lock(); + sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + rcu_read_unlock(); + + if (IWL_FW_CHECK(mvm, + tid != baid_data->tid || + !(sta_mask & baid_data->sta_mask), + "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n", + baid, baid_data->sta_mask, baid_data->tid, + sta_mask, tid)) return false; nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK; @@ -1189,18 +1192,27 @@ static void iwl_mvm_flip_address(u8 *addr) struct iwl_mvm_rx_phy_data { enum iwl_rx_phy_info_type info_type; - __le32 d0, d1, d2, d3; + __le32 d0, d1, d2, d3, eht_d4, d5; __le16 d4; + bool with_data; + bool first_subframe; + __le32 rx_vec[4]; + + u32 rate_n_flags; + u32 gp2_on_air_rise; + u16 phy_info; + u8 energy_a, energy_b; + u8 channel; }; static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm, struct iwl_mvm_rx_phy_data *phy_data, - u32 rate_n_flags, struct ieee80211_radiotap_he_mu *he_mu) { u32 phy_data2 = le32_to_cpu(phy_data->d2); u32 phy_data3 = le32_to_cpu(phy_data->d3); u16 phy_data4 = le16_to_cpu(phy_data->d4); + u32 rate_n_flags = phy_data->rate_n_flags; if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) { he_mu->flags1 |= @@ -1246,7 +1258,6 @@ static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm, static void iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, - u32 rate_n_flags, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status) @@ -1260,6 +1271,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, * the TSF/timers are not 
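The BAID bookkeeping in these hunks moves from a single sta_id to a sta_mask, since with MLO one BA session can span several link stations; any member is recovered with ffs() for the pointer lookup, and ownership is tested against the mask. A standalone sketch of that pattern with purely illustrative mask values:

/*
 * Sketch: pick "any" station ID from a bitmask and test membership,
 * mirroring the sta_id = ffs(sta_mask) - 1 and BIT(sta_id) checks above.
 */
#include <stdio.h>
#include <strings.h>   /* ffs() */

#define EX_BIT(n) (1U << (n))

int main(void)
{
	unsigned int sta_mask = EX_BIT(3) | EX_BIT(7); /* two link stations share the BAID */
	int any_sta_id = ffs(sta_mask) - 1;            /* pick any ID to find the pointer */
	unsigned int rx_sta_id = 7;

	printf("lookup via sta_id %d\n", any_sta_id);
	printf("frame from sta %u belongs to this BAID: %s\n", rx_sta_id,
	       (sta_mask & EX_BIT(rx_sta_id)) ? "yes" : "no");
	return 0;
}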
be transmitted in HE-MU. */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); + u32 rate_n_flags = phy_data->rate_n_flags; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; u8 offs = 0; @@ -1331,7 +1343,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status, - u32 rate_n_flags, int queue) + int queue) { switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_NONE: @@ -1340,6 +1352,10 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, case IWL_RX_PHY_INFO_TYPE_HT: case IWL_RX_PHY_INFO_TYPE_VHT_SU: case IWL_RX_PHY_INFO_TYPE_VHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: return; case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | @@ -1430,7 +1446,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, le16_encode_bits(le16_get_bits(phy_data->d4, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); - iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu); + iwl_mvm_decode_he_mu_ext(mvm, phy_data, he_mu); fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_MU: he_mu->flags2 |= @@ -1444,8 +1460,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_TB: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: - iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags, - he, he_mu, rx_status); + iwl_mvm_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status); break; case IWL_RX_PHY_INFO_TYPE_HE_SU: he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); @@ -1459,15 +1474,547 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, } } +#define LE32_DEC_ENC(value, dec_bits, enc_bits) \ + le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits) + +#define IWL_MVM_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \ + typeof(enc_bits) _enc_bits = enc_bits; \ + typeof(usig) _usig = usig; \ + (_usig)->mask |= cpu_to_le32(_enc_bits); \ + (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \ +} while (0) + +#define __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + eht->data[(rt_data)] |= \ + (cpu_to_le32 \ + (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \ + LE32_DEC_ENC(data ## fw_data, \ + IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \ + IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru)) + +#define _IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + __IWL_MVM_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) + +#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4 + +#define IWL_RX_RU_DATA_A1 2 +#define IWL_RX_RU_DATA_A2 2 +#define IWL_RX_RU_DATA_B1 2 +#define IWL_RX_RU_DATA_B2 3 +#define IWL_RX_RU_DATA_C1 3 +#define IWL_RX_RU_DATA_C2 3 +#define IWL_RX_RU_DATA_D1 4 +#define IWL_RX_RU_DATA_D2 4 + +#define IWL_MVM_ENC_EHT_RU(rt_ru, fw_ru) \ + _IWL_MVM_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \ + rt_ru, \ + IWL_RX_RU_DATA_ ## fw_ru, \ + fw_ru) + +static void 
iwl_mvm_decode_eht_ext_mu(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) +{ + if (phy_data->with_data) { + __le32 data1 = phy_data->d1; + __le32 data2 = phy_data->d2; + __le32 data3 = phy_data->d3; + __le32 data4 = phy_data->eht_d4; + __le32 data5 = phy_data->d5; + u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data4, + IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MVM_ENC_USIG_VALUE_MASK + (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) | + LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M); + eht->data[7] |= LE32_DEC_ENC + (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + + /* + * Hardware labels the content channels/RU allocation values + * as follows: + * Content Channel 1 Content Channel 2 + * 20 MHz: A1 + * 40 MHz: A1 B1 + * 80 MHz: A1 C1 B1 D1 + * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2 + * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4 + * + * However firmware can only give us A1-D2, so the higher + * frequencies are missing. 
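The bandwidth switch that follows the comment above accumulates content-channel RU values by falling through from the widest matched case down to 20 MHz, so wider PPDUs pick up all the narrower entries as well. A minimal sketch of that fallthrough accumulation, with illustrative width labels only:

/*
 * Sketch: each wider bandwidth adds its extra content-channel values and
 * then falls through to the narrower cases, as in the hunk below.
 */
#include <stdio.h>

enum ex_bw { BW_20, BW_40, BW_80, BW_160 };

static void ex_collect(enum ex_bw bw)
{
	printf("bw index %d:", bw);
	switch (bw) {
	case BW_160:
		printf(" A2 C2 B2 D2");  /* extra 160 MHz content-channel values */
		/* fall through */
	case BW_80:
		printf(" C1 D1");
		/* fall through */
	case BW_40:
		printf(" B1");
		/* fall through */
	case BW_20:
		printf(" A1");
		break;
	}
	printf("\n");
}

int main(void)
{
	for (enum ex_bw bw = BW_20; bw <= BW_160; bw++)
		ex_collect(bw);
	return 0;
}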
+ */ + + switch (phy_bw) { + case RATE_MCS_CHAN_WIDTH_320: + /* additional values are missing in RX metadata */ + case RATE_MCS_CHAN_WIDTH_160: + /* content channel 1 */ + IWL_MVM_ENC_EHT_RU(1_2_1, A2); + IWL_MVM_ENC_EHT_RU(1_2_2, C2); + /* content channel 2 */ + IWL_MVM_ENC_EHT_RU(2_2_1, B2); + IWL_MVM_ENC_EHT_RU(2_2_2, D2); + fallthrough; + case RATE_MCS_CHAN_WIDTH_80: + /* content channel 1 */ + IWL_MVM_ENC_EHT_RU(1_1_2, C1); + /* content channel 2 */ + IWL_MVM_ENC_EHT_RU(2_1_2, D1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_40: + /* content channel 2 */ + IWL_MVM_ENC_EHT_RU(2_1_1, B1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_20: + IWL_MVM_ENC_EHT_RU(1_1_1, A1); + break; + } + } else { + __le32 usig_a1 = phy_data->rx_vec[0]; + __le32 usig_a2 = phy_data->rx_vec[1]; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PUNC_CHANNEL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_SIG_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MVM_ENC_USIG_VALUE_MASK + (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_CRC_OK, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC); + } +} + +static void iwl_mvm_decode_eht_ext_tb(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) +{ + if (phy_data->with_data) { + __le32 data5 = phy_data->d5; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + } else { + __le32 usig_a1 = phy_data->rx_vec[0]; + __le32 usig_a2 = phy_data->rx_vec[1]; + + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD, + 
IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD); + IWL_MVM_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_CRC_OK, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC); + } +} + +static void iwl_mvm_decode_eht_ru(struct iwl_mvm *mvm, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht) +{ + u32 ru = le32_get_bits(eht->data[8], + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + enum nl80211_eht_ru_alloc nl_ru; + + /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields + * in an EHT variant User Info field + */ + + switch (ru) { + case 0 ... 36: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26; + break; + case 37 ... 52: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52; + break; + case 53 ... 60: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106; + break; + case 61 ... 64: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242; + break; + case 65 ... 66: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484; + break; + case 67: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996; + break; + case 68: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996; + break; + case 69: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996; + break; + case 70 ... 81: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26; + break; + case 82 ... 89: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26; + break; + case 90 ... 93: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242; + break; + case 94 ... 95: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484; + break; + case 96 ... 99: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242; + break; + case 100 ... 103: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484; + break; + case 104: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996; + break; + case 105 ... 106: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484; + break; + default: + return; + } + + rx_status->bw = RATE_INFO_BW_EHT_RU; + rx_status->eht.ru = nl_ru; +} + +static void iwl_mvm_decode_eht_phy_data(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) + +{ + __le32 data0 = phy_data->d0; + __le32 data1 = phy_data->d1; + __le32 usig_a1 = phy_data->rx_vec[0]; + u8 info_type = phy_data->info_type; + + /* Not in EHT range */ + if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU || + info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT) + return; + + usig->common |= cpu_to_le32 + (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN); + if (phy_data->with_data) { + usig->common |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_EHT_UPLINK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); + } else { + usig->common |= LE32_DEC_ENC(usig_a1, + IWL_RX_USIG_A1_UL_FLAG, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(usig_a1, + IWL_RX_USIG_A1_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); + } + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT)) { + usig->common |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED); + usig->common |= + LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK); + } + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE); + eht->data[0] |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + + /* All RU 
allocating size/index is in TB format */ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT); + eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160); + eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0); + eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + + iwl_mvm_decode_eht_ru(mvm, rx_status, eht); + + /* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + * which is on only in case of monitor mode so no need to check monitor + * mode + */ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80); + eht->data[1] |= + le32_encode_bits(mvm->monitor_p80, + IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN); + if (phy_data->with_data) + usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + else + usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + + /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */ + + if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK)) + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN); + usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER); + + /* + * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE, + * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS + */ + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF); + eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT || + info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB) + iwl_mvm_decode_eht_ext_tb(mvm, phy_data, rx_status, eht, usig); + + if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT || + info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU) + iwl_mvm_decode_eht_ext_mu(mvm, phy_data, rx_status, eht, usig); +} + +static void iwl_mvm_rx_eht(struct iwl_mvm *mvm, struct sk_buff *skb, + struct iwl_mvm_rx_phy_data *phy_data, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + + struct ieee80211_radiotap_eht *eht; + struct ieee80211_radiotap_eht_usig *usig; + size_t eht_len = sizeof(*eht); + + u32 rate_n_flags = phy_data->rate_n_flags; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + /* EHT and HE have the same valus for LTF */ + u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; + u16 phy_info = phy_data->phy_info; + u32 bw; + + /* u32 for 1 user_info */ + if (phy_data->with_data) + eht_len += sizeof(u32); + + eht = iwl_mvm_radiotap_put_tlv(skb, 
IEEE80211_RADIOTAP_EHT, eht_len); + + usig = iwl_mvm_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG, + sizeof(*usig)); + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + usig->common |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN); + + /* specific handling for 320MHz */ + bw = FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags); + if (bw == RATE_MCS_CHAN_WIDTH_320_VAL) + bw += FIELD_GET(IWL_RX_PHY_DATA0_EHT_BW320_SLOT, + le32_to_cpu(phy_data->d0)); + + usig->common |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw)); + + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + /* update aggregation data for monitor sake on default queue */ + if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && + (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + iwl_mvm_decode_eht_phy_data(mvm, phy_data, rx_status, eht, usig); + +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + switch (FIELD_GET(RATE_MCS_HE_GI_LTF_MSK, rate_n_flags)) { + case 0: + if (he_type == RATE_MCS_HE_TYPE_TRIG) { + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; + } else { + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + } + break; + case 1: + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + break; + case 2: + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + if (he_type == RATE_MCS_HE_TYPE_TRIG) + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2; + else + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8; + break; + case 3: + if (he_type != RATE_MCS_HE_TYPE_TRIG) { + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2; + } + break; + default: + /* nothing here */ + break; + } + + if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) { + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI); + eht->data[0] |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, + ltf) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, + rx_status->eht.gi)); + } + + + if (!phy_data->with_data) { + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S); + eht->data[7] |= + le32_encode_bits(le32_get_bits(phy_data->rx_vec[2], + RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK), + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + if (rate_n_flags & RATE_MCS_BF_MSK) + eht->data[7] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); + } else { + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER); + + if (rate_n_flags & RATE_MCS_BF_MSK) + eht->user_info[0] |= + 
cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); + + if (rate_n_flags & RATE_MCS_LDPC_MSK) + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING); + + eht->user_info[0] |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS, + FIELD_GET(RATE_VHT_MCS_RATE_CODE_MSK, + rate_n_flags)) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O, + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags))); + } +} + static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_mvm_rx_phy_data *phy_data, - u32 rate_n_flags, u16 phy_info, int queue) + int queue) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu *he_mu = NULL; + u32 rate_n_flags = phy_data->rate_n_flags; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; - u8 stbc, ltf; + u8 ltf; static const struct ieee80211_radiotap_he known = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | @@ -1484,6 +2031,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; + u16 phy_info = phy_data->phy_info; he = skb_put_data(skb, &known, sizeof(known)); rx_status->flag |= RX_FLAG_RADIOTAP_HE; @@ -1504,19 +2052,14 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status, - rate_n_flags, queue); + queue); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && - (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; - - /* toggle is switched whenever new aggregation starts */ - if (toggle_bit != mvm->ampdu_toggle) { - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } + (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (he_type == RATE_MCS_HE_TYPE_EXT_SU && @@ -1531,19 +2074,6 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); - stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_MCS_NSS_MSK) >> - RATE_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->encoding = RX_ENC_HE; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - - rx_status->he_dcm = - !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); - #define CHECK_TYPE(F) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) @@ -1617,6 +2147,10 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb, case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: case IWL_RX_PHY_INFO_TYPE_HE_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: lsig = skb_put(skb, sizeof(*lsig)); lsig->data1 = 
cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1, @@ -1661,6 +2195,121 @@ static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta) rx_sta_csa->all_sta_unblocked = false; } +/* + * Note: requires also rx_status->band to be prefilled, as well + * as phy_data (apart from phy_data->info_type) + */ +static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm, + struct sk_buff *skb, + struct iwl_mvm_rx_phy_data *phy_data, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u32 rate_n_flags = phy_data->rate_n_flags; + u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + bool is_sgi; + + phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE; + + if (phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + phy_data->info_type = + le32_get_bits(phy_data->d1, + IWL_RX_PHY_DATA1_INFO_TYPE_MASK); + + /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->bw = RATE_INFO_BW_160; + break; + case RATE_MCS_CHAN_WIDTH_320: + rx_status->bw = RATE_INFO_BW_320; + break; + } + + /* must be before L-SIG data */ + if (format == RATE_MCS_HE_MSK) + iwl_mvm_rx_he(mvm, skb, phy_data, queue); + + iwl_mvm_decode_lsig(skb, phy_data); + + rx_status->device_timestamp = phy_data->gp2_on_air_rise; + rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel, + rx_status->band); + iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, + phy_data->energy_a, phy_data->energy_b); + + /* using TLV format and must be after all fixed len fields */ + if (format == RATE_MCS_EHT_MSK) + iwl_mvm_rx_eht(mvm, skb, phy_data, queue); + + if (unlikely(mvm->monitor_on)) + iwl_mvm_add_rtap_sniffer_config(mvm, skb); + + is_sgi = format == RATE_MCS_HE_MSK ? 
+ iwl_he_is_sgi(rate_n_flags) : + rate_n_flags & RATE_MCS_SGI_MSK; + + if (!(format == RATE_MCS_CCK_MSK) && is_sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; + + switch (format) { + case RATE_MCS_VHT_MSK: + rx_status->encoding = RX_ENC_VHT; + break; + case RATE_MCS_HE_MSK: + rx_status->encoding = RX_ENC_HE; + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + break; + case RATE_MCS_EHT_MSK: + rx_status->encoding = RX_ENC_EHT; + break; + } + + switch (format) { + case RATE_MCS_HT_MSK: + rx_status->encoding = RX_ENC_HT; + rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + case RATE_MCS_VHT_MSK: + case RATE_MCS_HE_MSK: + case RATE_MCS_EHT_MSK: + rx_status->nss = + u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + default: { + int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + rx_status->band); + + rx_status->rate_idx = rate; + + if ((rate < 0 || rate > 0xFF)) { + rx_status->rate_idx = 0; + if (net_ratelimit()) + IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band); + } + + break; + } + } +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -1670,17 +2319,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_hdr *hdr; u32 len; u32 pkt_len = iwl_rx_packet_payload_len(pkt); - u32 rate_n_flags, gp2_on_air_rise; - u16 phy_info; struct ieee80211_sta *sta = NULL; + struct ieee80211_link_sta *link_sta = NULL; struct sk_buff *skb; - u8 crypt_len = 0, channel, energy_a, energy_b; + u8 crypt_len = 0; size_t desc_size; - struct iwl_mvm_rx_phy_data phy_data = { - .info_type = IWL_RX_PHY_INFO_TYPE_NONE, - }; + struct iwl_mvm_rx_phy_data phy_data = {}; u32 format; - bool is_sgi; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; @@ -1696,35 +2341,39 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); - channel = desc->v3.channel; - gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); - energy_a = desc->v3.energy_a; - energy_b = desc->v3.energy_b; + phy_data.rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); + phy_data.channel = desc->v3.channel; + phy_data.gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); + phy_data.energy_a = desc->v3.energy_a; + phy_data.energy_b = desc->v3.energy_b; phy_data.d0 = desc->v3.phy_data0; phy_data.d1 = desc->v3.phy_data1; phy_data.d2 = desc->v3.phy_data2; phy_data.d3 = desc->v3.phy_data3; + phy_data.eht_d4 = desc->phy_eht_data4; + phy_data.d5 = desc->v3.phy_data5; } else { - rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); - channel = desc->v1.channel; - gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); - energy_a = desc->v1.energy_a; - energy_b = desc->v1.energy_b; + phy_data.rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); + phy_data.channel = desc->v1.channel; + phy_data.gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); + phy_data.energy_a = desc->v1.energy_a; + phy_data.energy_b = desc->v1.energy_b; phy_data.d0 = desc->v1.phy_data0; phy_data.d1 = desc->v1.phy_data1; phy_data.d2 = desc->v1.phy_data2; phy_data.d3 = 
desc->v1.phy_data3; } + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, REPLY_RX_MPDU_CMD, 0) < 4) { - rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags); IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n", - rate_n_flags); + phy_data.rate_n_flags); } - format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + + format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; len = le16_to_cpu(desc->mpdu_len); @@ -1733,14 +2382,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, return; } - phy_info = le16_to_cpu(desc->phy_info); + phy_data.with_data = true; + phy_data.phy_info = le16_to_cpu(desc->phy_info); phy_data.d4 = desc->phy_data4; - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) - phy_data.info_type = - le32_get_bits(phy_data.d1, - IWL_RX_PHY_DATA1_INFO_TYPE_MASK); - hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. @@ -1763,27 +2408,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); - /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - rx_status->bw = RATE_INFO_BW_40; - break; - case RATE_MCS_CHAN_WIDTH_80: - rx_status->bw = RATE_INFO_BW_80; - break; - case RATE_MCS_CHAN_WIDTH_160: - rx_status->bw = RATE_INFO_BW_160; - break; - } - - if (format == RATE_MCS_HE_MSK) - iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, - phy_info, queue); - - iwl_mvm_decode_lsig(skb, &phy_data); - /* * Keep packets with CRC errors (and with overrun) for monitor mode * (otherwise the firmware discards them) but mark them as bad. @@ -1794,12 +2418,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, le32_to_cpu(desc->status)); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } + /* set the preamble flag if appropriate */ if (format == RATE_MCS_CCK_MSK && - phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + phy_data.phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; - if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { + if (likely(!(phy_data.phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { u64 tsf_on_air_rise; if (mvm->trans->trans_cfg->device_family >= @@ -1813,24 +2438,20 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; } - rx_status->device_timestamp = gp2_on_air_rise; if (iwl_mvm_is_band_in_rx_supported(mvm)) { u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx); rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band); } else { - rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : + rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; } - rx_status->freq = ieee80211_channel_to_frequency(channel, - rx_status->band); - iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, - energy_b); /* update aggregation data for monitor sake on default queue */ - if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + bool toggle_bit; + toggle_bit = phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; rx_status->flag |= RX_FLAG_AMPDU_DETAILS; /* * Toggle is switched whenever new aggregation starts. 
Make @@ -1842,13 +2463,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (mvm->ampdu_ref == 0) mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; + phy_data.first_subframe = true; } rx_status->ampdu_reference = mvm->ampdu_ref; } - if (unlikely(mvm->monitor_on)) - iwl_mvm_add_rtap_sniffer_config(mvm, skb); - rcu_read_lock(); if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { @@ -1858,6 +2477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, sta = rcu_dereference(mvm->fw_id_to_mac_id[id]); if (IS_ERR(sta)) sta = NULL; + link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]); } } else if (!is_multicast_ether_addr(hdr->addr2)) { /* @@ -1867,13 +2487,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL); } - if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc, + if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_data.phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { kfree_skb(skb); goto out; } + iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue); + if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_vif *tx_blocked_vif = @@ -1914,7 +2536,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); /* Unblock BCAST / MCAST station */ iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); - cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); + cancel_delayed_work(&mvm->cs_tx_unblock_dwork); } } @@ -1971,43 +2593,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } } - is_sgi = format == RATE_MCS_HE_MSK ? - iwl_he_is_sgi(rate_n_flags) : - rate_n_flags & RATE_MCS_SGI_MSK; - - if (!(format == RATE_MCS_CCK_MSK) && is_sgi) - rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (format == RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (format == RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >> - RATE_MCS_NSS_POS) + 1; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->encoding = RX_ENC_VHT; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (!(format == RATE_MCS_HE_MSK)) { - int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, - rx_status->band); - - if (WARN(rate < 0 || rate > 0xFF, - "Invalid rate flags 0x%x, band %d,\n", - rate_n_flags, rx_status->band)) { - kfree_skb(skb); - goto out; - } - rx_status->rate_idx = rate; - } - /* management stuff on default queue */ if (!queue) { if (unlikely((ieee80211_is_beacon(hdr->frame_control) || @@ -2026,9 +2611,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } - if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, - sta); + if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) && + likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) && + likely(!iwl_mvm_mei_filter_scan(mvm, skb))) + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta, + 
link_sta); out: rcu_read_unlock(); } @@ -2038,43 +2625,58 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, { struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_rx_no_data *desc = (void *)pkt->data; - u32 rate_n_flags = le32_to_cpu(desc->rate); - u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); - u32 rssi = le32_to_cpu(desc->rssi); - u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; - u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; + struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data; + u32 rssi; + u32 info_type; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; - u8 channel, energy_a, energy_b; + struct iwl_mvm_rx_phy_data phy_data; u32 format; - struct iwl_mvm_rx_phy_data phy_data = { - .info_type = le32_get_bits(desc->phy_info[1], - IWL_RX_PHY_DATA1_INFO_TYPE_MASK), - .d0 = desc->phy_info[0], - .d1 = desc->phy_info[1], - }; - bool is_sgi; + + if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) + return; + + if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(struct iwl_rx_no_data))) + return; + + rssi = le32_to_cpu(desc->rssi); + info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; + phy_data.d0 = desc->phy_info[0]; + phy_data.d1 = desc->phy_info[1]; + phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; + phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); + phy_data.rate_n_flags = le32_to_cpu(desc->rate); + phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK); + phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK); + phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK); + phy_data.with_data = false; + phy_data.rx_vec[0] = desc->rx_vec[0]; + phy_data.rx_vec[1] = desc->rx_vec[1]; if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, RX_NO_DATA_NOTIF, 0) < 2) { IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n", - rate_n_flags); - rate_n_flags = iwl_new_rate_from_v1(rate_n_flags); + phy_data.rate_n_flags); + phy_data.rate_n_flags = iwl_new_rate_from_v1(phy_data.rate_n_flags); IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n", - rate_n_flags); + phy_data.rate_n_flags); } - format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - - if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc))) - return; - if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) - return; + format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; - energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS; - energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS; - channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS; + if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + RX_NO_DATA_NOTIF, 0) >= 3) { + if (unlikely(iwl_rx_packet_payload_len(pkt) < + sizeof(struct iwl_rx_no_data_ver_3))) + /* invalid len for ver 3 */ + return; + phy_data.rx_vec[2] = desc->rx_vec[2]; + phy_data.rx_vec[3] = desc->rx_vec[3]; + } else { + if (format == RATE_MCS_EHT_MSK) + /* no support for EHT before version 3 API */ + return; + } /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. 
@@ -2096,7 +2698,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; break; case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: - case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED: + case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; break; @@ -2106,86 +2708,42 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, break; } - /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ - switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { - case RATE_MCS_CHAN_WIDTH_20: - break; - case RATE_MCS_CHAN_WIDTH_40: - rx_status->bw = RATE_INFO_BW_40; - break; - case RATE_MCS_CHAN_WIDTH_80: - rx_status->bw = RATE_INFO_BW_80; - break; - case RATE_MCS_CHAN_WIDTH_160: - rx_status->bw = RATE_INFO_BW_160; - break; - } - - if (format == RATE_MCS_HE_MSK) - iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, - phy_info, queue); - - iwl_mvm_decode_lsig(skb, &phy_data); - - rx_status->device_timestamp = gp2_on_air_rise; - rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : + rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; - rx_status->freq = ieee80211_channel_to_frequency(channel, - rx_status->band); - iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, - energy_b); - rcu_read_lock(); + iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue); - is_sgi = format == RATE_MCS_HE_MSK ? - iwl_he_is_sgi(rate_n_flags) : - rate_n_flags & RATE_MCS_SGI_MSK; + /* no more radio tap info should be put after this point. + * + * We mark it as mac header, for upper layers to know where + * all radio tap header ends. + */ + skb_reset_mac_header(skb); - if (!(format == RATE_MCS_CCK_MSK) && is_sgi) - rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - if (rate_n_flags & RATE_MCS_LDPC_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_LDPC; - if (format == RATE_MCS_HT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->encoding = RX_ENC_HT; - rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - } else if (format == RATE_MCS_VHT_MSK) { - u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> - RATE_MCS_STBC_POS; - rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; - rx_status->encoding = RX_ENC_VHT; - rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; - if (rate_n_flags & RATE_MCS_BF_MSK) - rx_status->enc_flags |= RX_ENC_FLAG_BF; - /* - * take the nss from the rx_vec since the rate_n_flags has - * only 2 bits for the nss which gives a max of 4 ss but - * there may be up to 8 spatial streams - */ + /* + * Override the nss from the rx_vec since the rate_n_flags has + * only 2 bits for the nss which gives a max of 4 ss but there + * may be up to 8 spatial streams. 
+ */ + switch (format) { + case RATE_MCS_VHT_MSK: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; - } else if (format == RATE_MCS_HE_MSK) { + break; + case RATE_MCS_HE_MSK: rx_status->nss = le32_get_bits(desc->rx_vec[0], RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; - } else { - int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags, - rx_status->band); - - if (WARN(rate < 0 || rate > 0xFF, - "Invalid rate flags 0x%x, band %d,\n", - rate_n_flags, rx_status->band)) { - kfree_skb(skb); - goto out; - } - rx_status->rate_idx = rate; + break; + case RATE_MCS_EHT_MSK: + rx_status->nss = + le32_get_bits(desc->rx_vec[2], + RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1; } + rcu_read_lock(); ieee80211_rx_napi(mvm->hw, sta, skb, napi); -out: rcu_read_unlock(); } @@ -2234,9 +2792,10 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, goto out; } - if (WARN(tid != baid_data->tid || sta_id != baid_data->sta_id, - "baid 0x%x is mapped to sta:%d tid:%d, but BAR release received for sta:%d tid:%d\n", - baid, baid_data->sta_id, baid_data->tid, sta_id, + if (WARN(tid != baid_data->tid || sta_id > IWL_MVM_STATION_COUNT_MAX || + !(baid_data->sta_mask & BIT(sta_id)), + "baid 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n", + baid, baid_data->sta_mask, baid_data->tid, sta_id, tid)) goto out; diff --git a/mvm/scan.c b/mvm/scan.c index a4077053e374..c1d9ce753468 100644 --- a/mvm/scan.c +++ b/mvm/scan.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -45,6 +45,9 @@ /* minimal number of 2GHz and 5GHz channels in the regular scan request */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4 +/* Number of iterations on the channel for mei filtered scan */ +#define IWL_MEI_SCAN_NUM_ITER 5U + struct iwl_mvm_scan_timing_params { u32 suspend_time; u32 max_out_time; @@ -98,6 +101,7 @@ struct iwl_mvm_scan_params { bool scan_6ghz; bool enable_6ghz_passive; bool respect_p2p_go, respect_p2p_go_hb; + u8 bssid[ETH_ALEN] __aligned(2); }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) @@ -193,8 +197,9 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac, struct iwl_mvm_scan_iter_data *data = _data; struct iwl_mvm_vif *curr_mvmvif; - if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id < NUM_PHY_CTX) + if (vif->type != NL80211_IFTYPE_P2P_DEVICE && + mvmvif->deflink.phy_ctxt && + mvmvif->deflink.phy_ctxt->id < NUM_PHY_CTX) data->global_cnt += 1; if (!data->current_vif || vif == data->current_vif) @@ -203,8 +208,8 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac, curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); if (vif->type == NL80211_IFTYPE_AP && vif->p2p && - mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) + mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt && + mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id) data->is_dcm_with_p2p_go = true; } @@ -239,8 +244,9 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, * set all scan requests as fast-balance scan */ if (vif && vif->type == NL80211_IFTYPE_STATION && - vif->bss_conf.dtim_period < 220 && - data.is_dcm_with_p2p_go) + data.is_dcm_with_p2p_go && + 
((vif->bss_conf.beacon_int * + vif->bss_conf.dtim_period) < 220)) return IWL_SCAN_TYPE_FAST_BALANCE; } @@ -641,7 +647,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, NL80211_BAND_2GHZ, no_cck); - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { tx_cmd[0].sta_id = mvm->aux_sta.sta_id; tx_cmd[1].sta_id = mvm->aux_sta.sta_id; @@ -758,7 +764,7 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); eth_broadcast_addr(frame->da); - eth_broadcast_addr(frame->bssid); + ether_addr_copy(frame->bssid, params->bssid); frame->seq_ctrl = 0; pos = frame->u.probe_req.variable; @@ -1078,7 +1084,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); /* This function should not be called when using ADD_STA ver >=12 */ - WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12); + WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw)); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; @@ -1129,7 +1135,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config, memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); /* This function should not be called when using ADD_STA ver >=12 */ - WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12); + WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw)); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; @@ -1244,7 +1250,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) memset(&cfg, 0, sizeof(cfg)); - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { + if (!iwl_mvm_has_new_station_api(mvm->fw)) { cfg.bcast_sta_id = mvm->aux_sta.sta_id; } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) { /* @@ -1621,11 +1627,11 @@ iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm, } static void -iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, +iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm, struct ieee80211_channel **channels, - struct iwl_scan_channel_params_v6 *cp, + struct iwl_scan_channel_params_v7 *cp, int n_channels, u32 flags, - enum nl80211_iftype vif_type) + enum nl80211_iftype vif_type, u32 version) { int i; @@ -1635,14 +1641,19 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, u32 n_aps_flag = iwl_mvm_scan_ch_n_aps_flag(vif_type, channels[i]->hw_value); + u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band); cfg->flags = cpu_to_le32(flags | n_aps_flag); cfg->v2.channel_num = channels[i]->hw_value; - cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); if (cfg80211_channel_is_psc(channels[i])) cfg->flags = 0; cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; + if (version < 17) + cfg->v2.band = iwl_band; + else + cfg->flags |= cpu_to_le32((iwl_band << + IWL_CHAN_CFG_FLAGS_BAND_POS)); } } @@ -1717,14 +1728,15 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, pp->bssid_num = idex_b; } -/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */ +/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v7 */ static u32 -iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, +iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, u32 n_channels, struct iwl_scan_probe_params_v4 *pp, - struct iwl_scan_channel_params_v6 *cp, - enum nl80211_iftype vif_type) + struct iwl_scan_channel_params_v7 *cp, + enum nl80211_iftype vif_type, + u32 version) { int 
i; struct cfg80211_scan_6ghz_params *scan_6ghz_params = @@ -1739,6 +1751,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; bool force_passive, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; + s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; /* * Avoid performing passive scan on non PSC channels unless the @@ -1750,9 +1763,14 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, continue; cfg->v1.channel_num = params->channels[i]->hw_value; - cfg->v2.band = 2; - cfg->v2.iter_count = 1; - cfg->v2.iter_interval = 0; + if (version < 17) + cfg->v2.band = PHY_BAND_6; + else + cfg->flags |= cpu_to_le32(PHY_BAND_6 << + IWL_CHAN_CFG_FLAGS_BAND_POS); + + cfg->v5.iter_count = 1; + cfg->v5.iter_interval = 0; /* * The optimize the scan time, i.e., reduce the scan dwell time @@ -1763,9 +1781,22 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, */ n_used_bssid_entries = 3; for (j = 0; j < params->n_6ghz_params; j++) { + s8 tmp_psd_20; + if (!(scan_6ghz_params[j].channel_idx == i)) continue; + /* Use the highest PSD value allowed as advertised by + * APs for this channel + */ + tmp_psd_20 = scan_6ghz_params[j].psd_20; + if (tmp_psd_20 != + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED && + (psd_20 == + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED || + psd_20 < tmp_psd_20)) + psd_20 = tmp_psd_20; + found = false; unsolicited_probe_on_chan |= scan_6ghz_params[j].unsolicited_probe; @@ -1869,6 +1900,9 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, flags |= bssid_bitmap | (s_ssid_bitmap << 16); cfg->flags |= cpu_to_le32(flags); + if (version >= 17) + cfg->v5.psd_20 = psd_20; + ch_cnt++; } @@ -1948,14 +1982,14 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm, * reset or resume flow, or while not associated and a large interval * has passed since the last 6GHz passive scan. */ - if ((vif->bss_conf.assoc || + if ((vif->cfg.assoc || time_after(mvm->last_6ghz_passive_scan_jiffies + (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) && (time_before(mvm->last_reset_or_resume_time_jiffies + (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ), jiffies))) { IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n", - vif->bss_conf.assoc ? "associated" : + vif->cfg.assoc ? 
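[Editor's note: illustrative sketch, not part of the commit above.] The psd_20 selection in iwl_mvm_umac_scan_cfg_channels_v7_6g() keeps the highest PSD advertised by any AP on the channel, treating the reserved value as "unset". A small sketch of that reduction, with the sentinel made explicit (names and the numeric value are stand-ins, not the mac80211 definitions):

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in for IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; the actual
     * constant's value may differ. */
    #define PSD_RESERVED ((int8_t)-128)

    /* Reduce per-AP PSD values to the highest usable one, ignoring
     * entries that carry the reserved/unset marker. */
    static int8_t highest_psd_20(const int8_t *psd, size_t n)
    {
        int8_t best = PSD_RESERVED;
        size_t i;

        for (i = 0; i < n; i++) {
            if (psd[i] == PSD_RESERVED)
                continue;
            if (best == PSD_RESERVED || best < psd[i])
                best = psd[i];
        }
        return best;   /* PSD_RESERVED if no AP advertised a usable value */
    }

The result is only written into cfg->v5.psd_20 for scan API version 17 and later, matching the version gate added in the same hunk.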
"associated" : "timeout did not expire"); return; } @@ -2080,6 +2114,11 @@ static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB; } + if (params->scan_6ghz && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT)) + flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT; + return flags; } @@ -2281,17 +2320,18 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } static void -iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm, +iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, struct iwl_scan_general_params_v11 *gp, - u16 gen_flags, u8 gen_flags2) + u16 gen_flags, u8 gen_flags2, + u32 version) { struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); iwl_mvm_scan_umac_dwell_v11(mvm, gp, params); - IWL_DEBUG_SCAN(mvm, "Gerenal: flags=0x%x, flags2=0x%x\n", + IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n", gen_flags, gen_flags2); gp->flags = cpu_to_le16(gen_flags); @@ -2302,7 +2342,23 @@ iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm, if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2) gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; - gp->scan_start_mac_id = scan_vif->id; + if (version < 12) { + gp->scan_start_mac_or_link_id = scan_vif->id; + } else { + struct iwl_mvm_vif_link_info *link_info; + u8 link_id = 0; + + /* Use one of the active link (if any). In the future it would + * be possible that the link ID would be part of the scan + * request coming from upper layers so we would need to use it. + */ + if (vif->active_links) + link_id = ffs(vif->active_links) - 1; + + link_info = scan_vif->link[link_id]; + if (!WARN_ON(!link_info)) + gp->scan_start_mac_or_link_id = link_info->fw_link_id; + } } static void @@ -2341,21 +2397,22 @@ iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm, } static void -iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm, +iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, - struct iwl_scan_channel_params_v6 *cp, - u32 channel_cfg_flags) + struct iwl_scan_channel_params_v7 *cp, + u32 channel_cfg_flags, + u32 version) { cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif); cp->count = params->n_channels; cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS; - iwl_mvm_umac_scan_cfg_channels_v6(mvm, params->channels, cp, + iwl_mvm_umac_scan_cfg_channels_v7(mvm, params->channels, cp, params->n_channels, channel_cfg_flags, - vif->type); + vif->type, version); if (params->enable_6ghz_passive) { struct ieee80211_supported_band *sband = @@ -2372,11 +2429,19 @@ iwl_mvm_scan_umac_fill_ch_p_v6(struct iwl_mvm *mvm, if (!cfg80211_channel_is_psc(channel)) continue; - cfg->flags = 0; - cfg->v2.channel_num = channel->hw_value; - cfg->v2.band = PHY_BAND_6; - cfg->v2.iter_count = 1; - cfg->v2.iter_interval = 0; + cfg->v5.channel_num = channel->hw_value; + cfg->v5.iter_count = 1; + cfg->v5.iter_interval = 0; + + if (version < 17) { + cfg->flags = 0; + cfg->v2.band = PHY_BAND_6; + } else { + cfg->flags = cpu_to_le32(PHY_BAND_6 << + IWL_CHAN_CFG_FLAGS_BAND_POS); + cfg->v5.psd_20 = + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; + } cp->count++; } } @@ -2397,9 +2462,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, 
vif, type); - iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif, &scan_p->general_params, - gen_flags, 0); + gen_flags, 0, 12); ret = iwl_mvm_fill_scan_sched_params(params, scan_p->periodic_params.schedule, @@ -2419,9 +2484,9 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, int type, int uid, u32 version) { - struct iwl_scan_req_umac_v15 *cmd = mvm->scan_cmd; - struct iwl_scan_req_params_v15 *scan_p = &cmd->scan_params; - struct iwl_scan_channel_params_v6 *cp = &scan_p->channel_params; + struct iwl_scan_req_umac_v17 *cmd = mvm->scan_cmd; + struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params; + struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params; struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params; int ret; u16 gen_flags; @@ -2440,9 +2505,9 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, else gen_flags2 = 0; - iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif, &scan_p->general_params, - gen_flags, gen_flags2); + gen_flags, gen_flags2, version); ret = iwl_mvm_fill_scan_sched_params(params, scan_p->periodic_params.schedule, @@ -2451,11 +2516,13 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, return ret; if (!params->scan_6ghz) { - iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params, - &bitmap_ssid); - iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif, - &scan_p->channel_params, bitmap_ssid); - + iwl_mvm_scan_umac_fill_probe_p_v4(params, + &scan_p->probe_params, + &bitmap_ssid); + iwl_mvm_scan_umac_fill_ch_p_v7(mvm, params, vif, + &scan_p->channel_params, + bitmap_ssid, + version); return 0; } else { pb->preq = params->preq; @@ -2467,9 +2534,10 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb); - cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params, + cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params, params->n_channels, - pb, cp, vif->type); + pb, cp, vif->type, + version); if (!cp->count) { mvm->scan_uid_status[uid] = 0; return -EINVAL; @@ -2496,6 +2564,20 @@ static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15); } +static int iwl_mvm_scan_umac_v16(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 16); +} + +static int iwl_mvm_scan_umac_v17(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 17); +} + static int iwl_mvm_num_scans(struct iwl_mvm *mvm) { return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); @@ -2611,11 +2693,96 @@ struct iwl_scan_umac_handler { static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { /* set the newest version first to shorten the list traverse time */ + IWL_SCAN_UMAC_HANDLER(17), + IWL_SCAN_UMAC_HANDLER(16), IWL_SCAN_UMAC_HANDLER(15), IWL_SCAN_UMAC_HANDLER(14), IWL_SCAN_UMAC_HANDLER(12), }; +static void iwl_mvm_mei_scan_work(struct work_struct *wk) +{ + struct iwl_mei_scan_filter *scan_filter = + container_of(wk, struct iwl_mei_scan_filter, scan_work); + struct iwl_mvm *mvm = + container_of(scan_filter, struct iwl_mvm, mei_scan_filter); + struct 
iwl_mvm_csme_conn_info *info; + struct sk_buff *skb; + u8 bssid[ETH_ALEN]; + + mutex_lock(&mvm->mutex); + info = iwl_mvm_get_csme_conn_info(mvm); + memcpy(bssid, info->conn_info.bssid, ETH_ALEN); + mutex_unlock(&mvm->mutex); + + while ((skb = skb_dequeue(&scan_filter->scan_res))) { + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!memcmp(mgmt->bssid, bssid, ETH_ALEN)) + ieee80211_rx_irqsafe(mvm->hw, skb); + else + kfree_skb(skb); + } +} + +void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter) +{ + skb_queue_head_init(&mei_scan_filter->scan_res); + INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work); +} + +/* In case CSME is connected and has link protection set, this function will + * override the scan request to scan only the associated channel and only for + * the associated SSID. + */ +static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params) +{ + struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm); + struct iwl_mei_conn_info *conn_info; + struct ieee80211_channel *chan; + int scan_iters, i; + + if (!info) { + IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n"); + return; + } + + conn_info = &info->conn_info; + if (!info->conn_info.lp_state || !info->conn_info.ssid_len) + return; + + if (!params->n_channels || !params->n_ssids) + return; + + mvm->mei_scan_filter.is_mei_limited_scan = true; + + chan = ieee80211_get_channel(mvm->hw->wiphy, + ieee80211_channel_to_frequency(conn_info->channel, + conn_info->band)); + if (!chan) { + IWL_DEBUG_SCAN(mvm, + "Failed to get CSME channel (chan=%u band=%u)\n", + conn_info->channel, conn_info->band); + return; + } + + /* The mei filtered scan must find the AP, otherwise CSME will + * take the NIC ownership. Add several iterations on the channel to + * make the scan more robust. 
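[Editor's note: illustrative sketch, not part of the commit above.] iwl_mvm_mei_limited_scan() collapses the request to the single CSME channel and SSID, but repeats that channel up to IWL_MEI_SCAN_NUM_ITER times so the AP is less likely to be missed. A sketch of that rewrite step in isolation, with simplified types that are not the driver's:

    #include <stddef.h>
    #include <string.h>

    #define MEI_SCAN_NUM_ITER 5u   /* mirrors IWL_MEI_SCAN_NUM_ITER in the diff */

    struct toy_scan_params {
        size_t n_channels;
        int channels[32];          /* stand-in for struct ieee80211_channel * */
        size_t n_ssids;
        unsigned char ssid[32];
        size_t ssid_len;
    };

    /* Rewrite the request so it scans only 'chan', several times, for one SSID. */
    static void limit_scan_to_connection(struct toy_scan_params *p, int chan,
                                         const unsigned char *ssid,
                                         size_t ssid_len)
    {
        size_t iters = MEI_SCAN_NUM_ITER < p->n_channels ?
                       MEI_SCAN_NUM_ITER : p->n_channels;
        size_t i;

        p->n_channels = iters;
        for (i = 0; i < iters; i++)
            p->channels[i] = chan;       /* same channel, repeated */

        if (ssid_len > sizeof(p->ssid))
            ssid_len = sizeof(p->ssid);
        p->n_ssids = 1;
        p->ssid_len = ssid_len;
        memcpy(p->ssid, ssid, ssid_len);
    }

Taking the minimum with the original n_channels keeps the rewrite within the caller's channel array, exactly as the driver does with min(IWL_MEI_SCAN_NUM_ITER, params->n_channels).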
+ */ + scan_iters = min(IWL_MEI_SCAN_NUM_ITER, params->n_channels); + params->n_channels = scan_iters; + for (i = 0; i < scan_iters; i++) + params->channels[i] = chan; + + IWL_DEBUG_SCAN(mvm, "Mei scan: num iterations=%u\n", scan_iters); + + params->n_ssids = 1; + params->ssids[0].ssid_len = conn_info->ssid_len; + memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len); +} + static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_host_cmd *hcmd, @@ -2626,7 +2793,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, u8 scan_ver; lockdep_assert_held(&mvm->mutex); - memset(mvm->scan_cmd, 0, ksize(mvm->scan_cmd)); + memset(mvm->scan_cmd, 0, mvm->scan_cmd_size); + + iwl_mvm_mei_limited_scan(mvm, params); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd->id = SCAN_OFFLOAD_REQUEST_CMD; @@ -2676,11 +2845,23 @@ static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac, if (vif == data->current_vif) return; - if (vif->type == NL80211_IFTYPE_AP && vif->p2p && - mvmvif->phy_ctxt->id < NUM_PHY_CTX && - (data->band == NUM_NL80211_BANDS || - mvmvif->phy_ctxt->channel->band == data->band)) - data->p2p_go = true; + if (vif->type == NL80211_IFTYPE_AP && vif->p2p) { + u32 link_id; + + for (link_id = 0; + link_id < ARRAY_SIZE(mvmvif->link); + link_id++) { + struct iwl_mvm_vif_link_info *link = + mvmvif->link[link_id]; + + if (link && link->phy_ctxt->id < NUM_PHY_CTX && + (data->band == NUM_NL80211_BANDS || + link->phy_ctxt->channel->band == data->band)) { + data->p2p_go = true; + break; + } + } + } } static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm, @@ -2782,6 +2963,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.pass_all = true; params.n_match_sets = 0; params.match_sets = NULL; + ether_addr_copy(params.bssid, req->bssid); params.scan_plans = &scan_plan; params.n_scan_plans = 1; @@ -2875,6 +3057,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.pass_all = iwl_mvm_scan_pass_all(mvm, req); params.n_match_sets = req->n_match_sets; params.match_sets = req->match_sets; + eth_broadcast_addr(params.bssid); if (!req->n_scan_plans) return -EINVAL; @@ -2970,6 +3153,8 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, u32 uid = __le32_to_cpu(notif->uid); bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); + mvm->mei_scan_filter.is_mei_limited_scan = false; + if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) return; @@ -2980,7 +3165,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, .scan_start_tsf = mvm->scan_start, }; - memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN); + memcpy(info.tsf_bssid, mvm->scan_vif->deflink.bssid, ETH_ALEN); ieee80211_scan_completed(mvm->hw, &info); mvm->scan_vif = NULL; cancel_delayed_work(&mvm->scan_timeout_dwork); @@ -3091,20 +3276,22 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) 1 * HZ); } -static int iwl_scan_req_umac_get_size(u8 scan_ver) +static size_t iwl_scan_req_umac_get_size(u8 scan_ver) { switch (scan_ver) { case 12: return sizeof(struct iwl_scan_req_umac_v12); case 14: case 15: - return sizeof(struct iwl_scan_req_umac_v15); + case 16: + case 17: + return sizeof(struct iwl_scan_req_umac_v17); } return 0; } -int iwl_mvm_scan_size(struct iwl_mvm *mvm) +size_t iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size, tail_size; u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - 
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH */ #include "mvm.h" @@ -8,7 +8,7 @@ /* For counting bound interfaces */ struct iwl_mvm_active_iface_iterator_data { struct ieee80211_vif *ignore_vif; - u8 sta_vif_ap_sta_id; + struct ieee80211_sta *sta_vif_ap_sta; enum iwl_sf_state sta_vif_state; u32 num_active_macs; }; @@ -23,15 +23,15 @@ static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac, struct iwl_mvm_active_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (vif == data->ignore_vif || !mvmvif->phy_ctxt || + if (vif == data->ignore_vif || !mvmvif->deflink.phy_ctxt || vif->type == NL80211_IFTYPE_P2P_DEVICE) return; data->num_active_macs++; if (vif->type == NL80211_IFTYPE_STATION) { - data->sta_vif_ap_sta_id = mvmvif->ap_sta_id; - if (vif->bss_conf.assoc) + data->sta_vif_ap_sta = mvmvif->ap_sta; + if (vif->cfg.assoc) data->sta_vif_state = SF_FULL_ON; else data->sta_vif_state = SF_INIT_OFF; @@ -98,6 +98,10 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, struct ieee80211_sta *sta) { int i, j, watermark; + u8 max_rx_nss = 0; + bool is_legacy = true; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN); @@ -106,10 +110,25 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, * capabilities of the AP station, and choose the watermark accordingly. */ if (sta) { - if (sta->ht_cap.ht_supported || - sta->vht_cap.vht_supported || - sta->he_cap.has_he) { - switch (sta->rx_nss) { + /* find the maximal NSS number among all links (if relevant) */ + rcu_read_lock(); + for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { + link_sta = rcu_dereference(sta->link[link_id]); + if (!link_sta) + continue; + + if (link_sta->ht_cap.ht_supported || + link_sta->vht_cap.vht_supported || + link_sta->eht_cap.has_eht || + link_sta->he_cap.has_he) { + is_legacy = false; + max_rx_nss = max(max_rx_nss, link_sta->rx_nss); + } + } + rcu_read_unlock(); + + if (!is_legacy) { + switch (max_rx_nss) { case 1: watermark = SF_W_MARK_SISO; break; @@ -151,21 +170,16 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, sizeof(sf_full_timeout_def)); } - } -static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, +static int iwl_mvm_sf_config(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { .state = cpu_to_le32(new_state), }; - struct ieee80211_sta *sta; int ret = 0; - if (mvm->cfg->disable_dummy_notification) - sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); - /* * If an associated AP sta changed its antenna configuration, the state * will remain FULL_ON but SF parameters need to be reconsidered. 
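[Editor's note: illustrative sketch, not part of the commit above.] The iwl_mvm_fill_sf_command() change picks the smart-FIFO watermark from the highest RX NSS found on any link of an MLO station instead of the single default link. A condensed sketch of that scan over links, using plain arrays instead of RCU-protected pointers and illustrative names:

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_link_sta {
        bool present;
        bool ht, vht, he, eht;   /* any non-legacy capability */
        unsigned int rx_nss;
    };

    #define MAX_LINKS 15

    /* Returns 0 if every link is legacy-only, otherwise the maximal RX NSS. */
    static unsigned int max_rx_nss_over_links(const struct toy_link_sta *links)
    {
        unsigned int max_nss = 0;
        size_t i;

        for (i = 0; i < MAX_LINKS; i++) {
            const struct toy_link_sta *l = &links[i];

            if (!l->present)
                continue;
            if (l->ht || l->vht || l->he || l->eht)
                max_nss = max_nss > l->rx_nss ? max_nss : l->rx_nss;
        }
        return max_nss;   /* "legacy" result maps to SF_W_MARK_LEGACY above */
    }

In the driver the link pointers are RCU-protected, so the loop runs under rcu_read_lock()/rcu_read_unlock(), as the hunk shows.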
@@ -178,20 +192,12 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); break; case SF_FULL_ON: - if (sta_id == IWL_MVM_INVALID_STA) { + if (!sta) { IWL_ERR(mvm, "No station: Cannot switch SF to FULL_ON\n"); return -EINVAL; } - rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, "Invalid station id\n"); - rcu_read_unlock(); - return -EINVAL; - } iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); - rcu_read_unlock(); break; case SF_INIT_OFF: iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); @@ -219,13 +225,12 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, bool remove_vif) { enum iwl_sf_state new_state; - u8 sta_id = IWL_MVM_INVALID_STA; struct iwl_mvm_vif *mvmvif = NULL; struct iwl_mvm_active_iface_iterator_data data = { .ignore_vif = changed_vif, .sta_vif_state = SF_UNINIT, - .sta_vif_ap_sta_id = IWL_MVM_INVALID_STA, }; + struct ieee80211_sta *sta = NULL; /* * Ignore the call if we are in HW Restart flow, or if the handled @@ -255,16 +260,16 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, * and we filled the relevant data during iteration */ new_state = data.sta_vif_state; - sta_id = data.sta_vif_ap_sta_id; + sta = data.sta_vif_ap_sta; } else { if (WARN_ON(!changed_vif)) return -EINVAL; if (changed_vif->type != NL80211_IFTYPE_STATION) { new_state = SF_UNINIT; - } else if (changed_vif->bss_conf.assoc && + } else if (changed_vif->cfg.assoc && changed_vif->bss_conf.dtim_period) { mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); - sta_id = mvmvif->ap_sta_id; + sta = mvmvif->ap_sta; new_state = SF_FULL_ON; } else { new_state = SF_INIT_OFF; @@ -275,5 +280,6 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif, /* If there are multiple active macs - change to SF_UNINIT */ new_state = SF_UNINIT; } - return iwl_mvm_sf_config(mvm, sta_id, new_state); + + return iwl_mvm_sf_config(mvm, sta, new_state); } diff --git a/mvm/sta.c b/mvm/sta.c index c7f9d3870f21..3b9a343d4f67 100644 --- a/mvm/sta.c +++ b/mvm/sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2015, 2018-2022 Intel Corporation + * Copyright (C) 2012-2015, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -24,8 +24,7 @@ static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) return sizeof(struct iwl_mvm_add_sta_cmd_v7); } -static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, - enum nl80211_iftype iftype) +int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { int sta_id; u32 reserved_ids = 0; @@ -51,13 +50,87 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, return IWL_MVM_INVALID_STA; } +/* Calculate the ampdu density and max size */ +u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link_conf, + u32 *_agg_size) +{ + u32 agg_size = 0, mpdu_dens = 0; + + if (WARN_ON(!link_sta)) + return 0; + + /* Note that we always use only legacy & highest supported PPDUs, so + * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating + * the maximum A-MPDU size of various PPDU types in different bands, + * we only need to worry about the highest supported PPDU type here. 
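[Editor's note: illustrative sketch, not part of the commit above.] The new iwl_mvm_get_sta_ampdu_dens() helper builds the A-MPDU length exponent by starting from the HT/VHT/6 GHz base value and adding the HE and EHT extension fields, then clamping to the largest exponent the firmware supports. A toy version of that accumulation, with illustrative structures rather than the mac80211 capability layouts:

    #include <stdint.h>

    /* Illustrative capability summary; not the mac80211 structures. */
    struct toy_agg_caps {
        uint32_t base_exp;   /* from HT ampdu_factor, VHT cap, or HE 6 GHz cap */
        uint32_t he_ext;     /* HE "Max A-MPDU Length Exponent Extension" */
        uint32_t eht_ext;    /* EHT extension on top of HE */
        int has_he, has_eht;
    };

    /* Accumulate the extensions on top of the base exponent and clamp to
     * the maximum exponent the firmware advertises (fw_max_exp). */
    static uint32_t agg_size_exponent(const struct toy_agg_caps *c,
                                      uint32_t fw_max_exp)
    {
        uint32_t exp = c->base_exp;

        if (c->has_he)
            exp += c->he_ext;
        if (c->has_eht)
            exp += c->eht_ext;

        return exp < fw_max_exp ? exp : fw_max_exp;
    }

The clamp corresponds to the min_t() against STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT in the hunk above.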
+ */ + + if (link_sta->ht_cap.ht_supported) { + agg_size = link_sta->ht_cap.ampdu_factor; + mpdu_dens = link_sta->ht_cap.ampdu_density; + } + + if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) { + /* overwrite HT values on 6 GHz */ + mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + } else if (link_sta->vht_cap.vht_supported) { + /* if VHT supported overwrite HT value */ + agg_size = u32_get_bits(link_sta->vht_cap.cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + } + + /* D6.0 10.12.2 A-MPDU length limit rules + * A STA indicates the maximum length of the A-MPDU preEOF padding + * that it can receive in an HE PPDU in the Maximum A-MPDU Length + * Exponent field in its HT Capabilities, VHT Capabilities, + * and HE 6 GHz Band Capabilities elements (if present) and the + * Maximum AMPDU Length Exponent Extension field in its HE + * Capabilities element + */ + if (link_sta->he_cap.has_he) + agg_size += + u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); + + if (link_sta->eht_cap.has_eht) + agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1], + IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK); + + /* Limit to max A-MPDU supported by FW */ + agg_size = min_t(u32, agg_size, + STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT); + + *_agg_size = agg_size; + return mpdu_dens; +} + +u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta) +{ + u8 uapsd_acs = 0; + + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd_acs |= BIT(AC_BK); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd_acs |= BIT(AC_BE); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd_acs |= BIT(AC_VI); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd_acs |= BIT(AC_VO); + + return uapsd_acs | uapsd_acs << 4; +} + /* send station add/update command to firmware */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd add_sta_cmd = { - .sta_id = mvm_sta->sta_id, + .sta_id = mvm_sta->deflink.sta_id, .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), .add_modify = update ? 
1 : 0, .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | @@ -86,7 +159,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } } - switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { case IEEE80211_STA_RX_BW_320: case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); @@ -98,13 +171,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); fallthrough; case IEEE80211_STA_RX_BW_20: - if (sta->ht_cap.ht_supported) + if (sta->deflink.ht_cap.ht_supported) add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_20MHZ); break; } - switch (sta->rx_nss) { + switch (sta->deflink.rx_nss) { case 1: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); break; @@ -116,7 +189,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - switch (sta->smps_mode) { + switch (sta->deflink.smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); @@ -134,69 +207,26 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - if (sta->ht_cap.ht_supported) { + if (sta->deflink.ht_cap.ht_supported || + mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | STA_FLG_AGG_MPDU_DENS_MSK); - mpdu_dens = sta->ht_cap.ampdu_density; - } - - if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { - add_sta_cmd.station_flags_msk |= - cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | - STA_FLG_AGG_MPDU_DENS_MSK); - - mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa, - IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); - agg_size = le16_get_bits(sta->he_6ghz_capa.capa, - IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - } else - if (sta->vht_cap.vht_supported) { - agg_size = sta->vht_cap.cap & - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - agg_size >>= - IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; - } else if (sta->ht_cap.ht_supported) { - agg_size = sta->ht_cap.ampdu_factor; - } - - /* D6.0 10.12.2 A-MPDU length limit rules - * A STA indicates the maximum length of the A-MPDU preEOF padding - * that it can receive in an HE PPDU in the Maximum A-MPDU Length - * Exponent field in its HT Capabilities, VHT Capabilities, - * and HE 6 GHz Band Capabilities elements (if present) and the - * Maximum AMPDU Length Exponent Extension field in its HE - * Capabilities element - */ - if (sta->he_cap.has_he) - agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3], - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); - - /* Limit to max A-MPDU supported by FW */ - if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT)) - agg_size = (STA_FLG_MAX_AGG_SIZE_4M >> - STA_FLG_MAX_AGG_SIZE_SHIFT); - + mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink, + &mvm_sta->vif->bss_conf, + &agg_size); add_sta_cmd.station_flags |= cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); + if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS; - - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) - add_sta_cmd.uapsd_acs |= BIT(AC_BK); - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) - add_sta_cmd.uapsd_acs |= BIT(AC_BE); - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) - 
add_sta_cmd.uapsd_acs |= BIT(AC_VI); - if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) - add_sta_cmd.uapsd_acs |= BIT(AC_VO); - add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4; + add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta); add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128; } @@ -229,6 +259,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) struct ieee80211_sta *sta; struct iwl_mvm_sta *mvm_sta; unsigned long timeout; + unsigned int sta_id; rcu_read_lock(); @@ -247,7 +278,8 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) } /* Timer expired */ - sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); + sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */ + sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]); /* * sta should be valid unless the following happens: @@ -257,7 +289,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t) * A-MDPU and hence the timer continues to run. Then, the * timer expires and sta is NULL. */ - if (!sta) + if (IS_ERR_OR_NULL(sta)) goto unlock; mvm_sta = iwl_mvm_sta_from_mac80211(sta); @@ -297,7 +329,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, mvmsta->tid_disable_agg |= disable_agg_tids; cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; + cmd.sta_id = mvmsta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_QUEUES; if (disable_agg_tids) @@ -317,7 +349,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - u16 *queueptr, u8 tid) + int sta_id, u16 *queueptr, u8 tid) { int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { @@ -334,9 +366,14 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, SCD_QUEUE_CONFIG_CMD); struct iwl_scd_queue_cfg_cmd remove_cmd = { .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), - .u.remove.queue = cpu_to_le32(queue), + .u.remove.sta_mask = cpu_to_le32(BIT(sta_id)), }; + if (tid == IWL_MAX_TID_COUNT) + tid = IWL_MGMT_TID; + + remove_cmd.u.remove.tid = cpu_to_le32(tid); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(remove_cmd), &remove_cmd); @@ -384,7 +421,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_tid(sta, tid); + spin_lock_bh(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_unlock_bh(&mvm->add_stream_lock); } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ @@ -478,7 +519,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + spin_lock_bh(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + spin_unlock_bh(&mvm->add_stream_lock); } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ @@ -530,7 +575,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid); + ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d 
(ret=%d)\n", @@ -691,7 +736,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, queue, iwl_mvm_ac_to_tx_fifo[ac]); /* Stop the queue and wait for it to empty */ - txq->stopped = true; + set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state); ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { @@ -734,7 +779,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, out: /* Continue using the queue */ - txq->stopped = false; + clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state); return ret; } @@ -764,32 +809,52 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, return -ENOSPC; } -static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, - u8 sta_id, u8 tid, unsigned int timeout) +static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta) +{ + int max_size = IWL_DEFAULT_QUEUE_SIZE; + unsigned int link_id; + + /* this queue isn't used for traffic (cab_queue) */ + if (!sta) + return IWL_MGMT_QUEUE_SIZE; + + rcu_read_lock(); + + for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) { + struct ieee80211_link_sta *link = + rcu_dereference(sta->link[link_id]); + + if (!link) + continue; + + /* support for 1k ba size */ + if (link->eht_cap.has_eht && + max_size < IWL_DEFAULT_QUEUE_SIZE_EHT) + max_size = IWL_DEFAULT_QUEUE_SIZE_EHT; + + /* support for 256 ba size */ + if (link->he_cap.has_he && + max_size < IWL_DEFAULT_QUEUE_SIZE_HE) + max_size = IWL_DEFAULT_QUEUE_SIZE_HE; + } + + rcu_read_unlock(); + return max_size; +} + +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + u8 sta_id, u8 tid, unsigned int timeout) { int queue, size; + u32 sta_mask = 0; if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); } else { - struct ieee80211_sta *sta; - - rcu_read_lock(); - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - - /* this queue isn't used for traffic (cab_queue) */ - if (IS_ERR_OR_NULL(sta)) { - size = IWL_MGMT_QUEUE_SIZE; - } else if (sta->he_cap.has_he) { - /* support for 256 ba size */ - size = IWL_DEFAULT_QUEUE_SIZE_HE; - } else { - size = IWL_DEFAULT_QUEUE_SIZE; - } - - rcu_read_unlock(); + size = iwl_mvm_get_queue_size(sta); } /* take the min with bc tbl entries allowed */ @@ -798,22 +863,45 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, /* size needs to be power of 2 values for calculating read/write pointers */ size = rounddown_pow_of_two(size); + if (sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + unsigned int link_id; + + for (link_id = 0; + link_id < ARRAY_SIZE(mvmsta->link); + link_id++) { + struct iwl_mvm_link_sta *link = + rcu_dereference_protected(mvmsta->link[link_id], + lockdep_is_held(&mvm->mutex)); + + if (!link) + continue; + + sta_mask |= BIT(link->sta_id); + } + } else { + sta_mask |= BIT(sta_id); + } + + if (!sta_mask) + return -EINVAL; + do { - queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id), + queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask, tid, size, timeout); if (queue < 0) IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n", - size, sta_id, tid, queue); + "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n", + size, sta_mask, tid, queue); size /= 2; } while (queue < 0 && size >= 16); if (queue < 0) return queue; - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", - queue, sta_id, tid); + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid 
%d\n", + queue, sta_mask, tid); return queue; } @@ -833,14 +921,15 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", - mvmsta->sta_id, tid); - queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); + mvmsta->deflink.sta_id, tid); + queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id, + tid, wdg_timeout); if (queue < 0) return queue; mvmtxq->txq_id = queue; mvm->tvqm_info[queue].txq_tid = tid; - mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; + mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id; IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); @@ -1025,7 +1114,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) mvmsta->tid_disable_agg &= ~BIT(tid); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; + cmd.sta_id = mvmsta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX; cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); @@ -1059,7 +1148,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, unsigned long *unshare_queues, unsigned long *changetid_queues) { - int tid; + unsigned int tid; lockdep_assert_held(&mvmsta->lock); lockdep_assert_held(&mvm->mutex); @@ -1250,7 +1339,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, }; @@ -1276,7 +1365,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, spin_unlock_bh(&mvmsta->lock); if (tid == IWL_MAX_TID_COUNT) { - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) @@ -1295,12 +1384,12 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } if (queue < 0) - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try harder - perhaps kill an inactive queue */ - queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id); } /* No free queue - we'll have to share */ @@ -1340,7 +1429,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Allocating %squeue #%d to sta %d on tid %d\n", shared_queue ? "shared " : "", queue, - mvmsta->sta_id, tid); + mvmsta->deflink.sta_id, tid); if (shared_queue) { /* Disable any open aggs on this queue */ @@ -1407,7 +1496,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, out_err: queue_tmp = queue; - iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid); + iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid); return ret; } @@ -1442,12 +1531,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) * a queue in the function itself. 
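[Editor's note: illustrative sketch, not part of the commit above.] The iwl_mvm_tvqm_enable_txq() rework sizes the TX queue from the best capability found on any link: EHT stations get the 1k-BA queue size, HE stations the 256-BA size, everything else the default, and station-less queues (the cab queue) the management size. A compact sketch of that selection, with placeholder sizes rather than the driver's IWL_*_QUEUE_SIZE* constants:

    #include <stdbool.h>
    #include <stddef.h>

    /* Placeholder sizes; the driver uses its own IWL_*_QUEUE_SIZE* values. */
    #define QSIZE_MGMT    16
    #define QSIZE_DEFAULT 256
    #define QSIZE_HE      1024
    #define QSIZE_EHT     4096

    struct toy_link { bool present, has_he, has_eht; };

    static int pick_queue_size(const struct toy_link *links, size_t n_links,
                               bool have_sta)
    {
        int size = QSIZE_DEFAULT;
        size_t i;

        if (!have_sta)               /* e.g. the cab queue carries no traffic */
            return QSIZE_MGMT;

        for (i = 0; i < n_links; i++) {
            if (!links[i].present)
                continue;
            if (links[i].has_eht && size < QSIZE_EHT)
                size = QSIZE_EHT;    /* 1k BA sessions need the largest queue */
            if (links[i].has_he && size < QSIZE_HE)
                size = QSIZE_HE;     /* 256 BA sessions */
        }
        return size;
    }

As the hunk shows, the driver then takes the minimum with the byte-count table limit and rounds down to a power of two before allocating the queue.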
*/ if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) { + spin_lock_bh(&mvm->add_stream_lock); list_del_init(&mvmtxq->list); + spin_unlock_bh(&mvm->add_stream_lock); continue; } - list_del_init(&mvmtxq->list); + /* now we're ready, any remaining races/concurrency will be + * handled in iwl_mvm_mac_itxq_xmit() + */ + set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + local_bh_disable(); + spin_lock(&mvm->add_stream_lock); + list_del_init(&mvmtxq->list); + spin_unlock(&mvm->add_stream_lock); + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); local_bh_enable(); } @@ -1476,12 +1575,12 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else - queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { /* try again - this time kick out a queue if needed */ - queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); + queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id); if (queue < 0) { IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; @@ -1492,7 +1591,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, mvmsta->reserved_queue = queue; IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", - queue, mvmsta->sta_id); + queue, mvmsta->deflink.sta_id); return 0; } @@ -1504,15 +1603,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, * * Note that re-enabling aggregations isn't done in this function. */ -static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) +void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, + struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int wdg = iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { - .sta_id = mvm_sta->sta_id, + .sta_id = mvm_sta->deflink.sta_id, .frame_limit = IWL_FRAME_LIMIT, }; @@ -1534,8 +1633,9 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", - mvm_sta->sta_id, i); - txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, + mvm_sta->deflink.sta_id, i); + txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta, + mvm_sta->deflink.sta_id, i, wdg); /* * on failures, just set it to IWL_MVM_INVALID_QUEUE @@ -1564,7 +1664,8 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d to queue %d\n", - mvm_sta->sta_id, i, txq_id); + mvm_sta->deflink.sta_id, i, + txq_id); iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; @@ -1586,7 +1687,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, memset(&cmd, 0, sizeof(cmd)); cmd.sta_id = sta->sta_id; - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 && + if (iwl_mvm_has_new_station_api(mvm->fw) && sta->type == IWL_STA_AUX_ACTIVITY) cmd.mac_id_n_color = cpu_to_le32(mac_id); else @@ -1622,74 +1723,45 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_add_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +/* Initialize driver data of a new sta */ +int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, int sta_id, u8 sta_type) { struct iwl_mvm_vif *mvmvif = 
iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; - int i, ret, sta_id; - bool sta_update = false; - unsigned int sta_flags = 0; + int i, ret = 0; lockdep_assert_held(&mvm->mutex); - if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - sta_id = iwl_mvm_find_free_sta_id(mvm, - ieee80211_vif_type_p2p(vif)); - else - sta_id = mvm_sta->sta_id; - - if (sta_id == IWL_MVM_INVALID_STA) - return -ENOSPC; - - spin_lock_init(&mvm_sta->lock); + mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color); + mvm_sta->vif = vif; - /* if this is a HW restart re-alloc existing queues */ - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { - struct iwl_mvm_int_sta tmp_sta = { - .sta_id = sta_id, - .type = mvm_sta->sta_type, - }; + /* for MLD sta_id(s) should be allocated for each link before calling + * this function + */ + if (!mvm->mld_api_is_used) { + if (WARN_ON(sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; - /* - * First add an empty station since allocating - * a queue requires a valid station - */ - ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, - mvmvif->id, mvmvif->color); - if (ret) - goto err; + mvm_sta->deflink.sta_id = sta_id; + rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink); - iwl_mvm_realloc_queues_after_restart(mvm, sta); - sta_update = true; - sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; - goto update_fw; + if (!mvm->trans->trans_cfg->gen2) + mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_DEF; + else + mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; } - mvm_sta->sta_id = sta_id; - mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, - mvmvif->color); - mvm_sta->vif = vif; - if (!mvm->trans->trans_cfg->gen2) - mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - else - mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF; - mvm_sta->tx_protection = 0; mvm_sta->tt_tx_protection = false; - mvm_sta->sta_type = sta->tdls ? 
IWL_STA_TDLS_LINK : IWL_STA_LINK; + mvm_sta->sta_type = sta_type; - /* HW restart, don't assume the memory has been zeroed */ mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ - mvm_sta->tfd_queue_msk = 0; - /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { - u16 seq = mvm_sta->tid_data[i].seq_number; - memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); - mvm_sta->tid_data[i].seq_number = seq; - /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated @@ -1706,10 +1778,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, atomic_set(&mvmtxq->tx_request, 0); } - mvm_sta->agg_tids = 0; - - if (iwl_mvm_has_new_rx_api(mvm) && - !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + if (iwl_mvm_has_new_rx_api(mvm)) { int q; dup_data = kcalloc(mvm->trans->num_rx_queues, @@ -1735,7 +1804,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) - goto err; + return ret; } /* @@ -1745,10 +1814,62 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (iwl_mvm_has_tlc_offload(mvm)) iwl_mvm_rs_add_sta(mvm, mvm_sta); else - spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); + spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock); iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); + return 0; +} + +int iwl_mvm_add_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret, sta_id; + bool sta_update = false; + unsigned int sta_flags = 0; + + lockdep_assert_held(&mvm->mutex); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + sta_id = iwl_mvm_find_free_sta_id(mvm, + ieee80211_vif_type_p2p(vif)); + else + sta_id = mvm_sta->deflink.sta_id; + + if (sta_id == IWL_MVM_INVALID_STA) + return -ENOSPC; + + spin_lock_init(&mvm_sta->lock); + + /* if this is a HW restart re-alloc existing queues */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct iwl_mvm_int_sta tmp_sta = { + .sta_id = sta_id, + .type = mvm_sta->sta_type, + }; + + /* First add an empty station since allocating + * a queue requires a valid station + */ + ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, + mvmvif->id, mvmvif->color); + if (ret) + goto err; + + iwl_mvm_realloc_queues_after_restart(mvm, sta); + sta_update = true; + sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; + goto update_fw; + } + + ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id, + sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK); + if (ret) + goto err; + update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) @@ -1756,10 +1877,10 @@ update_fw: if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { - WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA); - mvmvif->ap_sta_id = sta_id; + WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA); + mvmvif->deflink.ap_sta_id = sta_id; } else { - WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA); + WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA); } } @@ -1781,7 +1902,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, lockdep_assert_held(&mvm->mutex); cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); - cmd.sta_id = mvmsta->sta_id; + cmd.sta_id = mvmsta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; cmd.station_flags = drain ? 
cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); @@ -1796,12 +1917,12 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", - mvmsta->sta_id); + mvmsta->deflink.sta_id); break; default: ret = -EIO; IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", - mvmsta->sta_id); + mvmsta->deflink.sta_id); break; } @@ -1853,7 +1974,8 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i); + iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id, + &mvm_sta->tid_data[i].txq_id, i); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } @@ -1861,7 +1983,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(sta->txq[i]); + spin_lock_bh(&mvm->add_stream_lock); mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + list_del_init(&mvmtxq->list); + clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + spin_unlock_bh(&mvm->add_stream_lock); } } @@ -1889,42 +2015,27 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, return 0; } -int iwl_mvm_rm_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +/* Execute the common part for both MLD and non-MLD modes. + * Returns if we're done with removing the station, either + * with error or success + */ +bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, int *ret) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif_link_info *mvm_link = + mvmvif->link[link_sta->link_id]; struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - u8 sta_id = mvm_sta->sta_id; - int ret; + struct iwl_mvm_link_sta *mvm_link_sta; + u8 sta_id; lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_has_new_rx_api(mvm)) - kfree(mvm_sta->dup_data); - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - if (ret) - return ret; - - /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); - if (ret) - return ret; - if (iwl_mvm_has_new_tx_api(mvm)) { - ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); - } else { - u32 q_mask = mvm_sta->tfd_queue_msk; - - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - q_mask); - } - if (ret) - return ret; - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - - iwl_mvm_disable_sta_queues(mvm, vif, sta); + mvm_link_sta = + rcu_dereference_protected(mvm_sta->link[link_sta->link_id], + lockdep_is_held(&mvm->mutex)); + sta_id = mvm_link_sta->sta_id; /* If there is a TXQ still marked as reserved - free it */ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { @@ -1940,20 +2051,24 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) - return -EINVAL; + sta_id, reserved_txq, *status)) { + *ret = -EINVAL; + return true; + } *status = IWL_MVM_QUEUE_FREE; } - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) { + if (vif->type == NL80211_IFTYPE_STATION) { /* if associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; + if (vif->cfg.assoc) + return true; + + /* first remove remaining keys */ + 
iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0); /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + mvm_link->ap_sta_id = IWL_MVM_INVALID_STA; } /* @@ -1965,15 +2080,46 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, cancel_delayed_work(&mvm->tdls_cs.dwork); } - /* - * Make sure that the tx response code sees the station as -EBUSY and - * calls the drain worker. - */ - spin_lock_bh(&mvm_sta->lock); - spin_unlock_bh(&mvm_sta->lock); + return false; +} + +int iwl_mvm_rm_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + int ret; + + lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (ret) + return ret; + + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); + if (ret) + return ret; + if (iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + } else { + u32 q_mask = mvm_sta->tfd_queue_msk; + + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + q_mask); + } + if (ret) + return ret; + + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); + + iwl_mvm_disable_sta_queues(mvm, vif, sta); + + if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret)) + return ret; + + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL); return ret; } @@ -1993,7 +2139,7 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, - enum iwl_sta_type type) + u8 type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || sta->sta_id == IWL_MVM_INVALID_STA) { @@ -2006,7 +2152,7 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, sta->type = type; /* put a non-NULL value so iterating over the stations won't stop */ - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); return 0; } @@ -2042,7 +2188,7 @@ static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); - return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, + return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); } @@ -2060,7 +2206,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_disable_txq(mvm, NULL, queue, + iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue, IWL_MAX_TID_COUNT); return ret; } @@ -2087,11 +2233,13 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) { int ret; + u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 
0 : + BIT(mvm->aux_queue); lockdep_assert_held(&mvm->mutex); /* Allocate aux station and assign to it the aux queue */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), + ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask, NL80211_IFTYPE_UNSPECIFIED, IWL_STA_AUX_ACTIVITY); if (ret) @@ -2133,7 +2281,8 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT); + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id, + &mvm->snif_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2150,7 +2299,8 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT); + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id, + &mvm->aux_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2175,7 +2325,7 @@ void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; + struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; int queue; @@ -2184,7 +2334,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_get_wd_timeout(mvm, vif, false, false); struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_VO, - .sta_id = mvmvif->bcast_sta.sta_id, + .sta_id = mvmvif->deflink.bcast_sta.sta_id, .tid = IWL_MAX_TID_COUNT, .aggregate = false, .frame_limit = IWL_FRAME_LIMIT, @@ -2224,7 +2374,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { - queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, + queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); if (queue < 0) { @@ -2233,24 +2383,32 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) + vif->type == NL80211_IFTYPE_ADHOC) { + /* for queue management */ mvm->probe_queue = queue; - else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + /* for use in TX */ + mvmvif->deflink.mgmt_queue = queue; + } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_dev_queue = queue; + } + } else if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) { + /* set it for use in TX */ + mvmvif->deflink.mgmt_queue = mvm->probe_queue; } return 0; } -static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u16 *queueptr, queue; lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); + iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -2267,12 +2425,17 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, } 
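The broadcast/management queue bookkeeping changed in iwl_mvm_send_add_bcast_sta above (and undone again in the teardown path that follows) can be summarized with a small stand-alone sketch. The types and names below (mock_mvm, mock_link, assign_mgmt_queue) are illustrative stand-ins, not the driver's structures:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_QUEUE 0xffff

struct mock_link {            /* stands in for iwl_mvm_vif_link_info */
	unsigned int mgmt_queue;  /* queue used when transmitting mgmt frames */
};

struct mock_mvm {             /* stands in for iwl_mvm */
	unsigned int probe_queue; /* shared AP probe-response queue */
	bool has_new_tx_api;      /* TVQM: queues are allocated per station */
};

/* after adding the bcast station: remember where mgmt frames should go */
static void assign_mgmt_queue(struct mock_mvm *mvm, struct mock_link *link,
			      unsigned int tvqm_queue, bool is_ap_or_ibss)
{
	if (mvm->has_new_tx_api) {
		if (is_ap_or_ibss) {
			mvm->probe_queue = tvqm_queue; /* for queue management */
			link->mgmt_queue = tvqm_queue; /* for use in TX */
		}
	} else if (is_ap_or_ibss) {
		/* legacy queue layout: fall back to the shared probe queue */
		link->mgmt_queue = mvm->probe_queue;
	}
}

int main(void)
{
	struct mock_mvm mvm = { .probe_queue = 9, .has_new_tx_api = true };
	struct mock_link link = { .mgmt_queue = INVALID_QUEUE };

	assign_mgmt_queue(&mvm, &link, 17, true);
	printf("mgmt_queue=%u probe_queue=%u\n", link.mgmt_queue, mvm.probe_queue);
	return 0;
}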
queue = *queueptr; - iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT); + iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id, + queueptr, IWL_MAX_TID_COUNT); + + if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) + mvmvif->deflink.mgmt_queue = mvm->probe_queue; + if (iwl_mvm_has_new_tx_api(mvm)) return; - WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue))); - mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue); + WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue))); + mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue); } /* Send the FW a request to remove the station from it's internal data @@ -2286,7 +2449,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_free_bcast_sta_queues(mvm, vif); - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; @@ -2298,7 +2461,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } @@ -2313,7 +2476,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; + struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); @@ -2334,7 +2497,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta); } /* @@ -2365,7 +2528,7 @@ int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta; + struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta; static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; const u8 *maddr = _maddr; struct iwl_trans_txq_scd_cfg cfg = { @@ -2392,7 +2555,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * changes in mac80211 layer. */ if (vif->type == NL80211_IFTYPE_ADHOC) - mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue @@ -2400,9 +2563,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) */ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { - iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0, + &cfg, timeout); - msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); + msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); @@ -2417,17 +2581,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * tfd_queue_mask. 
*/ if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, - 0, - timeout); + int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id, + 0, timeout); if (queue < 0) { ret = queue; goto err; } - mvmvif->cab_queue = queue; + mvmvif->deflink.cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0, + &cfg, timeout); return 0; @@ -2500,11 +2664,12 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); + iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true); - iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0); + iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id, + &mvmvif->deflink.cab_queue, 0); - ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2593,13 +2758,14 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, } static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, + struct ieee80211_sta *sta, bool start, int tid, u16 ssn, u16 buf_size) { + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), - .sta_id = mvm_sta->sta_id, + .sta_id = mvm_sta->deflink.sta_id, .add_modify = STA_MODE_MODIFY, }; u32 status; @@ -2641,7 +2807,7 @@ static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, } static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, + struct ieee80211_sta *sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { @@ -2655,7 +2821,8 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); if (start) { - cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); + cmd.alloc.sta_id_mask = + cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1)); cmd.alloc.tid = tid; cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); @@ -2664,7 +2831,8 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { - cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); + cmd.remove.sta_id_mask = + cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1)); cmd.remove.tid = cpu_to_le32(tid); } @@ -2687,16 +2855,16 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, return baid; } -static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, +static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool start, int tid, u16 ssn, u16 buf_size, int baid) { if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) - return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start, + return iwl_mvm_fw_baid_op_cmd(mvm, sta, start, tid, ssn, buf_size, baid); - return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start, + return iwl_mvm_fw_baid_op_sta(mvm, sta, start, tid, ssn, buf_size); } @@ -2717,7 +2885,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } if (iwl_mvm_has_new_rx_api(mvm) && start) { - u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); + u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* 
sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ @@ -2766,7 +2934,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* Don't send command to remove (start=0) BAID during restart */ if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) - baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size, + baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size, baid); if (baid < 0) { @@ -2788,7 +2956,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, iwl_mvm_rx_agg_session_expired, 0); baid_data->mvm = mvm; baid_data->tid = tid; - baid_data->sta_id = mvm_sta->sta_id; + baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); mvm_sta->tid_to_baid[tid] = baid; if (timeout) @@ -2803,7 +2971,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * RX is being processed in parallel */ IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", - mvm_sta->sta_id, tid, baid); + mvm_sta->deflink.sta_id, tid, baid); WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); } else { @@ -2824,7 +2992,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* synchronize all rx queues so we can safely delete */ iwl_mvm_free_reorder(mvm, baid_data); - del_timer_sync(&baid_data->session_timer); + timer_shutdown_sync(&baid_data->session_timer); RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); @@ -2865,7 +3033,7 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); - cmd.sta_id = mvm_sta->sta_id; + cmd.sta_id = mvm_sta->deflink.sta_id; cmd.add_modify = STA_MODE_MODIFY; if (!iwl_mvm_has_new_tx_api(mvm)) cmd.modify_mask = STA_MODIFY_QUEUES; @@ -2957,7 +3125,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { - ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (ret < 0) { @@ -2995,7 +3163,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_DEBUG_TX_QUEUES(mvm, "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->ssn, + mvmsta->deflink.sta_id, tid, txq_id, + tid_data->ssn, tid_data->next_reclaimed); /* @@ -3034,7 +3203,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u16 ssn; struct iwl_trans_txq_scd_cfg cfg = { - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .tid = tid, .frame_limit = buf_size, .aggregate = true, @@ -3106,7 +3275,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, - mvmsta->sta_id, tid, + mvmsta->deflink.sta_id, tid, buf_size, ssn); if (ret) { IWL_ERR(mvm, @@ -3137,14 +3306,16 @@ out: * for each station. Therefore, use the minimum of all the * aggregation sessions and our default value. 
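The BAID hunks above replace a single BIT(sta_id) with a station mask obtained from iwl_mvm_sta_fw_id_mask(), so a block-ack session can cover every link station of an MLO peer. A minimal stand-alone sketch of that mask idea, using mock types and an invented helper name (sta_fw_id_mask), not the driver's implementation:

#include <stdio.h>

#define MAX_LINKS   15
#define INVALID_STA 0xff

/* stands in for a station with one fw station id per link */
struct mock_sta {
	unsigned char link_sta_id[MAX_LINKS];
};

/* OR together the fw ids of every active link station,
 * optionally skipping one link (filter_link == -1 keeps them all) */
static unsigned int sta_fw_id_mask(const struct mock_sta *sta, int filter_link)
{
	unsigned int mask = 0;

	for (int i = 0; i < MAX_LINKS; i++) {
		if (i == filter_link || sta->link_sta_id[i] == INVALID_STA)
			continue;
		mask |= 1u << sta->link_sta_id[i];
	}
	return mask;
}

int main(void)
{
	struct mock_sta sta = { .link_sta_id = { 3, INVALID_STA, 7 } };

	/* remaining array entries are zero-initialized; mark them invalid */
	for (int i = 3; i < MAX_LINKS; i++)
		sta.link_sta_id[i] = INVALID_STA;

	printf("sta_mask=0x%x\n", sta_fw_id_mask(&sta, -1)); /* prints 0x88 */
	return 0;
}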
*/ - mvmsta->max_agg_bufsize = - min(mvmsta->max_agg_bufsize, buf_size); - mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize; + mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = + min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize, + buf_size); + mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit = + mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize; IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", sta->addr, tid); - return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); + return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq); } static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, @@ -3193,7 +3364,8 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->state); + mvmsta->deflink.sta_id, tid, txq_id, + tid_data->state); mvmsta->agg_tids &= ~BIT(tid); @@ -3232,7 +3404,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, default: IWL_ERR(mvm, "Stopping AGG while state not ON or starting for %d on %d (%d)\n", - mvmsta->sta_id, tid, tid_data->state); + mvmsta->deflink.sta_id, tid, tid_data->state); IWL_ERR(mvm, "\ttid_data->txq_id = %d\n", tid_data->txq_id); err = -EINVAL; @@ -3258,7 +3430,8 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_bh(&mvmsta->lock); txq_id = tid_data->txq_id; IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", - mvmsta->sta_id, tid, txq_id, tid_data->state); + mvmsta->deflink.sta_id, tid, txq_id, + tid_data->state); old_state = tid_data->state; tid_data->state = IWL_AGG_OFF; mvmsta->agg_tids &= ~BIT(tid); @@ -3270,7 +3443,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_drain_sta(mvm, mvmsta, true); if (iwl_mvm_has_new_tx_api(mvm)) { - if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, + if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id, BIT(tid))) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); iwl_trans_wait_txq_empty(mvm->trans, txq_id); @@ -3330,8 +3503,8 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, * station ID, then use AP's station ID. 
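The key-installation hunks around here keep the same station-id fallbacks as before, only routed through the per-link fields (deflink.ap_sta_id, deflink.mcast_sta). A simplified stand-alone sketch of that selection order, with mock types and an invented helper name (key_sta_id); it is illustrative only, not the driver's logic verbatim:

#include <stdio.h>

#define INVALID_STA 0xff

struct mock_link_sta { unsigned char sta_id; };
struct mock_sta      { struct mock_link_sta deflink; };
struct mock_vif_link { unsigned char ap_sta_id, mcast_sta_id; };

enum vif_type { VIF_STATION, VIF_AP };

/* pick the fw station id a key should be installed on: a known peer
 * wins, AP group keys go to the mcast station, and a station vif
 * falls back to its AP station when one is known */
static int key_sta_id(enum vif_type type, const struct mock_sta *peer,
		      const struct mock_vif_link *link, int pairwise)
{
	if (peer)
		return peer->deflink.sta_id;
	if (type == VIF_AP && !pairwise)
		return link->mcast_sta_id;
	if (type == VIF_STATION && link->ap_sta_id != INVALID_STA)
		return link->ap_sta_id;
	return -1; /* no usable station */
}

int main(void)
{
	struct mock_vif_link link = { .ap_sta_id = 2, .mcast_sta_id = 5 };

	printf("%d\n", key_sta_id(VIF_STATION, NULL, &link, 0)); /* 2 */
	printf("%d\n", key_sta_id(VIF_AP, NULL, &link, 0));      /* 5 */
	return 0;
}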
*/ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { - u8 sta_id = mvmvif->ap_sta_id; + mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); @@ -3608,10 +3781,13 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { - u8 sta_id = mvmvif->ap_sta_id; + mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return NULL; + return sta->addr; } @@ -3635,13 +3811,13 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - sta_id = mvm_sta->sta_id; + sta_id = mvm_sta->deflink.sta_id; mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - sta_id = mvmvif->mcast_sta.sta_id; + sta_id = mvmvif->deflink.mcast_sta.sta_id; } else { IWL_ERR(mvm, "Failed to find station id\n"); return -EINVAL; @@ -3649,6 +3825,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { addr = iwl_mvm_get_mac_addr(mvm, vif, sta); + if (!addr) { + IWL_ERR(mvm, "Failed to find mac address\n"); + return -EINVAL; + } + /* get phase 1 key from mac80211 */ ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); @@ -3684,7 +3865,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, IWL_ERR(mvm, "Failed to find station\n"); return -EINVAL; } - sta_id = mvm_sta->sta_id; + sta_id = mvm_sta->deflink.sta_id; /* * It is possible that the 'sta' parameter is NULL, and thus @@ -3706,7 +3887,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, } else { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - sta_id = mvmvif->mcast_sta.sta_id; + sta_id = mvmvif->deflink.mcast_sta.sta_id; } if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || @@ -3779,9 +3960,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, /* Get the station from the mvm local station table */ mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (mvm_sta) - sta_id = mvm_sta->sta_id; + sta_id = mvm_sta->deflink.sta_id; else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) - sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; + sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id; IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", @@ -3837,7 +4018,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); if (WARN_ON_ONCE(!mvm_sta)) goto unlock; - iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, + iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast, iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, mfp); @@ -3851,7 +4032,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .station_flags_msk = cpu_to_le32(STA_FLG_PS), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; @@ -3872,7 +4053,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm 
*mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, .sleep_tx_count = cpu_to_le16(cnt), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), @@ -3965,17 +4146,23 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, bool disable) + struct iwl_mvm_sta *mvmsta, + bool disable) { struct iwl_mvm_add_sta_cmd cmd = { .add_modify = STA_MODE_MODIFY, - .sta_id = mvmsta->sta_id, + .sta_id = mvmsta->deflink.sta_id, .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), }; int ret; + if (mvm->mld_api_is_used) { + iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable); + return; + } + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) @@ -3988,6 +4175,11 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + if (mvm->mld_api_is_used) { + iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable); + return; + } + spin_lock_bh(&mvm_sta->lock); if (mvm_sta->disable_tx == disable) { @@ -4038,6 +4230,11 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta; int i; + if (mvm->mld_api_is_used) { + iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable); + return; + } + rcu_read_lock(); /* Block/unblock all the stations of the given mvmvif */ @@ -4060,17 +4257,19 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, return; /* Need to block/unblock also multicast station */ - if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, - &mvmvif->mcast_sta, disable); + &mvmvif->deflink.mcast_sta, + disable); /* * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ - if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, - &mvmvif->bcast_sta, disable); + &mvmvif->deflink.bcast_sta, + disable); } void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -4080,7 +4279,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) rcu_read_lock(); - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id); if (mvmsta) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); @@ -4110,16 +4309,27 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u16 queue; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_key_conf *keyconf; + unsigned int wdg_timeout = + iwl_mvm_get_wd_timeout(mvm, vif, false, false); + bool mld = iwl_mvm_has_mld_api(mvm->fw); + u32 type = mld ? 
STATION_TYPE_PEER : IWL_STA_LINK; ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, - NL80211_IFTYPE_UNSPECIFIED, - IWL_STA_LINK); + NL80211_IFTYPE_UNSPECIFIED, type); if (ret) return ret; - ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, - addr, sta, &queue, - IWL_MVM_TX_FIFO_BE); + if (mld) + ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr, + mvmvif->deflink.fw_link_id, + &queue, + IWL_MAX_TID_COUNT, + &wdg_timeout); + else + ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, + mvmvif->color, addr, sta, + &queue, + IWL_MVM_TX_FIFO_BE); if (ret) goto out; @@ -4132,9 +4342,23 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, keyconf->cipher = cipher; memcpy(keyconf->key, key, key_len); keyconf->keylen = key_len; + keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE; + + if (mld) { + /* The MFP flag is set according to the station mfp field. Since + * we don't have a station, set it manually. + */ + u32 key_flags = + iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) | + IWL_SEC_KEY_FLAG_MFP; + u32 sta_mask = BIT(sta->sta_id); + + ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf); + } else { + ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, + 0, NULL, 0, 0, true); + } - ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, - 0, NULL, 0, 0, true); kfree(keyconf); return 0; out: @@ -4144,10 +4368,10 @@ out: void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 mac_id) + u32 id) { struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = { - .mac_id = cpu_to_le32(mac_id), + .id = cpu_to_le32(id), }; int ret; diff --git a/mvm/sta.h b/mvm/sta.h index f1a4fc3e4038..7364346a1209 100644 --- a/mvm/sta.h +++ b/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -331,14 +331,37 @@ struct iwl_mvm_rxq_dup_data { } ____cacheline_aligned_in_smp; /** + * struct iwl_mvm_link_sta - link specific parameters of a station + * @rcu_head: used for freeing the data + * @sta_id: the index of the station in the fw + * @lq_sta: holds rate scaling data, either for the case when RS is done in + * the driver - %rs_drv or in the FW - %rs_fw. + * @orig_amsdu_len: used to save the original amsdu_len when it is changed via + * debugfs. If it's set to 0, it means that it is it's not set via + * debugfs. + * @avg_energy: energy as reported by FW statistics notification + */ +struct iwl_mvm_link_sta { + struct rcu_head rcu_head; + u32 sta_id; + union { + struct iwl_lq_sta_rs_fw rs_fw; + struct iwl_lq_sta rs_drv; + } lq_sta; + + u16 orig_amsdu_len; + + u8 avg_energy; +}; + +/** * struct iwl_mvm_sta - representation of a station in the driver - * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. 
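The new struct iwl_mvm_link_sta above is the core of this update: per-link fields (sta_id, lq_sta, orig_amsdu_len, avg_energy) move out of iwl_mvm_sta, non-MLO paths keep using a built-in default link, and MLO allocates extra link entries on demand. A small stand-alone sketch of that deflink/link[] pattern, using mock types rather than the driver's structures (and plain pointers instead of RCU):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_LINKS 15

struct mock_link_sta {
	unsigned int sta_id;        /* fw station index for this link */
	unsigned short orig_amsdu_len;
	unsigned char avg_energy;
};

struct mock_sta {
	struct mock_link_sta deflink;          /* link 0 for non-MLO */
	struct mock_link_sta *link[MAX_LINKS]; /* link[0] points to deflink */
};

static void mock_sta_init(struct mock_sta *sta, unsigned int sta_id)
{
	memset(sta, 0, sizeof(*sta));
	sta->deflink.sta_id = sta_id;
	sta->link[0] = &sta->deflink;   /* non-MLO code keeps using deflink */
}

static int mock_sta_add_link(struct mock_sta *sta, int link_id,
			     unsigned int sta_id)
{
	struct mock_link_sta *ls = calloc(1, sizeof(*ls));

	if (!ls)
		return -1;
	ls->sta_id = sta_id;
	sta->link[link_id] = ls;        /* MLO: extra links allocated on demand */
	return 0;
}

int main(void)
{
	struct mock_sta sta;

	mock_sta_init(&sta, 4);
	if (mock_sta_add_link(&sta, 1, 9) == 0) {
		printf("link0=%u link1=%u\n",
		       sta.link[0]->sta_id, sta.link[1]->sta_id);
		free(sta.link[1]);
	}
	return 0;
}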
- * @max_agg_bufsize: the maximal size of the AGG buffer for this station * @sta_type: station type + * @authorized: indicates station is authorized * @sta_state: station state according to enum %ieee80211_sta_state * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and @@ -347,8 +370,7 @@ struct iwl_mvm_rxq_dup_data { * and from Tx response flow, it needs a spinlock. * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data. * @tid_to_baid: a simple map of TID to baid - * @lq_sta: holds rate scaling data, either for the case when RS is done in - * the driver - %rs_drv or in the FW - %rs_fw. + * @vif: a vif pointer * @reserved_queue: the queue reserved for this STA for DQA purposes * Every STA has is given one reserved queue to allow it to operate. If no * such queue can be guaranteed, the STA addition will fail. @@ -358,10 +380,8 @@ struct iwl_mvm_rxq_dup_data { * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. * In case TLC offload is not active it is either 0xFFFF or 0. * @max_amsdu_len: max AMSDU length - * @orig_amsdu_len: used to save the original amsdu_len when it is changed via - * debugfs. If it's set to 0, it means that it is it's not set via - * debugfs. * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) + * @sleeping: sta sleep transitions in power management * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. This is useful in case the queue * gets empty before all the frames were sent, which can happen when @@ -374,6 +394,12 @@ struct iwl_mvm_rxq_dup_data { * used during connection establishment (e.g. for the 4 way handshake * exchange). * @pairwise_cipher: used to feed iwlmei upon authorization + * @deflink: the default link station, for non-MLO STA, all link specific data + * is accessed via deflink (or link[0]). For MLO, it will hold data of the + * first added link STA. + * @link: per link sta entries. For non-MLO only link[0] holds data. For MLO, + * link[0] points to deflink and link[link_id] is allocated when new link + * sta is added. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. 
This structure is placed in that @@ -381,22 +407,17 @@ struct iwl_mvm_rxq_dup_data { * */ struct iwl_mvm_sta { - u32 sta_id; u32 tfd_queue_msk; u32 mac_id_n_color; u16 tid_disable_agg; - u16 max_agg_bufsize; - enum iwl_sta_type sta_type; + u8 sta_type; enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; bool next_status_eosp; + bool authorized; spinlock_t lock; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1]; u8 tid_to_baid[IWL_MAX_TID_COUNT]; - union { - struct iwl_lq_sta_rs_fw rs_fw; - struct iwl_lq_sta rs_drv; - } lq_sta; struct ieee80211_vif *vif; struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; @@ -410,13 +431,14 @@ struct iwl_mvm_sta { bool disable_tx; u16 amsdu_enabled; u16 max_amsdu_len; - u16 orig_amsdu_len; bool sleeping; u8 agg_tids; u8 sleep_tx_count; - u8 avg_energy; u8 tx_ant; u32 pairwise_cipher; + + struct iwl_mvm_link_sta deflink; + struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); @@ -436,7 +458,7 @@ iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta) */ struct iwl_mvm_int_sta { u32 sta_id; - enum iwl_sta_type type; + u8 type; u32 tfd_queue_msk; }; @@ -452,6 +474,9 @@ struct iwl_mvm_int_sta { */ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, bool update, unsigned int flags); +int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype); +int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, int sta_id, u8 sta_type); int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -463,8 +488,13 @@ static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm, return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); } +void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, + struct ieee80211_sta *sta); int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta); +bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, int *ret); int iwl_mvm_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -510,6 +540,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id); int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -519,7 +551,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, u32 qmask, enum nl80211_iftype iftype, - enum iwl_sta_type type); + u8 type); void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta); int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -543,6 +575,7 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); + void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void 
iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -550,5 +583,86 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *key, u32 key_len); void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 mac_id); + u32 id); +/* Queues */ +int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + u8 sta_id, u8 tid, unsigned int timeout); + +/* Sta state */ +/** + * struct iwl_mvm_sta_state_ops - callbacks for the sta_state() ops + * + * Since the only difference between both MLD and + * non-MLD versions of sta_state() is these function calls, + * each version will send its specific function calls to + * %iwl_mvm_mac_sta_state_common(). + * + * @add_sta: pointer to the function that adds a new sta + * @update_sta: pointer to the function that updates a sta + * @rm_sta: pointer to the functions that removes a sta + * @mac_ctxt_changed: pointer to the function that handles a change in mac ctxt + */ +struct iwl_mvm_sta_state_ops { + int (*add_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*update_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*rm_sta)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + int (*mac_ctxt_changed)(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool force_assoc_off); +}; + +int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state, + const struct iwl_mvm_sta_state_ops *callbacks); + +/* New MLD STA related APIs */ +/* STA */ +int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id); +int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); +int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm); +int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id); +int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links); +u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int filter_link_id); +int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, int link_id, + u16 *queue, u8 tid, + unsigned int *_wdg_timeout); + +/* Queues */ +void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm, + struct iwl_mvm_vif *mvmvif, + bool disable); +void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm, + struct 
iwl_mvm_sta *mvm_sta, + bool disable); +void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + bool disable); #endif /* __sta_h__ */ diff --git a/mvm/tdls.c b/mvm/tdls.c index bf04326e35ff..dae6f2a1aad9 100644 --- a/mvm/tdls.c +++ b/mvm/tdls.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2020, 2022 Intel Corporation */ #include <linux/etherdevice.h> #include "mvm.h" @@ -369,7 +369,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, goto out; } mvmsta = iwl_mvm_sta_from_mac80211(sta); - cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id); + cmd.peer_sta_id = cpu_to_le32(mvmsta->deflink.sta_id); if (!chandef) { if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && @@ -380,7 +380,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, type == TDLS_MOVE_CH) { /* we need to return to base channel */ struct ieee80211_chanctx_conf *chanctx = - rcu_dereference(vif->chanctx_conf); + rcu_dereference(vif->bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx)) { rcu_read_unlock(); @@ -414,7 +414,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info, - mvmsta->sta_id); + mvmsta->deflink.sta_id); iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta, hdr->frame_control); @@ -431,7 +431,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, /* channel switch has started, update state */ if (type != TDLS_MOVE_CH) { - mvm->tdls_cs.cur_sta_id = mvmsta->sta_id; + mvm->tdls_cs.cur_sta_id = mvmsta->deflink.sta_id; iwl_mvm_tdls_update_cs_state(mvm, type == TDLS_SEND_CHAN_SW_REQ ? IWL_MVM_TDLS_SW_REQ_SENT : @@ -541,7 +541,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, } mvmsta = iwl_mvm_sta_from_mac80211(sta); - mvm->tdls_cs.peer.sta_id = mvmsta->sta_id; + mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id; mvm->tdls_cs.peer.chandef = *chandef; mvm->tdls_cs.peer.initiator = sta->tdls_initiator; mvm->tdls_cs.peer.op_class = oper_class; diff --git a/mvm/time-event.c b/mvm/time-event.c index 6edf2b79db43..5f0e7144a951 100644 --- a/mvm/time-event.c +++ b/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -79,7 +79,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) if (!WARN_ON(!mvm->p2p_device_vif)) { mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); - iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); + iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, + true); } } @@ -94,13 +95,19 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) /* do the same in case of hot spot 2.0 */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + if (mvm->mld_api_is_used) { + iwl_mvm_mld_rm_aux_sta(mvm); + goto out_unlock; + } + /* In newer version of this command an aux station is added only * in cases of dedicated tx queue and need to be removed in end * of use */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) + if (iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); } +out_unlock: mutex_unlock(&mvm->mutex); } @@ -123,7 +130,7 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm) rcu_read_lock(); csa_vif = rcu_dereference(mvm->csa_vif); - if (!csa_vif || !csa_vif->csa_active) + if 
(!csa_vif || !csa_vif->bss_conf.csa_active) goto out_unlock; IWL_DEBUG_TE(mvm, "CSA NOA started\n"); @@ -160,7 +167,7 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, if (vif->type != NL80211_IFTYPE_STATION) return false; - if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc && + if (!mvmvif->csa_bcn_pending && vif->cfg.assoc && vif->bss_conf.dtim_period) return false; if (errmsg) @@ -170,13 +177,14 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta; rcu_read_lock(); - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); + mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, + mvmvif->deflink.ap_sta_id); if (!WARN_ON(!mvmsta)) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); rcu_read_unlock(); } - if (vif->bss_conf.assoc) { + if (vif->cfg.assoc) { /* * When not associated, this will be called from * iwl_mvm_event_mlme_callback_ini() @@ -346,7 +354,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, te_data->vif, - !te_data->vif->bss_conf.assoc ? + !te_data->vif->cfg.assoc ? "Not associated and the time event is over already..." : "No beacon heard and the time event is over already..."); break; @@ -376,12 +384,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, struct iwl_time_event_notif *notif) { - struct iwl_mvm_time_event_data *te_data, *tmp; - bool aux_roc_te = false; + struct iwl_mvm_time_event_data *aux_roc_te = NULL, *te_data; - list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) { + list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { if (le32_to_cpu(notif->unique_id) == te_data->uid) { - aux_roc_te = true; + aux_roc_te = te_data; break; } } @@ -859,7 +866,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, * and know the dtim period. */ iwl_mvm_te_check_disconnect(mvm, vif, - !vif->bss_conf.assoc ? + !vif->cfg.assoc ? "Not associated and the session protection is over already..." : "No beacon heard and the session protection is over already..."); spin_lock_bh(&mvm->time_event_lock); diff --git a/mvm/time-sync.c b/mvm/time-sync.c new file mode 100644 index 000000000000..edae3e24192b --- /dev/null +++ b/mvm/time-sync.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2022 Intel Corporation + */ + +#include "mvm.h" +#include "time-sync.h" +#include <linux/ieee80211.h> + +void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data) +{ + skb_queue_head_init(&data->frame_list); +} + +static bool iwl_mvm_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u8 skb_dialog_token; + + if (ieee80211_is_timing_measurement(skb)) + skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token; + else + skb_dialog_token = mgmt->u.action.u.ftm.dialog_token; + + if ((ether_addr_equal(mgmt->sa, addr) || + ether_addr_equal(mgmt->da, addr)) && + skb_dialog_token == dialog_token) + return true; + + return false; +} + +static struct sk_buff *iwl_mvm_time_sync_find_skb(struct iwl_mvm *mvm, u8 *addr, + u8 dialog_token) +{ + struct sk_buff *skb; + + /* The queue is expected to have only one SKB. If there are other SKBs + * in the queue, they did not get a time sync notification and are + * probably obsolete by now, so drop them. 
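The new time-sync code just above keys pending frames by peer address and dialog token, and treats anything else in the list as stale. A stand-alone sketch of that lookup-and-drop walk, with a plain linked list and mock names (mock_frame, find_pending) instead of the driver's skb queue:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mock_frame {
	struct mock_frame *next;
	unsigned char addr[6];
	unsigned char dialog_token;
};

/* pop frames until one matches addr + token; stale frames never got a
 * time-sync notification, so they are freed on the way */
static struct mock_frame *find_pending(struct mock_frame **head,
				       const unsigned char *addr,
				       unsigned char token)
{
	struct mock_frame *f;

	while ((f = *head)) {
		*head = f->next;
		if (!memcmp(f->addr, addr, 6) && f->dialog_token == token)
			return f;
		free(f);   /* obsolete entry, drop it */
	}
	return NULL;
}

int main(void)
{
	unsigned char peer[6] = { 0, 1, 2, 3, 4, 5 };
	struct mock_frame *q = calloc(1, sizeof(*q));
	struct mock_frame *hit;

	memcpy(q->addr, peer, 6);
	q->dialog_token = 7;
	hit = find_pending(&q, peer, 7);
	printf("match=%s\n", hit ? "yes" : "no");
	free(hit);
	return 0;
}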
+ */ + while ((skb = skb_dequeue(&mvm->time_sync.frame_list))) { + if (iwl_mvm_is_skb_match(skb, addr, dialog_token)) + break; + + kfree_skb(skb); + skb = NULL; + } + + return skb; +} + +static u64 iwl_mvm_get_64_bit(__le32 high, __le32 low) +{ + return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low); +} + +void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_time_msmt_notify *notif = (void *)pkt->data; + struct ieee80211_rx_status *rx_status; + struct skb_shared_hwtstamps *shwt; + u64 ts_10ns; + struct sk_buff *skb = + iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr, + le32_to_cpu(notif->dialog_token)); + u64 adj_time; + + if (!skb) { + IWL_DEBUG_INFO(mvm, "Time sync event but no pending skb\n"); + return; + } + + ts_10ns = iwl_mvm_get_64_bit(notif->t2_hi, notif->t2_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + shwt = skb_hwtstamps(skb); + shwt->hwtstamp = ktime_set(0, adj_time); + + ts_10ns = iwl_mvm_get_64_bit(notif->t3_hi, notif->t3_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + rx_status = IEEE80211_SKB_RXCB(skb); + rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time); + + IWL_DEBUG_INFO(mvm, + "Time sync: RX event - report frame t2=%llu t3=%llu\n", + ktime_to_ns(shwt->hwtstamp), + ktime_to_ns(rx_status->ack_tx_hwtstamp)); + ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); +} + +void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data; + struct ieee80211_tx_status status = {}; + struct skb_shared_hwtstamps *shwt; + u64 ts_10ns, adj_time; + + status.skb = + iwl_mvm_time_sync_find_skb(mvm, notif->peer_addr, + le32_to_cpu(notif->dialog_token)); + + if (!status.skb) { + IWL_DEBUG_INFO(mvm, "Time sync confirm but no pending skb\n"); + return; + } + + ts_10ns = iwl_mvm_get_64_bit(notif->t1_hi, notif->t1_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + shwt = skb_hwtstamps(status.skb); + shwt->hwtstamp = ktime_set(0, adj_time); + + ts_10ns = iwl_mvm_get_64_bit(notif->t4_hi, notif->t4_lo); + adj_time = iwl_mvm_ptp_get_adj_time(mvm, ts_10ns * 10); + status.info = IEEE80211_SKB_CB(status.skb); + status.ack_hwtstamp = ktime_set(0, adj_time); + + IWL_DEBUG_INFO(mvm, + "Time sync: TX event - report frame t1=%llu t4=%llu\n", + ktime_to_ns(shwt->hwtstamp), + ktime_to_ns(status.ack_hwtstamp)); + ieee80211_tx_status_ext(mvm->hw, &status); +} + +int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr, u32 protocols) +{ + struct iwl_time_sync_cfg_cmd cmd = {}; + int err; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM)) + return -EINVAL; + + /* The fw only supports one peer. We do allow reconfiguration of the + * same peer for cases of fw reset etc. 
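The measurement notification handlers above rebuild each timestamp from two 32-bit halves and scale it from the firmware's 10 ns units to nanoseconds before stamping the skb. A tiny stand-alone sketch of that arithmetic (endianness conversion omitted for brevity; ts_from_halves is an invented name mirroring iwl_mvm_get_64_bit):

#include <stdint.h>
#include <stdio.h>

/* glue the hi/lo 32-bit halves together */
static uint64_t ts_from_halves(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t t2_10ns = ts_from_halves(0x1, 0x80000000u);
	uint64_t t2_ns = t2_10ns * 10;   /* 10 ns units -> nanoseconds */

	printf("t2 = %llu ns\n", (unsigned long long)t2_ns);
	return 0;
}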
+ */ + if (mvm->time_sync.active && + !ether_addr_equal(addr, mvm->time_sync.peer_addr)) { + IWL_DEBUG_INFO(mvm, "Time sync: reject config for peer: %pM\n", + addr); + return -ENOBUFS; + } + + if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM | + IWL_TIME_SYNC_PROTOCOL_FTM)) + return -EINVAL; + + cmd.protocols = cpu_to_le32(protocols); + + ether_addr_copy(cmd.peer_addr, addr); + + err = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), + 0, sizeof(cmd), &cmd); + if (err) { + IWL_ERR(mvm, "Failed to send time sync cfg cmd: %d\n", err); + } else { + mvm->time_sync.active = protocols != 0; + ether_addr_copy(mvm->time_sync.peer_addr, addr); + IWL_DEBUG_INFO(mvm, "Time sync: set peer addr=%pM\n", addr); + } + + if (!mvm->time_sync.active) + skb_queue_purge(&mvm->time_sync.frame_list); + + return err; +} diff --git a/mvm/time-sync.h b/mvm/time-sync.h new file mode 100644 index 000000000000..2cfd0fb5e781 --- /dev/null +++ b/mvm/time-sync.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2022 Intel Corporation + */ +#ifndef __TIME_SYNC_H__ +#define __TIME_SYNC_H__ + +#include "mvm.h" +#include <linux/ieee80211.h> + +void iwl_mvm_init_time_sync(struct iwl_time_sync_data *data); +void iwl_mvm_time_sync_msmt_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_time_sync_msmt_confirm_event(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_time_sync_config(struct iwl_mvm *mvm, const u8 *addr, + u32 protocols); + +static inline +bool iwl_mvm_time_sync_frame(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *addr) +{ + if (ether_addr_equal(mvm->time_sync.peer_addr, addr) && + (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) { + skb_queue_tail(&mvm->time_sync.frame_list, skb); + return true; + } + + return false; +} +#endif @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2019-2021 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -334,7 +334,7 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac, if (vif->type != NL80211_IFTYPE_STATION) return; - iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode); + iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode, 0); } static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable) @@ -573,11 +573,11 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) * and uncompressed, the FW should get it compressed and sorted */ - /* compress temp_trips to cmd array, remove uninitialized values*/ + /* compress trips to cmd array, remove uninitialized values*/ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { - if (mvm->tz_device.temp_trips[i] != S16_MIN) { + if (mvm->tz_device.trips[i].temperature != INT_MIN) { cmd.thresholds[idx++] = - cpu_to_le16(mvm->tz_device.temp_trips[i]); + cpu_to_le16((s16)(mvm->tz_device.trips[i].temperature / 1000)); } } cmd.num_temps = cpu_to_le32(idx); @@ -593,8 +593,8 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) */ for (i = 0; i < idx; i++) { for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) { - if (le16_to_cpu(cmd.thresholds[i]) == - mvm->tz_device.temp_trips[j]) + if ((int)(le16_to_cpu(cmd.thresholds[i]) * 1000) == + mvm->tz_device.trips[j].temperature) mvm->tz_device.fw_trips_index[i] = j; } } @@ -615,7 +615,7 @@ send: static int iwl_mvm_tzone_get_temp(struct thermal_zone_device 
*device, int *temperature) { - struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + struct iwl_mvm *mvm = thermal_zone_device_priv(device); int ret; int temp; @@ -638,37 +638,12 @@ out: return ret; } -static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device, - int trip, int *temp) -{ - struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; - - if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) - return -EINVAL; - - *temp = mvm->tz_device.temp_trips[trip] * 1000; - - return 0; -} - -static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device, - int trip, enum thermal_trip_type *type) -{ - if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) - return -EINVAL; - - *type = THERMAL_TRIP_PASSIVE; - - return 0; -} - static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, int trip, int temp) { - struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + struct iwl_mvm *mvm = thermal_zone_device_priv(device); struct iwl_mvm_thermal_device *tzone; - int i, ret; - s16 temperature; + int ret; mutex_lock(&mvm->mutex); @@ -678,40 +653,17 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, goto out; } - if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) { - ret = -EINVAL; - goto out; - } - if ((temp / 1000) > S16_MAX) { ret = -EINVAL; goto out; } - temperature = (s16)(temp / 1000); tzone = &mvm->tz_device; - if (!tzone) { ret = -EIO; goto out; } - /* no updates*/ - if (tzone->temp_trips[trip] == temperature) { - ret = 0; - goto out; - } - - /* already existing temperature */ - for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { - if (tzone->temp_trips[i] == temperature) { - ret = -EINVAL; - goto out; - } - } - - tzone->temp_trips[trip] = temperature; - ret = iwl_mvm_send_temp_report_ths_cmd(mvm); out: mutex_unlock(&mvm->mutex); @@ -720,8 +672,6 @@ out: static struct thermal_zone_device_ops tzone_ops = { .get_temp = iwl_mvm_tzone_get_temp, - .get_trip_temp = iwl_mvm_tzone_get_trip_temp, - .get_trip_type = iwl_mvm_tzone_get_trip_type, .set_trip_temp = iwl_mvm_tzone_set_trip_temp, }; @@ -743,7 +693,8 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); - mvm->tz_device.tzone = thermal_zone_device_register(name, + mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name, + mvm->tz_device.trips, IWL_MAX_DTS_TRIPS, IWL_WRITABLE_TRIPS_MSK, mvm, &tzone_ops, @@ -766,8 +717,10 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) /* 0 is a valid temperature, * so initialize the array with S16_MIN which invalid temperature */ - for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) - mvm->tz_device.temp_trips[i] = S16_MIN; + for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) { + mvm->tz_device.trips[i].temperature = INT_MIN; + mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE; + } } static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, @@ -1,12 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <linux/tcp.h> +#include <net/gso.h> #include <net/ip.h> #include <net/ipv6.h> @@ -14,6 +15,7 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" +#include "time-sync.h" static void 
iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, @@ -39,13 +41,14 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_tx_info *info, bool amsdu) +static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, + bool amsdu) { struct ieee80211_hdr *hdr = (void *)skb->data; + u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) - u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; /* Do not compute checksum if already computed */ @@ -117,6 +120,8 @@ static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, else udp_hdr(skb)->check = 0; +out: +#endif /* * mac header len should include IV, size is in words unless * the IV is added by the firmware like in WEP. @@ -129,8 +134,6 @@ static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, mh_len /= 2; offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; -out: -#endif if (amsdu) offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); else if (ieee80211_hdrlen(hdr->frame_control) % 4) @@ -140,54 +143,6 @@ out: return offload_assist; } -u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) -{ - struct ieee80211_hdr *hdr = (void *)skb->data; - u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM; - unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); - unsigned int csum_start = skb_checksum_start_offset(skb); - - offload_assist |= u32_encode_bits(hdrlen / 2, - IWL_TX_CMD_OFFLD_BZ_MH_LEN); - if (amsdu) - offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU; - else if (hdrlen % 4) - /* padding is inserted later in transport */ - offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return offload_assist; - - offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM | - IWL_TX_CMD_OFFLD_BZ_ZERO2ONES; - - /* - * mac80211 will always calculate checksum in software for - * non-fast-xmit, and so we can only do offloaded checksum - * for fast-xmit frames. In this case, we always have the - * RFC 1042 header present. skb_checksum_start_offset() - * returns the offset from the beginning, but the hardware - * needs it from after the header & SNAP header. 
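The checksum-offload comment above (in the iwl_mvm_tx_csum_bz block being dropped from this file by the hunk) explains that skb_checksum_start_offset() counts from the start of the frame, while the hardware wants the offset measured after the 802.11 header and the 8-byte RFC 1042/SNAP header. A stand-alone sketch of just that adjustment, with an invented helper name (hw_csum_start) and example header sizes:

#include <stdio.h>

static unsigned int hw_csum_start(unsigned int csum_start_from_frame,
				  unsigned int ieee80211_hdrlen)
{
	/* re-base the offset to just past the 802.11 header + SNAP header */
	return csum_start_from_frame - (ieee80211_hdrlen + 8);
}

int main(void)
{
	/* e.g. QoS data header (26 bytes) + SNAP (8) + IPv4 header (20):
	 * the L4 checksum region starts 20 bytes past the SNAP header */
	printf("%u\n", hw_csum_start(26 + 8 + 20, 26));
	return 0;
}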
- */ - csum_start -= hdrlen + 8; - - offload_assist |= u32_encode_bits(csum_start, - IWL_TX_CMD_OFFLD_BZ_START_OFFS); - offload_assist |= u32_encode_bits(csum_start + skb->csum_offset, - IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS); - - return offload_assist; -} - -static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_tx_info *info, - bool amsdu) -{ - if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) - return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); - return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); -} - /* * Sets most of the Tx cmd's fields */ @@ -287,7 +242,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd->sta_id = sta_id; tx_cmd->offload_assist = - cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); + cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu)); } static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, @@ -307,6 +262,54 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; } +static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + u32 result; + + /* + * we only care about legacy/HT/VHT so far, so we can + * build in v1 and use iwl_new_rate_from_v1() + */ + + if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { + u8 mcs = ieee80211_rate_get_vht_mcs(rate); + u8 nss = ieee80211_rate_get_vht_nss(rate); + + result = RATE_MCS_VHT_MSK_V1; + result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK); + result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + result |= RATE_MCS_SGI_MSK_V1; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1); + else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1); + } else if (rate->flags & IEEE80211_TX_RC_MCS) { + result = RATE_MCS_HT_MSK_V1; + result |= u32_encode_bits(rate->idx, + RATE_HT_MCS_RATE_CODE_MSK_V1 | + RATE_HT_MCS_NSS_MSK_V1); + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + result |= RATE_MCS_SGI_MSK_V1; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1); + if (info->flags & IEEE80211_TX_CTL_LDPC) + result |= RATE_MCS_LDPC_MSK_V1; + if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) + result |= RATE_MCS_STBC_MSK; + } else { + return 0; + } + + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6) + return iwl_new_rate_from_v1(result); + return result; +} + static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc) @@ -316,8 +319,15 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, u32 rate_flags = 0; bool is_cck; - /* info->control is only relevant for non HW rate control */ - if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { + u32 result = iwl_mvm_get_inject_tx_rate(mvm, info); + + if (result) + return result; + rate_idx = info->control.rates[0].idx; + } else if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { + /* info->control is only relevant for non HW rate control */ + /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS && !ieee80211_is_data(fc), @@ -328,22 +338,23 @@ 
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); rate_idx = info->control.rates[0].idx; + + /* For non 2 GHZ band, remap mac80211 rate indices into driver + * indices. + */ + if (info->band != NL80211_BAND_2GHZ || + (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) + rate_idx += IWL_FIRST_OFDM_RATE; + + /* For 2.4 GHZ band, check that there is no need to remap */ + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); } /* if the rate isn't a well known legacy rate, take the lowest one */ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY) - rate_idx = rate_lowest_index( - &mvm->nvm_data->bands[info->band], sta); - - /* - * For non 2 GHZ band, remap mac80211 rate - * indices into driver indices - */ - if (info->band != NL80211_BAND_2GHZ) - rate_idx += IWL_FIRST_OFDM_RATE; - - /* For 2.4 GHZ band, check that there is no need to remap */ - BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm, + info, + info->control.vif); /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx); @@ -396,7 +407,8 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, * table is controlled by LINK_QUALITY commands */ - if (ieee80211_is_data(fc) && sta) { + if (likely(ieee80211_is_data(fc) && sta && + !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { @@ -554,9 +566,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; - u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, - info, - amsdu); + u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, + info, amsdu); cmd->offload_assist = cpu_to_le16(offload_assist); @@ -600,11 +611,11 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, + struct iwl_mvm_vif_link_info *link, struct ieee80211_tx_info *info, - struct ieee80211_hdr *hdr) + struct sk_buff *skb) { - struct iwl_mvm_vif *mvmvif = - iwl_mvm_vif_from_mac80211(info->control.vif); + struct ieee80211_hdr *hdr = (void *)skb->data; __le16 fc = hdr->frame_control; switch (info->control.vif->type) { @@ -621,17 +632,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, * reason 7 ("Class 3 frame received from nonassociated STA"). 
*/ if (ieee80211_is_mgmt(fc) && - (!ieee80211_is_bufferable_mmpdu(fc) || + (!ieee80211_is_bufferable_mmpdu(skb) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) - return mvm->probe_queue; + return link->mgmt_queue; if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && is_multicast_ether_addr(hdr->addr1)) - return mvmvif->cab_queue; + return link->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, "fc=0x%02x", le16_to_cpu(fc)); - return mvm->probe_queue; + return link->mgmt_queue; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; @@ -664,7 +675,7 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, rcu_read_lock(); - resp_data = rcu_dereference(mvmvif->probe_resp_data); + resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data); if (!resp_data) goto out; @@ -735,12 +746,28 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || info.control.vif->type == NL80211_IFTYPE_AP || info.control.vif->type == NL80211_IFTYPE_ADHOC) { + u32 link_id = u32_get_bits(info.control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + struct iwl_mvm_vif_link_info *link; + + if (link_id == IEEE80211_LINK_UNSPECIFIED) { + if (info.control.vif->active_links) + link_id = ffs(info.control.vif->active_links) - 1; + else + link_id = 0; + } + + link = mvmvif->link[link_id]; + if (WARN_ON(!link)) + return -1; + if (!ieee80211_is_data(hdr->frame_control)) - sta_id = mvmvif->bcast_sta.sta_id; + sta_id = link->bcast_sta.sta_id; else - sta_id = mvmvif->mcast_sta.sta_id; + sta_id = link->mcast_sta.sta_id; - queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); + queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info, + skb); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; @@ -788,13 +815,14 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; u8 ac = tid_to_mac80211_ac[tid]; + enum nl80211_band band; unsigned int txf; - int lmac = iwl_mvm_get_lmac_id(mvm->fw, band); + unsigned int val; + int lmac; /* For HE redirect to trigger based fifos */ - if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) ac += 4; txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); @@ -805,7 +833,37 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, * We also want to have the start of the next packet inside the * fifo to be able to send bursts. 
*/ - return min_t(unsigned int, mvmsta->max_amsdu_len, + val = mvmsta->max_amsdu_len; + + if (hweight16(sta->valid_links) <= 1) { + if (sta->valid_links) { + struct ieee80211_bss_conf *link_conf; + unsigned int link = ffs(sta->valid_links) - 1; + + rcu_read_lock(); + link_conf = rcu_dereference(mvmsta->vif->link_conf[link]); + if (WARN_ON(!link_conf)) + band = NL80211_BAND_2GHZ; + else + band = link_conf->chandef.chan->band; + rcu_read_unlock(); + } else { + band = mvmsta->vif->bss_conf.chandef.chan->band; + } + + lmac = iwl_mvm_get_lmac_id(mvm, band); + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) { + /* for real MLO restrict to both LMACs if they exist */ + lmac = IWL_LMAC_5G_INDEX; + val = min_t(unsigned int, val, + mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); + lmac = IWL_LMAC_24G_INDEX; + } else { + lmac = IWL_LMAC_24G_INDEX; + } + + return min_t(unsigned int, val, mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); } @@ -926,7 +984,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * Take the min of ieee80211 station and mvm station */ max_amsdu_len = - min_t(unsigned int, sta->max_amsdu_len, + min_t(unsigned int, sta->cur->max_amsdu_len, iwl_mvm_max_amsdu_size(mvm, sta, tid)); /* @@ -935,7 +993,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * section 8.7.3 NOTE 3). */ if (info->flags & IEEE80211_TX_CTL_AMPDU && - !sta->vht_cap.vht_supported) + !sta->deflink.vht_cap.vht_supported) max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); /* Sub frame header + SNAP + IP header + TCP header + MSS */ @@ -1080,17 +1138,17 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) return -1; - if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->he_cap.has_he) + if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) return -1; if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen, - sta, mvmsta->sta_id); + sta, mvmsta->deflink.sta_id); if (!dev_cmd) goto drop; @@ -1104,8 +1162,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_lock(&mvmsta->lock); /* nullfunc frames should go to the MGMT queue regardless of QOS, - * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default - * assignment of MGMT TID + * the conditions of !ieee80211_is_qos_nullfunc(fc) and + * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { tid = ieee80211_get_tid(hdr); @@ -1130,7 +1188,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, /* update the tx_cmd hdr as it was already copied */ tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl; } - } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) { + } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) && + !ieee80211_is_nullfunc(fc)) { tid = IWL_TID_NON_QOS; } @@ -1165,15 +1224,21 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, } IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n", - mvmsta->sta_id, tid, txq_id, + mvmsta->deflink.sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number), skb->len); /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); + /* + * The IV is introduced by the 
HW for new tx api, and it is not present + * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the + * IV for those devices. + */ if (ieee80211_is_data(fc)) iwl_mvm_mei_tx_copy_to_csme(mvm, skb, - info->control.hw_key ? + info->control.hw_key && + !iwl_mvm_has_new_tx_api(mvm) ? info->control.hw_key->iv_len : 0); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) @@ -1194,7 +1259,8 @@ drop_unlock_sta: iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); drop: - IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); + IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id, + tid); return -1; } @@ -1206,11 +1272,12 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; + struct sk_buff *orig_skb = skb; if (WARN_ON_ONCE(!mvmsta)) return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); @@ -1230,16 +1297,24 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, if (ret) return ret; - if (WARN_ON(skb_queue_empty(&mpdus_skbs))) - return ret; + WARN_ON(skb_queue_empty(&mpdus_skbs)); while (!skb_queue_empty(&mpdus_skbs)) { skb = __skb_dequeue(&mpdus_skbs); ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta); if (ret) { + /* Free skbs created as part of TSO logic that have not yet been dequeued */ __skb_queue_purge(&mpdus_skbs); - return ret; + /* skb here is not necessarily same as skb that entered this method, + * so free it explicitly. + */ + if (skb == orig_skb) + ieee80211_free_txskb(mvm->hw, skb); + else + kfree_skb(skb); + /* there was error, but we consumed skb one way or another, so return 0 */ + return 0; } } @@ -1376,8 +1451,8 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags, r->idx = rate; } else if (format == RATE_MCS_VHT_MSK) { ieee80211_rate_set_vht(r, rate, - ((rate_n_flags & RATE_MCS_NSS_MSK) >> - RATE_MCS_NSS_POS) + 1); + FIELD_GET(RATE_MCS_NSS_MSK, + rate_n_flags) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else if (format == RATE_MCS_HE_MSK) { /* mac80211 cannot do this without ieee80211_tx_status_ext() @@ -1408,8 +1483,7 @@ void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags, } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) { ieee80211_rate_set_vht( r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK, - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1); + FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1); r->flags |= IEEE80211_TX_RC_VHT_MCS; } else { r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, @@ -1624,7 +1698,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->status.status_driver_data[0] = RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); - ieee80211_tx_status(mvm->hw, skb); + if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1))) + ieee80211_tx_status(mvm->hw, skb); } /* This is an aggregation queue or might become one, so we use @@ -1811,7 +1886,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (WARN_ON_ONCE(!sta || !sta->wme)) { + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) { rcu_read_unlock(); return; } @@ -1954,12 +2029,14 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, * possible (i.e. first MPDU in the aggregation wasn't acked) * Still it's important to update RS about sent vs. acked. 
*/ - if (!is_flush && skb_queue_empty(&reclaimed_skbs)) { + if (!is_flush && skb_queue_empty(&reclaimed_skbs) && + !iwl_mvm_has_tlc_offload(mvm)) { struct ieee80211_chanctx_conf *chanctx_conf = NULL; + /* no TLC offload, so non-MLD mode */ if (mvmsta->vif) chanctx_conf = - rcu_dereference(mvmsta->vif->chanctx_conf); + rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) goto out; @@ -1967,11 +2044,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, tx_info->band = chanctx_conf->def.chan->band; iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info); - if (!iwl_mvm_has_tlc_offload(mvm)) { - IWL_DEBUG_TX_REPLY(mvm, - "No reclaim. Update rs directly\n"); - iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); - } + IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); + iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false); } out: @@ -2002,7 +2076,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) u16 tfd_cnt; int i; - if (unlikely(sizeof(*ba_res) > pkt_len)) + if (IWL_FW_CHECK(mvm, sizeof(*ba_res) > pkt_len, + "short BA notification (%d)\n", pkt_len)) return; sta_id = ba_res->sta_id; @@ -2014,7 +2089,13 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) (void *)(uintptr_t)ba_res->reduced_txp; tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); - if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len) + if (!tfd_cnt) + return; + + if (IWL_FW_CHECK(mvm, + struct_size(ba_res, tfd, tfd_cnt) > pkt_len, + "short BA notification (tfds:%d, size:%d)\n", + tfd_cnt, pkt_len)) return; rcu_read_lock(); @@ -2072,7 +2153,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) rcu_read_lock(); mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id); - if (WARN_ON_ONCE(!mvmsta)) { + if (IWL_FW_CHECK(mvm, !mvmsta, + "invalid STA ID %d in BA notif\n", + sta_id)) { rcu_read_unlock(); return; } @@ -2209,17 +2292,22 @@ free_rsp: int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal) { - struct iwl_mvm_int_sta *int_sta = sta; - struct iwl_mvm_sta *mvm_sta = sta; + u32 sta_id, tfd_queue_msk; - BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) != - offsetof(struct iwl_mvm_sta, sta_id)); + if (internal) { + struct iwl_mvm_int_sta *int_sta = sta; - if (iwl_mvm_has_new_tx_api(mvm)) - return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff); + sta_id = int_sta->sta_id; + tfd_queue_msk = int_sta->tfd_queue_msk; + } else { + struct iwl_mvm_sta *mvm_sta = sta; - if (internal) - return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk); + sta_id = mvm_sta->deflink.sta_id; + tfd_queue_msk = mvm_sta->tfd_queue_msk; + } + + if (iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff); - return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk); + return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk); } diff --git a/mvm/utils.c b/mvm/utils.c index bc947733d982..48016b4343d2 100644 --- a/mvm/utils.c +++ b/mvm/utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -272,13 +272,15 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) * @vif: Pointer to the ieee80211_vif structure * @req_type: The part of the driver who call for a change. * @smps_request: The request to change the SMPS mode. 
+ * @link_id: for MLO link_id, otherwise 0 (deflink) * * Get a requst to change the SMPS mode, * and change it according to all other requests in the driver. */ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_smps_type_request req_type, - enum ieee80211_smps_mode smps_request) + enum ieee80211_smps_mode smps_request, + unsigned int link_id) { struct iwl_mvm_vif *mvmvif; enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; @@ -294,17 +296,42 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; mvmvif = iwl_mvm_vif_from_mac80211(vif); - mvmvif->smps_requests[req_type] = smps_request; + + if (WARN_ON_ONCE(!mvmvif->link[link_id])) + return; + + mvmvif->link[link_id]->smps_requests[req_type] = smps_request; for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) { + if (mvmvif->link[link_id]->smps_requests[i] == + IEEE80211_SMPS_STATIC) { smps_mode = IEEE80211_SMPS_STATIC; break; } - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) + if (mvmvif->link[link_id]->smps_requests[i] == + IEEE80211_SMPS_DYNAMIC) smps_mode = IEEE80211_SMPS_DYNAMIC; } - ieee80211_request_smps(vif, smps_mode); + /* SMPS is disabled in eSR */ + if (mvmvif->esr_active) + smps_mode = IEEE80211_SMPS_OFF; + + ieee80211_request_smps(vif, link_id, smps_mode); +} + +void iwl_mvm_update_smps_on_active_links(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum iwl_mvm_smps_type_request req_type, + enum ieee80211_smps_mode smps_request) +{ + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) + iwl_mvm_update_smps(mvm, vif, req_type, smps_request, + link_id); + rcu_read_unlock(); } static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, @@ -390,16 +417,20 @@ static void iwl_mvm_diversity_iter(void *_data, u8 *mac, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_diversity_iter_data *data = _data; - int i; + int i, link_id; - if (mvmvif->phy_ctxt != data->ctxt) - return; + for_each_mvm_vif_valid_link(mvmvif, link_id) { + struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id]; - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { - if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC || - mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) { - data->result = false; - break; + if (link_info->phy_ctxt != data->ctxt) + continue; + + for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) { + if (link_info->smps_requests[i] == IEEE80211_SMPS_STATIC || + link_info->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) { + data->result = false; + break; + } } } } @@ -495,10 +526,10 @@ static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) if (iwl_mvm_vif_low_latency(mvmvif)) { result->result = true; - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; - band = mvmvif->phy_ctxt->channel->band; + band = mvmvif->deflink.phy_ctxt->channel->band; result->result_per_band[band] = true; } } @@ -604,7 +635,7 @@ static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac, if (vif->type != NL80211_IFTYPE_STATION) return; - if (vif->bss_conf.assoc) + if (vif->cfg.assoc) data->assoc = true; } @@ -816,13 +847,13 @@ static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, if (vif->type != NL80211_IFTYPE_STATION) return; - if (!vif->bss_conf.assoc) + if (!vif->cfg.assoc) return; - if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && - 
!mvmvif->queue_params[IEEE80211_AC_VI].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && - !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) + if (!mvmvif->deflink.queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->deflink.queue_params[IEEE80211_AC_BK].uapsd) return; if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) @@ -831,7 +862,8 @@ static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; IWL_INFO(mvm, "detected AP should do aggregation but isn't, likely due to U-APSD\n"); - schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); + schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, + 15 * HZ); } static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, @@ -883,10 +915,10 @@ static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); u32 *band = _data; - if (!mvmvif->phy_ctxt) + if (!mvmvif->deflink.phy_ctxt) return; - band[mvmvif->id] = mvmvif->phy_ctxt->channel->band; + band[mvmvif->id] = mvmvif->deflink.phy_ctxt->channel->band; } static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, @@ -1137,3 +1169,36 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, iwl_mvm_power_update_device(mvm); } } + +/* Find if at least two links from different vifs use same channel + * FIXME: consider having a refcount array in struct iwl_mvm_vif for + * used phy_ctxt ids. + */ +bool iwl_mvm_have_links_same_channel(struct iwl_mvm_vif *vif1, + struct iwl_mvm_vif *vif2) +{ + unsigned int i, j; + + for_each_mvm_vif_valid_link(vif1, i) { + for_each_mvm_vif_valid_link(vif2, j) { + if (vif1->link[i]->phy_ctxt == vif2->link[j]->phy_ctxt) + return true; + } + } + + return false; +} + +bool iwl_mvm_vif_is_active(struct iwl_mvm_vif *mvmvif) +{ + unsigned int i; + + /* FIXME: can it fail when phy_ctxt is assigned? 
*/ + for_each_mvm_vif_valid_link(mvmvif, i) { + if (mvmvif->link[i]->phy_ctxt && + mvmvif->link[i]->phy_ctxt->id < NUM_PHY_CTX) + return true; + } + + return false; +} diff --git a/pcie/ctxt-info-gen3.c b/pcie/ctxt-info-gen3.c index 75fd386b048e..fa4a14546860 100644 --- a/pcie/ctxt-info-gen3.c +++ b/pcie/ctxt-info-gen3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -136,6 +136,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, &control_flags); prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); + /* initialize the Step equalizer data */ + prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step); + prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step); + /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); if (ret) @@ -277,69 +281,273 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive) trans_pcie->prph_info = NULL; } -int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, - const void *data, u32 len) +static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + struct iwl_dram_data *dram) +{ + u32 len, len0, len1; + + if (pnvm_data->n_chunks != UNFRAGMENTED_PNVM_PAYLOADS_NUMBER) { + IWL_DEBUG_FW(trans, "expected 2 payloads, got %d.\n", + pnvm_data->n_chunks); + return -EINVAL; + } + + len0 = pnvm_data->chunks[0].len; + len1 = pnvm_data->chunks[1].len; + if (len1 > 0xFFFFFFFF - len0) { + IWL_DEBUG_FW(trans, "sizes of payloads overflow.\n"); + return -EINVAL; + } + len = len0 + len1; + + dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len, + &dram->physical); + if (!dram->block) { + IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n"); + return -ENOMEM; + } + + dram->size = len; + memcpy(dram->block, pnvm_data->chunks[0].data, len0); + memcpy((u8 *)dram->block + len0, pnvm_data->chunks[1].data, len1); + + return 0; +} + +static int iwl_pcie_load_payloads_segments + (struct iwl_trans *trans, + struct iwl_dram_regions *dram_regions, + const struct iwl_pnvm_image *pnvm_data) +{ + struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0]; + struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; + struct iwl_prph_scrath_mem_desc_addr_array *addresses; + const void *data; + u32 len; + int i; + + /* allocate and init DRAM descriptors array */ + len = sizeof(struct iwl_prph_scrath_mem_desc_addr_array); + desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent + (trans, + len, + &desc_dram->physical); + if (!desc_dram->block) { + IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n"); + return -ENOMEM; + } + desc_dram->size = len; + memset(desc_dram->block, 0, len); + + /* allocate DRAM region for each payload */ + dram_regions->n_regions = 0; + for (i = 0; i < pnvm_data->n_chunks; i++) { + len = pnvm_data->chunks[i].len; + data = pnvm_data->chunks[i].data; + + if (iwl_pcie_ctxt_info_alloc_dma(trans, + data, + len, + cur_payload_dram)) { + iwl_trans_pcie_free_pnvm_dram_regions(dram_regions, + trans->dev); + return -ENOMEM; + } + + dram_regions->n_regions++; + cur_payload_dram++; + } + + /* fill desc with the DRAM payloads addresses */ + addresses = desc_dram->block; + for (i = 0; i < pnvm_data->n_chunks; i++) { + addresses->mem_descs[i] = + cpu_to_le64(dram_regions->drams[i].physical); + } + + 
return 0; + +} + +int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_payloads, + const struct iwl_ucode_capabilities *capa) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = &trans_pcie->prph_scratch->ctrl_cfg; - int ret; + struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data; + int ret = 0; + + /* only allocate the DRAM if not allocated yet */ + if (trans->pnvm_loaded) + return 0; + + if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) + return -EBUSY; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; - /* only allocate the DRAM if not allocated yet */ - if (!trans->pnvm_loaded) { - if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) - return -EBUSY; - - ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, - &trans_pcie->pnvm_dram); - if (ret < 0) { - IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n", - ret); - return ret; + if (!pnvm_payloads->n_chunks) { + IWL_DEBUG_FW(trans, "no payloads\n"); + return -EINVAL; + } + + /* save payloads in several DRAM sections */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) { + ret = iwl_pcie_load_payloads_segments(trans, + dram_regions, + pnvm_payloads); + if (!ret) + trans->pnvm_loaded = true; + } else { + /* save only in one DRAM section */ + ret = iwl_pcie_load_payloads_continuously + (trans, + pnvm_payloads, + &dram_regions->drams[0]); + if (!ret) { + dram_regions->n_regions = 1; + trans->pnvm_loaded = true; } } + return ret; +} + +static inline size_t +iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions) +{ + size_t total_size = 0; + int i; + + for (i = 0; i < dram_regions->n_regions; i++) + total_size += dram_regions->drams[i].size; + + return total_size; +} + +static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data; + prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = - cpu_to_le64(trans_pcie->pnvm_dram.physical); + cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical); prph_sc_ctrl->pnvm_cfg.pnvm_size = - cpu_to_le32(trans_pcie->pnvm_dram.size); + cpu_to_le32(iwl_dram_regions_size(dram_regions)); +} - return 0; +static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + + prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = + cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical); + prph_sc_ctrl->pnvm_cfg.pnvm_size = + cpu_to_le32(trans_pcie->pnvm_data.drams[0].size); } -int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, - const void *data, u32 len) +void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return; + + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) + iwl_pcie_set_pnvm_segments(trans); + else + iwl_pcie_set_continuous_pnvm(trans); +} + +int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg 
*prph_sc_ctrl = &trans_pcie->prph_scratch->ctrl_cfg; - int ret; + struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data; + int ret = 0; + + /* only allocate the DRAM if not allocated yet */ + if (trans->reduce_power_loaded) + return 0; if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0; - /* only allocate the DRAM if not allocated yet */ - if (!trans->reduce_power_loaded) { - if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) - return -EBUSY; + if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) + return -EBUSY; - ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len, - &trans_pcie->reduce_power_dram); - if (ret < 0) { - IWL_DEBUG_FW(trans, - "Failed to allocate reduce power DMA %d.\n", - ret); - return ret; + if (!payloads->n_chunks) { + IWL_DEBUG_FW(trans, "no payloads\n"); + return -EINVAL; + } + + /* save payloads in several DRAM sections */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) { + ret = iwl_pcie_load_payloads_segments(trans, + dram_regions, + payloads); + if (!ret) + trans->reduce_power_loaded = true; + } else { + /* save only in one DRAM section */ + ret = iwl_pcie_load_payloads_continuously + (trans, + payloads, + &dram_regions->drams[0]); + if (!ret) { + dram_regions->n_regions = 1; + trans->reduce_power_loaded = true; } } + return ret; +} + +static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data; + prph_sc_ctrl->reduce_power_cfg.base_addr = - cpu_to_le64(trans_pcie->reduce_power_dram.physical); + cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical); prph_sc_ctrl->reduce_power_cfg.size = - cpu_to_le32(trans_pcie->reduce_power_dram.size); + cpu_to_le32(iwl_dram_regions_size(dram_regions)); +} - return 0; +static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl = + &trans_pcie->prph_scratch->ctrl_cfg; + + prph_sc_ctrl->reduce_power_cfg.base_addr = + cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical); + prph_sc_ctrl->reduce_power_cfg.size = + cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size); +} + +void +iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + return; + + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) + iwl_pcie_set_reduce_power_segments(trans); + else + iwl_pcie_set_continuous_reduce_power(trans); } + diff --git a/pcie/ctxt-info.c b/pcie/ctxt-info.c index 74ce31fdf45e..5f55efe64bf5 100644 --- a/pcie/ctxt-info.c +++ b/pcie/ctxt-info.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -38,9 +38,9 @@ static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, return result; } -static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, - size_t size, - dma_addr_t *phys) +void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys) { return 
_iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0); } diff --git a/pcie/drv.c b/pcie/drv.c index b16d4ae182d1..73c1fb3c0c5e 100644 --- a/pcie/drv.c +++ b/pcie/drv.c @@ -484,17 +484,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0xA0F0, PCI_ANY_ID, iwl_qu_long_latency_trans_cfg)}, - {IWL_PCI_DEVICE(0x2720, PCI_ANY_ID, iwl_qnj_trans_cfg)}, - {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)}, /* So devices */ {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)}, - {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)}, {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)}, + {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, @@ -504,8 +502,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* Bz devices */ {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, + +/* Sc devices */ + {IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -513,16 +515,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = { MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \ - { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ - .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \ - .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ - .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket } + _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ + { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ + .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ + .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ + .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ - _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name) + _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, _cfg, _name) static const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -542,14 +544,17 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x51F1, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7A70, 
0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), - IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma, iwl_ax411_killer_1690i_name), /* AX200 */ IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name), @@ -565,7 +570,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), - IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), @@ -660,25 +664,13 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name), IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name), - /* SnJ with HR */ - IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), - IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL), - IWL_DEV_INFO(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0, NULL), - IWL_DEV_INFO(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), - IWL_DEV_INFO(0x2726, 0x00B4, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL), - IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), - IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), - IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), - IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), - /* SO with GF2 */ IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x51F1, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x51F1, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), @@ -689,661 +681,451 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), /* MA with GF2 */ - IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, 
iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma, iwl_ax211_killer_1675i_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9461_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9461_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9462_160_name), - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9260_2ac_cfg, iwl9462_name), - - _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, 
IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, 
IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, 
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), - /* QnJ */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 
IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name), - _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), - /* Qu with Hr */ /* Qu B step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), - -/* QnJ with Hr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), - -/* SnJ with Jf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9462_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_jf_b0, iwl9560_name), - -/* SnJ with Hr */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_hr_b0, iwl_ax101_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_hr_b0, iwl_ax201_name), + IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_hr_b0, iwl_ax201_name), + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name), + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_fm_a0, iwl_ax231_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax231_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - 
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_hr_b0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_gf_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_mr_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_a0_fm_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, - iwl_cfg_gl_a0_fm_a0, iwl_bz_name), - -/* BZ Z step */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_bz_z0_gf_a0, iwl_bz_name), + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, 
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_bz_name), -/* BNJ */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_fm_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_gf_a0, iwl_bz_name), +/* Ga (Gl) */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, - iwl_cfg_bnj_a0_hr_b0, iwl_bz_name), + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl, iwl_bz_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), - -/* SoF with JF2 */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, 
IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), - -/* SoF with JF */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, 
IWL_CFG_ANY, - IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* MsP */ /* For now we use the same FW as MR, but this will change in the future. */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_ms_a0, iwl_ax204_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_ma_a0_ms_a0, iwl_ax204_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, - iwl_cfg_snj_a0_ms_a0, iwl_ax204_name) + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_ma, iwl_ax204_name), +/* Sc */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc, iwl_sc_name), #endif /* CONFIG_IWLMVM */ }; /* - * In case that there is no OTP on the NIC, get the rf id and cdb info - * from the prph registers. + * Read rf id and cdb info from prph register and store it */ static int get_crf_id(struct iwl_trans *iwl_trans) { int ret = 0; u32 sd_reg_ver_addr; - u32 cdb = 0; - u32 val; + u32 val = 0; if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; @@ -1362,10 +1144,38 @@ static int get_crf_id(struct iwl_trans *iwl_trans) iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ - val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); + iwl_trans->hw_crf_id = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); + + /* Read cnv info */ + iwl_trans->hw_cnv_id = + iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); /* Read cdb info (also contains the jacket info if needed in the future */ - cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); + iwl_trans->hw_wfpm_id = + iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); + IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n", + iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id, + iwl_trans->hw_wfpm_id); + + iwl_trans_release_nic_access(iwl_trans); + +out: + return ret; +} + +/* + * In case that there is no OTP on the NIC, map the rf id and cdb info + * from the prph registers. 
+ */ +static int map_crf_id(struct iwl_trans *iwl_trans) +{ + int ret = 0; + u32 val = iwl_trans->hw_crf_id; + u32 step_id = REG_CRF_ID_STEP(val); + u32 slave_id = REG_CRF_ID_SLAVE(val); + u32 jacket_id_cnv = REG_CRF_ID_SLAVE(iwl_trans->hw_cnv_id); + u32 jacket_id_wfpm = WFPM_OTP_CFG1_IS_JACKET(iwl_trans->hw_wfpm_id); + u32 cdb_id_wfpm = WFPM_OTP_CFG1_IS_CDB(iwl_trans->hw_wfpm_id); /* Map between crf id to rf id */ switch (REG_CRF_ID_TYPE(val)) { @@ -1375,9 +1185,12 @@ static int get_crf_id(struct iwl_trans *iwl_trans) case REG_CRF_ID_TYPE_JF_2: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12); break; - case REG_CRF_ID_TYPE_HR_NONE_CDB: + case REG_CRF_ID_TYPE_HR_NONE_CDB_1X1: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12); break; + case REG_CRF_ID_TYPE_HR_NONE_CDB: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); + break; case REG_CRF_ID_TYPE_HR_CDB: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12); break; @@ -1387,29 +1200,43 @@ static int get_crf_id(struct iwl_trans *iwl_trans) case REG_CRF_ID_TYPE_MR: iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12); break; - case REG_CRF_ID_TYPE_FM: - iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); - break; + case REG_CRF_ID_TYPE_FM: + case REG_CRF_ID_TYPE_FMI: + case REG_CRF_ID_TYPE_FMR: + iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12); + break; default: ret = -EIO; IWL_ERR(iwl_trans, - "Can find a correct rfid for crf id 0x%x\n", + "Can't find a correct rfid for crf id 0x%x\n", REG_CRF_ID_TYPE(val)); - goto out_release; + goto out; } + /* Set Step-id */ + iwl_trans->hw_rf_id |= (step_id << 8); + /* Set CDB capabilities */ - if (cdb & BIT(4)) { + if (cdb_id_wfpm || slave_id) { iwl_trans->hw_rf_id += BIT(28); IWL_INFO(iwl_trans, "Adding cdb to rf id\n"); } - IWL_INFO(iwl_trans, "Detected RF 0x%x from crf id 0x%x\n", - iwl_trans->hw_rf_id, REG_CRF_ID_TYPE(val)); + /* Set Jacket capabilities */ + if (jacket_id_wfpm || jacket_id_cnv) { + iwl_trans->hw_rf_id += BIT(29); + IWL_INFO(iwl_trans, "Adding jacket to rf id\n"); + } -out_release: - iwl_trans_release_nic_access(iwl_trans); + IWL_INFO(iwl_trans, + "Detected rf-type 0x%x step-id 0x%x slave-id 0x%x from crf id 0x%x\n", + REG_CRF_ID_TYPE(val), step_id, slave_id, iwl_trans->hw_rf_id); + IWL_INFO(iwl_trans, + "Detected cdb-id 0x%x jacket-id 0x%x from wfpm id 0x%x\n", + cdb_id_wfpm, jacket_id_wfpm, iwl_trans->hw_wfpm_id); + IWL_INFO(iwl_trans, "Detected jacket-id 0x%x from cnvi id 0x%x\n", + jacket_id_cnv, iwl_trans->hw_cnv_id); out: return ret; @@ -1420,8 +1247,8 @@ out: static const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, - u16 mac_type, u8 mac_step, - u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores) + u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) { int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; @@ -1472,6 +1299,10 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, dev_info->cores != cores) continue; + if (dev_info->rf_step != (u8)IWL_CFG_ANY && + dev_info->rf_step != rf_step) + continue; + return dev_info; } @@ -1529,6 +1360,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); + get_crf_id(iwl_trans); /* * The RF_ID is set to zero in blank OTP so read version to @@ -1537,11 +1369,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - 
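The reworked device table above leans on iwl_pci_find_dev_info()'s wildcard matching: an entry only constrains the fields it pins down, IWL_CFG_ANY matches everything else, and the first entry that survives all checks wins; the new rf_step column added in this patch is one more optional filter. A minimal standalone sketch of that matching logic (simplified fields and invented values, not the driver's real structures):

/*
 * Illustrative sketch only -- mirrors the "first matching entry wins, with
 * a wildcard sentinel" logic, including the new rf_step check.
 */
#include <stdint.h>
#include <stdio.h>

#define CFG_ANY 0xffff  /* stand-in for IWL_CFG_ANY */

struct dev_info {
        uint16_t device;        /* PCI device id or CFG_ANY */
        uint16_t mac_type;      /* MAC family or CFG_ANY */
        uint16_t rf_type;       /* RF module or CFG_ANY */
        uint8_t rf_step;        /* RF silicon step or (uint8_t)CFG_ANY */
        const char *name;
};

static const struct dev_info table[] = {
        { CFG_ANY, 0x42, 0x105, 1,                "example-rf-step-b" },
        { CFG_ANY, 0x42, 0x105, (uint8_t)CFG_ANY, "example-generic"   },
};

static const struct dev_info *find_dev_info(uint16_t device, uint16_t mac_type,
                                            uint16_t rf_type, uint8_t rf_step)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                const struct dev_info *d = &table[i];

                if (d->device != CFG_ANY && d->device != device)
                        continue;
                if (d->mac_type != CFG_ANY && d->mac_type != mac_type)
                        continue;
                if (d->rf_type != CFG_ANY && d->rf_type != rf_type)
                        continue;
                /* the check this patch adds: optional RF step match */
                if (d->rf_step != (uint8_t)CFG_ANY && d->rf_step != rf_step)
                        continue;
                return d;
        }
        return NULL;
}

int main(void)
{
        /* step 1 hits the specific entry; step 0 falls through to the generic one */
        printf("%s\n", find_dev_info(0x1234, 0x42, 0x105, 1)->name);
        printf("%s\n", find_dev_info(0x1234, 0x42, 0x105, 0)->name);
        return 0;
}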
!CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && map_crf_id(iwl_trans)) { ret = -EINVAL; goto out_free_trans; } + IWL_INFO(iwl_trans, "PCI dev %04x/%04x, rev=0x%x, rfid=0x%x\n", + pdev->device, pdev->subsystem_device, + iwl_trans->hw_rev, iwl_trans->hw_rf_id); + dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), iwl_trans->hw_rev_step, @@ -1550,25 +1386,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), IWL_SUBDEVICE_NO_160(pdev->subsystem_device), - IWL_SUBDEVICE_CORES(pdev->subsystem_device)); - + IWL_SUBDEVICE_CORES(pdev->subsystem_device), + CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; } #if IS_ENABLED(CONFIG_IWLMVM) - - /* - * Workaround for problematic SnJ device: sometimes when - * certain RF modules are connected to SnJ, the device ID - * changes to QnJ's ID. So we are using QnJ's trans_cfg until - * here. But if we detect that the MAC type is actually SnJ, - * we should switch to it here to avoid problems later. - */ - if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ) - iwl_trans->trans_cfg = &iwl_so_trans_cfg; - /* * special-case 7265D, it has the same PCI IDs. * @@ -1641,6 +1466,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } + if (!iwl_trans->trans_cfg->integrated) { + u16 link_status; + + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &link_status); + + iwl_trans->pcie_link_speed = + u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS); + } + ret = iwl_trans_init(iwl_trans); if (ret) goto out_free_trans; @@ -1671,6 +1505,9 @@ static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); + if (!trans) + return; + iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); diff --git a/pcie/internal.h b/pcie/internal.h index f7e4f868363d..0adcf0e13e85 100644 --- a/pcie/internal.h +++ b/pcie/internal.h @@ -23,6 +23,7 @@ #include "iwl-op-mode.h" #include "iwl-drv.h" #include "queue/tx.h" +#include "iwl-context-info.h" /* * RX related structures and functions @@ -306,7 +307,9 @@ enum iwl_pcie_imr_status { * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @kw: keep warm address - * @pnvm_dram: DRAM area that contains the PNVM data + * @pnvm_data: holds info about pnvm payloads allocated in DRAM + * @reduced_tables_data: holds info about power reduced tablse + * payloads allocated in DRAM * @pci_dev: basic pci-network driver stuff * @hw_base: pci hardware address support * @ucode_write_complete: indicates that the ucode has been copied. 
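The probe path above also starts caching the PCIe link speed for non-integrated devices by masking the Current Link Speed field out of the Link Status register. A tiny userspace illustration of what u16_get_bits(link_status, PCI_EXP_LNKSTA_CLS) amounts to, assuming the usual low-four-bit CLS field; the register value below is made up:

#include <stdint.h>
#include <stdio.h>

#define LNKSTA_CLS_MASK 0x000f  /* Current Link Speed field, bits 3:0 */

/* minimal equivalent of u16_get_bits(val, mask) for a mask anchored at bit 0 */
static uint16_t get_cls(uint16_t lnksta)
{
        return lnksta & LNKSTA_CLS_MASK;
}

int main(void)
{
        uint16_t lnksta = 0x1043;       /* made-up Link Status register value */

        /* 1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s, 4 = 16 GT/s */
        printf("current link speed code: %u\n", get_cls(lnksta));
        return 0;
}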
@@ -380,8 +383,9 @@ struct iwl_trans_pcie { u32 scd_base_addr; struct iwl_dma_ptr kw; - struct iwl_dram_data pnvm_dram; - struct iwl_dram_data reduce_power_dram; + /* pnvm data */ + struct iwl_dram_regions pnvm_data; + struct iwl_dram_regions reduced_tables_data; struct iwl_txq *txq_memory; @@ -478,6 +482,8 @@ struct iwl_trans const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans); void iwl_trans_pcie_free(struct iwl_trans *trans); +void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, + struct device *dev); bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); #define _iwl_trans_pcie_grab_nic_access(trans) \ @@ -497,6 +503,7 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); void iwl_pcie_free_rbs_pool(struct iwl_trans *trans); void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); +void iwl_pcie_rx_napi_sync(struct iwl_trans *trans); void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq); diff --git a/pcie/rx.c b/pcie/rx.c index 68a4572cee53..f87b28edc267 100644 --- a/pcie/rx.c +++ b/pcie/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2022 Intel Corporation + * Copyright (C) 2003-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -699,17 +699,25 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, rxq->used_bd = NULL; } +static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans) +{ + bool use_rx_td = (trans->trans_cfg->device_family >= + IWL_DEVICE_FAMILY_AX210); + + if (use_rx_td) + return sizeof(__le16); + + return sizeof(struct iwl_rb_status); +} + static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t rb_stts_size = iwl_pcie_rb_stts_size(trans); struct device *dev = trans->dev; int i; int free_size; - bool use_rx_td = (trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210); - size_t rb_stts_size = use_rx_td ? sizeof(__le16) : - sizeof(struct iwl_rb_status); spin_lock_init(&rxq->lock); if (trans->trans_cfg->mq_rx_supported) @@ -757,11 +765,9 @@ err: static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t rb_stts_size = iwl_pcie_rb_stts_size(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, ret; - size_t rb_stts_size = trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210 ? 
- sizeof(__le16) : sizeof(struct iwl_rb_status); if (WARN_ON(trans_pcie->rxq)) return -EINVAL; @@ -1053,6 +1059,22 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) return ret; } +void iwl_pcie_rx_napi_sync(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + if (unlikely(!trans_pcie->rxq)) + return; + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq && rxq->napi.poll) + napi_synchronize(&rxq->napi); + } +} + static int _iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1110,7 +1132,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) poll = iwl_pcie_napi_poll_msix; netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, - poll, NAPI_POLL_WEIGHT); + poll); napi_enable(&rxq->napi); } @@ -1177,11 +1199,9 @@ int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t rb_stts_size = iwl_pcie_rb_stts_size(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; - size_t rb_stts_size = trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210 ? - sizeof(__le16) : sizeof(struct iwl_rb_status); /* * if rxq is NULL, it means that nothing has been allocated, @@ -1620,14 +1640,14 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) struct msix_entry *entry = dev_id; struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; - struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry]; + struct iwl_rxq *rxq; trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0); if (WARN_ON(entry->entry >= trans->num_rx_queues)) return IRQ_NONE; - if (!rxq) { + if (!trans_pcie->rxq) { if (net_ratelimit()) IWL_ERR(trans, "[%d] Got MSI-X interrupt before we have Rx queues\n", @@ -1635,6 +1655,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) return IRQ_NONE; } + rxq = &trans_pcie->rxq[entry->entry]; lock_map_acquire(&trans->sync_cmd_lockdep_map); IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); @@ -1857,7 +1878,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) return IRQ_NONE; } - if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) { /* * Hardware disappeared. It might have * already raised an interrupt. 
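Two hunks in this patch replace open-coded comparisons against the bus-error sentinel with iwl_trans_is_hw_error_value(). The helper's definition is not part of this diff, but the checks it replaces ((inta & 0xFFFFFFF0) == 0xa5a5a5a0 here, hpm != 0xa5a5a5a0 later in pcie/trans.c) suggest roughly the following; treat it as an approximation, not the driver's exact code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Approximation of iwl_trans_is_hw_error_value(): the hardware returns
 * 0xa5a5a5aX patterns when a register read hits an errored device.  The
 * all-ones case (dead PCI bus) stays a separate caller-side check, as in
 * the hunk above.
 */
static bool is_hw_error_value(uint32_t val)
{
        return (val & 0xfffffff0) == 0xa5a5a5a0;
}

int main(void)
{
        uint32_t samples[] = { 0xa5a5a5a0, 0xa5a5a5a3, 0xffffffff, 0x00400000 };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("0x%08x -> dead bus: %d, hw error pattern: %d\n",
                       samples[i], samples[i] == 0xffffffff,
                       is_hw_error_value(samples[i]));
        return 0;
}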
diff --git a/pcie/trans-gen2.c b/pcie/trans-gen2.c index 94f40c4d2421..fa46dad5fd68 100644 --- a/pcie/trans-gen2.c +++ b/pcie/trans-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include "iwl-trans.h" #include "iwl-prph.h" @@ -117,9 +117,14 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) trans_pcie->fw_reset_state != FW_RESET_REQUESTED, FW_RESET_TIMEOUT); if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) { - IWL_INFO(trans, - "firmware didn't ACK the reset - continue anyway\n"); - iwl_trans_fw_error(trans, true); + u32 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); + + IWL_ERR(trans, + "timeout waiting for FW reset ACK (inta_hw=0x%x)\n", + inta_hw); + + if (!(inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)) + iwl_trans_fw_error(trans, true); } trans_pcie->fw_reset_state = FW_RESET_IDLE; @@ -156,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_rx_napi_sync(trans); iwl_txq_gen2_tx_free(trans); iwl_pcie_rx_stop(trans); } @@ -277,6 +283,9 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB): pos = scnprintf(buf, buflen, "HRCDB"); break; + case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS): + pos = scnprintf(buf, buflen, "MS"); + break; default: return; } @@ -347,7 +356,7 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) mutex_unlock(&trans_pcie->mutex); } -static void iwl_pcie_set_ltr(struct iwl_trans *trans) +static bool iwl_pcie_set_ltr(struct iwl_trans *trans) { u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, @@ -368,18 +377,77 @@ static void iwl_pcie_set_ltr(struct iwl_trans *trans) trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && !trans->trans_cfg->integrated) { iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); - } else if (trans->trans_cfg->integrated && - trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + return true; + } + + if (trans->trans_cfg->integrated && + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); + return true; + } + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + /* First clear the interrupt, just in case */ + iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, + MSIX_HW_INT_CAUSES_REG_IML); + /* In this case, unfortunately the same ROM bug exists in the + * device (not setting LTR correctly), but we don't have control + * over the settings from the host due to some hardware security + * features. The only workaround we've been able to come up with + * so far is to try to keep the CPU and device busy by polling + * it and the IML (image loader) completed interrupt. 
+ */ + return false; } + + /* nothing needs to be done on other devices */ + return true; +} + +static void iwl_pcie_spin_for_iml(struct iwl_trans *trans) +{ +/* in practice, this seems to complete in around 20-30ms at most, wait 100 */ +#define IML_WAIT_TIMEOUT (HZ / 10) + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long end_time = jiffies + IML_WAIT_TIMEOUT; + u32 value, loops = 0; + bool irq = false; + + if (WARN_ON(!trans_pcie->iml)) + return; + + value = iwl_read32(trans, CSR_LTR_LAST_MSG); + IWL_DEBUG_INFO(trans, "Polling for IML load - CSR_LTR_LAST_MSG=0x%x\n", + value); + + while (time_before(jiffies, end_time)) { + if (iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD) & + MSIX_HW_INT_CAUSES_REG_IML) { + irq = true; + break; + } + /* Keep the CPU and device busy. */ + value = iwl_read32(trans, CSR_LTR_LAST_MSG); + loops++; + } + + IWL_DEBUG_INFO(trans, + "Polled for IML load: irq=%d, loops=%d, CSR_LTR_LAST_MSG=0x%x\n", + irq, loops, value); + + /* We don't fail here even if we timed out - maybe we get lucky and the + * interrupt comes in later (and we get alive from firmware) and then + * we're all happy - but if not we'll fail on alive timeout or get some + * other error out. + */ } int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - bool hw_rfkill; + bool hw_rfkill, keep_ram_busy; int ret; /* This may fail if AMT took ownership of the device */ @@ -440,7 +508,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, if (ret) goto out; - iwl_pcie_set_ltr(trans); + keep_ram_busy = !iwl_pcie_set_ltr(trans); if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE); @@ -452,6 +520,9 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1); } + if (keep_ram_busy) + iwl_pcie_spin_for_iml(trans); + /* re-check RF-Kill state since we may have missed the interrupt */ hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) diff --git a/pcie/trans.c b/pcie/trans.c index 8be3c3c8c68b..3e988da44973 100644 --- a/pcie/trans.c +++ b/pcie/trans.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2007-2015, 2018-2022 Intel Corporation + * Copyright (C) 2007-2015, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -131,13 +131,15 @@ static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_SW_RESET); - else + usleep_range(10000, 20000); + } else { iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); - usleep_range(5000, 6000); + usleep_range(5000, 6000); + } if (retake_ownership) return iwl_pcie_prepare_card_hw(trans); @@ -161,7 +163,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) } static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, - u8 max_power, u8 min_power) + u8 max_power) { struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; void *block = NULL; @@ -169,10 +171,13 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans, u32 size = 0; 
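iwl_pcie_spin_for_iml() above is a bounded busy-wait: keep the CPU and device busy by re-reading registers until either the IML-complete interrupt cause shows up or roughly 100 ms pass, then log how it ended without treating a timeout as fatal. The same deadline pattern in plain C (monotonic clock instead of jiffies, a simulated register instead of MMIO; all names here are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define IML_DONE_BIT    (1u << 1)       /* stand-in for the MSI-X IML cause bit */
#define WAIT_MS         100             /* mirrors IML_WAIT_TIMEOUT (HZ / 10) */

/* fake register read: reports completion after a while (simulation only) */
static uint32_t read_int_causes(unsigned long *calls)
{
        return ++(*calls) > 200000 ? IML_DONE_BIT : 0;
}

static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main(void)
{
        uint64_t deadline = now_ms() + WAIT_MS;
        unsigned long calls = 0, loops = 0;
        bool irq = false;

        while (now_ms() < deadline) {
                if (read_int_causes(&calls) & IML_DONE_BIT) {
                        irq = true;
                        break;
                }
                loops++;        /* keep polling, like the LTR workaround */
        }

        /* a timeout is only logged, not fatal -- same policy as the driver */
        printf("polled for IML: irq=%d loops=%lu\n", irq, loops);
        return 0;
}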
u8 power; - if (fw_mon->size) + if (fw_mon->size) { + memset(fw_mon->block, 0, fw_mon->size); return; + } - for (power = max_power; power >= min_power; power--) { + /* need at least 2 KiB, so stop at 11 */ + for (power = max_power; power >= 11; power--) { size = BIT(power); block = dma_alloc_coherent(trans->dev, size, &physical, GFP_KERNEL | __GFP_NOWARN); @@ -213,10 +218,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) max_power)) return; - if (trans->dbg.fw_mon.size) - return; - - iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11); + iwl_pcie_alloc_fw_monitor_block(trans, max_power); } static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg) @@ -475,7 +477,7 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, 100); - msleep(100); + usleep_range(10000, 20000); } else { iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); @@ -599,7 +601,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; - int t = 0; int iter; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); @@ -616,6 +617,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) usleep_range(1000, 2000); for (iter = 0; iter < 10; iter++) { + int t = 0; + /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); @@ -816,7 +819,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, iwl_enable_interrupts(trans); - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { if (cpu == 1) iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 0xFFFF); @@ -1085,34 +1088,44 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) } struct iwl_causes_list { - u32 cause_num; - u32 mask_reg; + u16 mask_reg; + u8 bit; u8 addr; }; +#define IWL_CAUSE(reg, mask) \ + { \ + .mask_reg = reg, \ + .bit = ilog2(mask), \ + .addr = ilog2(mask) + \ + ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \ + (reg) == CSR_MSIX_HW_INT_MASK_AD ? 
16 : \ + 0xffff), /* causes overflow warning */ \ + } + static const struct iwl_causes_list causes_list_common[] = { - {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, - {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, - {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, - {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, - {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, - {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, - {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12}, - {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, - {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, - {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, - {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, - {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, - {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, - {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D), + IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR), + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP), }; static const struct iwl_causes_list causes_list_pre_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR), }; static const struct iwl_causes_list causes_list_bz[] = { - {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x15}, + IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ), }; static void iwl_pcie_map_list(struct iwl_trans *trans, @@ -1124,7 +1137,7 @@ static void iwl_pcie_map_list(struct iwl_trans *trans, for (i = 0; i < arr_size; i++) { iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); iwl_clear_bit(trans, causes[i].mask_reg, - causes[i].cause_num); + BIT(causes[i].bit)); } } @@ -1250,6 +1263,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); + iwl_pcie_rx_napi_sync(trans); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); @@ -1512,19 +1526,16 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, suspend ? 
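The IWL_CAUSE() macro replaces the hand-maintained cause/IVAR-address pairs: the address is derived from the cause bit's position, shifted down by 16 for the FH mask register and up by 16 for the HW mask register. A quick standalone check that this arithmetic reproduces the addresses from the table the patch deletes (the bit positions below are inferred from those old addresses, e.g. D2S_CH0 at address 0 implies bit 16):

#include <stdint.h>
#include <stdio.h>

enum reg { FH_MASK, HW_MASK };  /* stand-ins for CSR_MSIX_{FH,HW}_INT_MASK_AD */

static int ilog2_u32(uint32_t v)
{
        int n = -1;

        while (v) {
                v >>= 1;
                n++;
        }
        return n;
}

/* same arithmetic as IWL_CAUSE(): IVAR address = bit position - 16 or + 16 */
static int cause_addr(enum reg reg, uint32_t mask)
{
        return ilog2_u32(mask) + (reg == FH_MASK ? -16 : 16);
}

int main(void)
{
        printf("D2S_CH0 (FH, bit 16) -> IVAR 0x%x\n", cause_addr(FH_MASK, 1u << 16));
        printf("FH_ERR  (FH, bit 21) -> IVAR 0x%x\n", cause_addr(FH_MASK, 1u << 21));
        printf("ALIVE   (HW, bit 0)  -> IVAR 0x%x\n", cause_addr(HW_MASK, 1u << 0));
        printf("SCD     (HW, bit 26) -> IVAR 0x%x\n", cause_addr(HW_MASK, 1u << 26));
        return 0;       /* prints 0, 0x5, 0x10, 0x2a -- matching the old table */
}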
UREG_DOORBELL_TO_ISR6_SUSPEND : UREG_DOORBELL_TO_ISR6_RESUME); - } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : CSR_IPC_SLEEP_CONTROL_RESUME); - iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, - UREG_DOORBELL_TO_ISR6_SLEEP_CTRL); - } else { + else return 0; - } ret = wait_event_timeout(trans_pcie->sx_waitq, trans_pcie->sx_complete, 2 * HZ); @@ -1777,7 +1788,7 @@ static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) } hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); - if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { + if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) { u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); if (wprot_val & PREG_WFPM_ACCESS) { @@ -1984,6 +1995,29 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; } +void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, + struct device *dev) +{ + u8 i; + struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; + + /* free DRAM payloads */ + for (i = 0; i < dram_regions->n_regions; i++) { + dma_free_coherent(dev, dram_regions->drams[i].size, + dram_regions->drams[i].block, + dram_regions->drams[i].physical); + } + dram_regions->n_regions = 0; + + /* free DRAM addresses array */ + if (desc_dram->block) { + dma_free_coherent(dev, desc_dram->size, + desc_dram->block, + desc_dram->physical); + } + memset(desc_dram, 0, sizeof(*desc_dram)); +} + void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2016,16 +2050,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) iwl_pcie_free_fw_monitor(trans); - if (trans_pcie->pnvm_dram.size) - dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, - trans_pcie->pnvm_dram.block, - trans_pcie->pnvm_dram.physical); - - if (trans_pcie->reduce_power_dram.size) - dma_free_coherent(trans->dev, - trans_pcie->reduce_power_dram.size, - trans_pcie->reduce_power_dram.block, - trans_pcie->reduce_power_dram.physical); + iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data, + trans->dev); + iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data, + trans->dev); mutex_destroy(&trans_pcie->mutex); iwl_trans_free(trans); @@ -2042,6 +2070,7 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) struct iwl_trans_pcie_removal { struct pci_dev *pdev; struct work_struct work; + bool rescan; }; static void iwl_trans_pcie_removal_wk(struct work_struct *wk) @@ -2050,18 +2079,61 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) container_of(wk, struct iwl_trans_pcie_removal, work); struct pci_dev *pdev = removal->pdev; static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + struct pci_bus *bus = pdev->bus; dev_err(&pdev->dev, "Device gone - attempting removal\n"); kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); pci_lock_rescan_remove(); pci_dev_put(pdev); pci_stop_and_remove_bus_device(pdev); + if (removal->rescan) + pci_rescan_bus(bus->parent); pci_unlock_rescan_remove(); kfree(removal); module_put(THIS_MODULE); } +void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan) +{ + struct iwl_trans_pcie_removal *removal; + + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return; + + IWL_ERR(trans, "Device gone - scheduling removal!\n"); + + 
/* + * get a module reference to avoid doing this + * while unloading anyway and to avoid + * scheduling a work with code that's being + * removed. + */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, + "Module is being unloaded - abort\n"); + return; + } + + removal = kzalloc(sizeof(*removal), GFP_ATOMIC); + if (!removal) { + module_put(THIS_MODULE); + return; + } + /* + * we don't need to clear this flag, because + * the trans will be freed and reallocated. + */ + set_bit(STATUS_TRANS_DEAD, &trans->status); + + removal->pdev = to_pci_dev(trans->dev); + removal->rescan = rescan; + INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); + pci_dev_get(removal->pdev); + schedule_work(&removal->work); +} +EXPORT_SYMBOL(iwl_trans_pcie_remove); + /* * This version doesn't disable BHs but rather assumes they're * already disabled. @@ -2121,47 +2193,12 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) iwl_trans_pcie_dump_regs(trans); - if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { - struct iwl_trans_pcie_removal *removal; - - if (test_bit(STATUS_TRANS_DEAD, &trans->status)) - goto err; - - IWL_ERR(trans, "Device gone - scheduling removal!\n"); - - /* - * get a module reference to avoid doing this - * while unloading anyway and to avoid - * scheduling a work with code that's being - * removed. - */ - if (!try_module_get(THIS_MODULE)) { - IWL_ERR(trans, - "Module is being unloaded - abort\n"); - goto err; - } - - removal = kzalloc(sizeof(*removal), GFP_ATOMIC); - if (!removal) { - module_put(THIS_MODULE); - goto err; - } - /* - * we don't need to clear this flag, because - * the trans will be freed and reallocated. - */ - set_bit(STATUS_TRANS_DEAD, &trans->status); - - removal->pdev = to_pci_dev(trans->dev); - INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); - pci_dev_get(removal->pdev); - schedule_work(&removal->work); - } else { + if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) + iwl_trans_pcie_remove(trans, false); + else iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); - } -err: spin_unlock(&trans_pcie->reg_lock); return false; } @@ -2844,7 +2881,7 @@ static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, void *buf, ssize_t *size, ssize_t *bytes_copied) { - int buf_size_left = count - *bytes_copied; + ssize_t buf_size_left = count - *bytes_copied; buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); if (*size > buf_size_left) @@ -3357,7 +3394,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, u8 tfdidx; u32 caplen, cmdlen; - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) tfdidx = idx; else tfdidx = ptr; @@ -3516,7 +3553,9 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .txq_free = iwl_txq_dyn_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, + .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm, .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, + .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power, .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, #ifdef CONFIG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, @@ -3558,7 +3597,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->imr_waitq); trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); + WQ_HIGHPRI | WQ_UNBOUND, 0); if (!trans_pcie->rba.alloc_wq) { ret = -ENOMEM; goto out_free_trans; diff --git a/pcie/tx.c b/pcie/tx.c index 
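A small fix worth calling out above: buf_size_left in iwl_write_to_user_buf() becomes ssize_t, so a large count no longer gets squeezed through a 32-bit int on the way to the size comparison. A minimal demonstration of the difference on an LP64 system (values invented):

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
        ssize_t count = 5L * 1024 * 1024 * 1024;        /* 5 GiB read request */
        ssize_t copied = 0;

        int narrow = count - copied;    /* old type: truncates on LP64 */
        ssize_t wide = count - copied;  /* new type: keeps the full value */

        printf("int     : %d\n", narrow);
        printf("ssize_t : %zd\n", wide);
        return 0;
}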
3546c5269c3b..790e5b124740 100644 --- a/pcie/tx.c +++ b/pcie/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -364,7 +364,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr); @@ -1547,6 +1547,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* there must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd, scratch) > + IWL_FIRST_TB_SIZE); /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; diff --git a/queue/tx.c b/queue/tx.c index 726185d6fab8..5bb3cc3367c9 100644 --- a/queue/tx.c +++ b/queue/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation */ #include <net/tso.h> #include <linux/tcp.h> @@ -648,6 +648,13 @@ struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, /* There must be data left over for TB1 or this code must be changed */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen2, dram_info) > + IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen3, dram_info) > + IWL_FIRST_TB_SIZE); memset(tfd, 0, sizeof(*tfd)); @@ -978,7 +985,7 @@ void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) bool active; u8 fifo; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, txq->read_ptr, txq->write_ptr); /* TODO: access new SCD registers and dump them */ @@ -1027,10 +1034,13 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, size_t tb0_buf_sz; int i; + if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) + return -EINVAL; + if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; - if (trans->trans_cfg->use_tfh) + if (trans->trans_cfg->gen2) tfd_sz = trans->txqs.tfd.size * slots_num; timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); @@ -1337,7 +1347,7 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, dma_addr_t addr; dma_addr_t hi_len; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { struct iwl_tfh_tfd *tfh_tfd = _tfd; struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; @@ -1398,7 +1408,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, meta->tbs = 0; - if (trans->trans_cfg->use_tfh) { + if (trans->trans_cfg->gen2) { struct iwl_tfh_tfd *tfd_fh = (void *)tfd; tfd_fh->num_tbs = 0; @@ -1554,14 +1564,18 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_txq *txq = trans->txqs.txq[txq_id]; - int tfd_num = iwl_txq_get_cmd_index(txq, ssn); - int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); - 
	int last_to_free;
+	int tfd_num, read_ptr, last_to_free;
 	/* This function is not meant to release cmd queue*/
 	if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
 		return;
+	if (WARN_ON(!txq))
+		return;
+
+	tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+	read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+
 	spin_lock_bh(&txq->lock);
 	if (!test_bit(txq_id, trans->txqs.queue_used)) {
@@ -1611,7 +1625,7 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		txq->entries[read_ptr].skb = NULL;
-		if (!trans->trans_cfg->use_tfh)
+		if (!trans->trans_cfg->gen2)
 			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
 		iwl_txq_free_tfd(trans, txq);
diff --git a/queue/tx.h b/queue/tx.h
index eca53bfd326d..1e4a24ab9bab 100644
--- a/queue/tx.h
+++ b/queue/tx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2020-2022 Intel Corporation
+ * Copyright (C) 2020-2023 Intel Corporation
 */
 #ifndef __iwl_trans_queue_tx_h__
 #define __iwl_trans_queue_tx_h__
@@ -38,7 +38,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
 static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
 				    struct iwl_txq *txq, int idx)
 {
-	if (trans->trans_cfg->use_tfh)
+	if (trans->trans_cfg->gen2)
 		idx = iwl_txq_get_cmd_index(txq, idx);
 	return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
@@ -135,7 +135,7 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
 {
 	struct iwl_tfd *tfd;
-	if (trans->trans_cfg->use_tfh) {
+	if (trans->trans_cfg->gen2) {
 		struct iwl_tfh_tfd *tfh_tfd = _tfd;
 		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
@@ -151,7 +151,7 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
 	struct iwl_tfd *tfd;
 	struct iwl_tfd_tb *tb;
-	if (trans->trans_cfg->use_tfh) {
+	if (trans->trans_cfg->gen2) {
 		struct iwl_tfh_tfd *tfh_tfd = _tfd;
 		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
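The BUILD_BUG_ON() additions in pcie/tx.c and queue/tx.c above turn a layout assumption -- the command header plus the TX command fields up to scratch/dram_info must fit inside the first transfer buffer -- into a compile-time failure. The same pattern in standalone C11, with a local offsetofend() since that macro is kernel-internal, and with invented toy structures (the driver's IWL_FIRST_TB_SIZE is 20 bytes):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* local equivalent of the kernel's offsetofend(): end offset of a member */
#define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

#define FIRST_TB_SIZE 20        /* same size budget as the driver's first TB */

/* toy layouts -- field sizes are invented, only the check pattern matters */
struct cmd_header {
        uint8_t cmd, group_id;
        uint16_t sequence;
};

struct tx_cmd {
        uint16_t len;
        uint16_t flags;
        uint32_t dram_info;
        uint8_t payload[64];
};

/* compile-time equivalent of the added BUILD_BUG_ON()s */
_Static_assert(sizeof(struct cmd_header) +
               offsetofend(struct tx_cmd, dram_info) <= FIRST_TB_SIZE,
               "TX command prologue must fit in the first TB");

int main(void)
{
        printf("header + cmd up to dram_info: %zu bytes (limit %d)\n",
               sizeof(struct cmd_header) + offsetofend(struct tx_cmd, dram_info),
               FIRST_TB_SIZE);
        return 0;
}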