aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBjoern A. Zeeb <bz@FreeBSD.org>2024-09-18 10:42:14 +0000
committerBjoern A. Zeeb <bz@FreeBSD.org>2024-09-27 21:22:00 +0000
commit5d6ad179ba1337044d054d06596e2169952660b6 (patch)
treea973be937966a1a2e55e8788afe126da6ae3af65
parentc6bcce0f5b920c7be597b105cc01ae72f9189747 (diff)
iwlwifi: update Intel's iwlwifi/mvm driver.vendor/Linux/iwlwifi/torvalds-v6.11vendor/Linux/iwlwifi
This version is based on git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 98f7e32f20d28ec452afb208f9cffc08448a2652 ( tag: v6.11 ).
-rw-r--r--cfg/22000.c4
-rw-r--r--cfg/ax210.c12
-rw-r--r--cfg/bz.c29
-rw-r--r--cfg/sc.c48
-rw-r--r--fw/acpi.c649
-rw-r--r--fw/acpi.h220
-rw-r--r--fw/api/alive.h6
-rw-r--r--fw/api/binding.h4
-rw-r--r--fw/api/coex.h81
-rw-r--r--fw/api/commands.h30
-rw-r--r--fw/api/config.h4
-rw-r--r--fw/api/d3.h107
-rw-r--r--fw/api/datapath.h36
-rw-r--r--fw/api/dbg-tlv.h81
-rw-r--r--fw/api/debug.h40
-rw-r--r--fw/api/location.h160
-rw-r--r--fw/api/mac-cfg.h60
-rw-r--r--fw/api/mac.h17
-rw-r--r--fw/api/nvm-reg.h198
-rw-r--r--fw/api/offload.h8
-rw-r--r--fw/api/phy-ctxt.h24
-rw-r--r--fw/api/phy.h9
-rw-r--r--fw/api/power.h134
-rw-r--r--fw/api/rfi.h7
-rw-r--r--fw/api/rs.h4
-rw-r--r--fw/api/rx.h26
-rw-r--r--fw/api/scan.h49
-rw-r--r--fw/api/sta.h4
-rw-r--r--fw/api/stats.h153
-rw-r--r--fw/api/time-event.h80
-rw-r--r--fw/api/tx.h23
-rw-r--r--fw/api/txq.h14
-rw-r--r--fw/dbg.c291
-rw-r--r--fw/dbg.h4
-rw-r--r--fw/debugfs.c17
-rw-r--r--fw/dump.c3
-rw-r--r--fw/error-dump.h33
-rw-r--r--fw/file.h71
-rw-r--r--fw/img.h4
-rw-r--r--fw/init.c7
-rw-r--r--fw/notif-wait.h3
-rw-r--r--fw/pnvm.c49
-rw-r--r--fw/regulatory.c636
-rw-r--r--fw/regulatory.h220
-rw-r--r--fw/rs.c1
-rw-r--r--fw/runtime.h30
-rw-r--r--fw/uefi.c462
-rw-r--r--fw/uefi.h245
-rw-r--r--iwl-config.h43
-rw-r--r--iwl-context-info-gen3.h13
-rw-r--r--iwl-csr.h10
-rw-r--r--iwl-dbg-tlv.c94
-rw-r--r--iwl-dbg-tlv.h6
-rw-r--r--iwl-drv.c146
-rw-r--r--iwl-drv.h12
-rw-r--r--iwl-eeprom-parse.c875
-rw-r--r--iwl-eeprom-read.c394
-rw-r--r--iwl-eeprom-read.h12
-rw-r--r--iwl-fh.h85
-rw-r--r--iwl-io.c4
-rw-r--r--iwl-modparams.h21
-rw-r--r--iwl-nvm-parse.c223
-rw-r--r--iwl-nvm-parse.h23
-rw-r--r--iwl-nvm-utils.c118
-rw-r--r--iwl-nvm-utils.h (renamed from iwl-eeprom-parse.h)19
-rw-r--r--iwl-op-mode.h32
-rw-r--r--iwl-prph.h42
-rw-r--r--iwl-trans.c452
-rw-r--r--iwl-trans.h732
-rw-r--r--mvm/coex.c124
-rw-r--r--mvm/constants.h26
-rw-r--r--mvm/d3.c649
-rw-r--r--mvm/debugfs-vif.c270
-rw-r--r--mvm/debugfs.c418
-rw-r--r--mvm/debugfs.h44
-rw-r--r--mvm/ftm-initiator.c277
-rw-r--r--mvm/ftm-responder.c49
-rw-r--r--mvm/fw.c524
-rw-r--r--mvm/link.c953
-rw-r--r--mvm/mac-ctxt.c117
-rw-r--r--mvm/mac80211.c1421
-rw-r--r--mvm/mld-key.c42
-rw-r--r--mvm/mld-mac.c11
-rw-r--r--mvm/mld-mac80211.c672
-rw-r--r--mvm/mld-sta.c98
-rw-r--r--mvm/mvm.h587
-rw-r--r--mvm/nvm.c19
-rw-r--r--mvm/offloading.c8
-rw-r--r--mvm/ops.c310
-rw-r--r--mvm/phy-ctxt.c115
-rw-r--r--mvm/power.c179
-rw-r--r--mvm/rfi.c8
-rw-r--r--mvm/rs-fw.c33
-rw-r--r--mvm/rx.c329
-rw-r--r--mvm/rxmq.c471
-rw-r--r--mvm/scan.c694
-rw-r--r--mvm/sf.c5
-rw-r--r--mvm/sta.c258
-rw-r--r--mvm/sta.h47
-rw-r--r--mvm/tdls.c39
-rw-r--r--mvm/testmode.h92
-rw-r--r--mvm/time-event.c470
-rw-r--r--mvm/time-event.h21
-rw-r--r--mvm/tt.c146
-rw-r--r--mvm/tx.c325
-rw-r--r--mvm/utils.c99
-rw-r--r--pcie/ctxt-info-gen3.c38
-rw-r--r--pcie/ctxt-info.c6
-rw-r--r--pcie/drv.c211
-rw-r--r--pcie/internal.h360
-rw-r--r--pcie/rx.c47
-rw-r--r--pcie/trans-gen2.c29
-rw-r--r--pcie/trans.c468
-rw-r--r--pcie/tx-gen2.c1195
-rw-r--r--pcie/tx.c1268
-rw-r--r--queue/tx.c1883
-rw-r--r--queue/tx.h183
-rw-r--r--tests/Makefile7
-rw-r--r--tests/devinfo.c78
-rw-r--r--tests/module.c10
120 files changed, 14566 insertions, 8920 deletions
diff --git a/cfg/22000.c b/cfg/22000.c
index d594694206b3..2e2fcb3807ef 100644
--- a/cfg/22000.c
+++ b/cfg/22000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -13,7 +13,7 @@
#define IWL_22000_UCODE_API_MAX 77
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 50
+#define IWL_22000_UCODE_API_MIN 77
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
diff --git a/cfg/ax210.c b/cfg/ax210.c
index 8d5f9dce71d5..975e8aed1526 100644
--- a/cfg/ax210.c
+++ b/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_AX210_UCODE_API_MAX 83
+#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
-#define IWL_AX210_UCODE_API_MIN 59
+#define IWL_AX210_UCODE_API_MIN 77
/* NVM versions */
#define IWL_AX210_NVM_VERSION 0x0a1d
@@ -299,3 +299,9 @@ MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
diff --git a/cfg/bz.c b/cfg/bz.c
index b9893b22e41d..3b6b8b410be5 100644
--- a/cfg/bz.c
+++ b/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 83
+#define IWL_BZ_UCODE_API_MAX 92
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 80
+#define IWL_BZ_UCODE_API_MIN 90
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d
@@ -129,17 +129,11 @@ static const struct iwl_base_params iwl_bz_base_params = {
IWL_DEVICE_BZ_COMMON, \
.ht_params = &iwl_22000_ht_params
-#define IWL_DEVICE_GL_A \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_gl_a_ht_params
-
/*
- * If the device doesn't support HE, no need to have that many buffers.
- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
+ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
* A-MPDU, with additional overhead to account for processing time.
*/
-#define IWL_NUM_RBDS_NON_HE 512
-#define IWL_NUM_RBDS_BZ_HE 4096
+#define IWL_NUM_RBDS_BZ_EHT (512 * 16)
const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
.device_family = IWL_DEVICE_FAMILY_BZ,
@@ -155,21 +149,24 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
+const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
.fw_name_mac = "bz",
.uhb_supported = true,
IWL_DEVICE_BZ,
- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_BZ_HE,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
};
const struct iwl_cfg iwl_cfg_gl = {
.fw_name_mac = "gl",
.uhb_supported = true,
IWL_DEVICE_BZ,
- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_BZ_HE,
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
+ .num_rbds = IWL_NUM_RBDS_BZ_EHT,
};
@@ -181,3 +178,5 @@ MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
diff --git a/cfg/sc.c b/cfg/sc.c
index ad283fd22e2a..4ccb0b7bdc20 100644
--- a/cfg/sc.c
+++ b/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 83
+#define IWL_SC_UCODE_API_MAX 92
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 82
+#define IWL_SC_UCODE_API_MIN 90
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@@ -33,6 +33,10 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
+#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
+#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
+#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
+#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
@@ -48,6 +52,14 @@
IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -124,15 +136,16 @@ static const struct iwl_base_params iwl_sc_base_params = {
#define IWL_DEVICE_SC \
IWL_DEVICE_BZ_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
/*
- * If the device doesn't support HE, no need to have that many buffers.
- * These sizes were picked according to 8 MSDUs inside 256 A-MSDUs in an
+ * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
* A-MPDU, with additional overhead to account for processing time.
*/
-#define IWL_NUM_RBDS_NON_HE 512
-#define IWL_NUM_RBDS_SC_HE 4096
+#define IWL_NUM_RBDS_SC_EHT (512 * 16)
const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.device_family = IWL_DEVICE_FAMILY_SC,
@@ -151,10 +164,21 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
- .uhb_supported = true,
IWL_DEVICE_SC,
- .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_SC_HE,
+};
+
+const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
+
+const struct iwl_cfg iwl_cfg_sc2 = {
+ .fw_name_mac = "sc2",
+ IWL_DEVICE_SC,
+};
+
+const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
+
+const struct iwl_cfg iwl_cfg_sc2f = {
+ .fw_name_mac = "sc2f",
+ IWL_DEVICE_SC,
};
MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
@@ -164,3 +188,7 @@ MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/fw/acpi.c b/fw/acpi.c
index dfe8357036eb..8c8880b44827 100644
--- a/fw/acpi.c
+++ b/fw/acpi.c
@@ -1,10 +1,9 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2023 Intel Corporation
+ * Copyright (C) 2019-2024 Intel Corporation
*/
#include <linux/uuid.h>
-#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -13,63 +12,22 @@
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
-IWL_EXPORT_SYMBOL(iwl_guid);
-const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
- 0x81, 0x4F, 0x75, 0xE4,
- 0xDD, 0x26, 0xB5, 0xFD);
-IWL_EXPORT_SYMBOL(iwl_rfi_guid);
-
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "GOOGLE-ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- },
- },
- {}
+static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
+ [DSM_FUNC_QUERY] = sizeof(u32),
+ [DSM_FUNC_DISABLE_SRD] = sizeof(u8),
+ [DSM_FUNC_ENABLE_INDONESIA_5G2] = sizeof(u8),
+ [DSM_FUNC_ENABLE_6E] = sizeof(u32),
+ [DSM_FUNC_REGULATORY_CONFIG] = sizeof(u32),
+ /* Not supported in driver */
+ [5] = (size_t)0,
+ [DSM_FUNC_11AX_ENABLEMENT] = sizeof(u32),
+ [DSM_FUNC_ENABLE_UNII4_CHAN] = sizeof(u32),
+ [DSM_FUNC_ACTIVATE_CHANNEL] = sizeof(u32),
+ [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
+ [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
+ [DSM_FUNC_RFI_CONFIG] = sizeof(u32),
+ [DSM_FUNC_ENABLE_11BE] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -195,46 +153,41 @@ out:
}
/*
- * Evaluate a DSM with no arguments and a u8 return value,
+ * This function receives a DSM function number, calculates its expected size
+ * according to Intel BIOS spec, and fills in the value in a 32-bit field.
+ * In case the expected size is smaller than 32-bit, padding will be added.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ size_t expected_size;
+ u64 tmp;
int ret;
- u64 val;
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u8));
+ BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (ret < 0)
- return ret;
-
- /* cast val (u64) to be u8 */
- *value = (u8)val;
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+ if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size)))
+ return -EINVAL;
-/*
- * Evaluate a DSM with no arguments and a u32 return value,
- */
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- int ret;
- u64 val;
+ expected_size = acpi_dsm_size[func];
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u32));
+ /* Currently all ACPI DSMs are either 8-bit or 32-bit */
+ if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
+ return -EOPNOTSUPP;
- if (ret < 0)
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
+ &iwl_guid, &tmp, expected_size);
+ if (ret)
return ret;
- /* cast val (u64) to be u32 */
- *value = (u32)val;
+ if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
+ (expected_size == sizeof(u32) && tmp != (u32)tmp))
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM value overflows the expected size, truncating\n");
+ *value = (u32)tmp;
+
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
static union acpi_object *
iwl_acpi_get_wifi_pkg_range(struct device *dev,
@@ -302,9 +255,8 @@ iwl_acpi_get_wifi_pkg(struct device *dev,
tbl_rev);
}
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -326,22 +278,9 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
- u16 override_iec =
- (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
- u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
- ACPI_WTAS_ENABLE_IEC_POS;
- u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
-
- enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- if (fw_ver <= 3) {
- cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
- cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
- } else {
- cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
- cmd->v4.override_tas_iec = (u8)override_iec;
- cmd->v4.enable_tas_iec = (u8)enabled_iec;
- }
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ tas_selection);
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -360,22 +299,16 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
- APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
- IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
- block_list_size);
- ret = -EINVAL;
- goto out_free;
- }
for (i = 0; i < block_list_size; i++) {
u32 country;
@@ -389,7 +322,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->v4.block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -398,19 +331,19 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
-int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -434,46 +367,42 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev)
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit)
{
union acpi_object *data, *wifi_pkg;
- u64 dflt_pwr_limit;
- int tbl_rev;
+ int tbl_rev, ret = -EINVAL;
- data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
- if (IS_ERR(data)) {
- dflt_pwr_limit = 0;
+ *dflt_pwr_limit = 0;
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data))
goto out;
- }
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
- wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
- dflt_pwr_limit = 0;
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER)
goto out_free;
- }
- dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ *dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ ret = 0;
out_free:
kfree(data);
out:
- return dflt_pwr_limit;
+ return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -494,11 +423,11 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-static int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled, u8 num_chains, u8 num_sub_bands)
+static int iwl_acpi_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled, u8 num_chains,
+ u8 num_sub_bands)
{
int i, j, idx = 0;
@@ -506,8 +435,8 @@ static int iwl_sar_set_profile(union acpi_object *table,
* The table from ACPI is flat, but we store it in a
* structured array.
*/
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+ for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
+ for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
@@ -530,73 +459,7 @@ static int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
- int i, j;
-
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
- struct iwl_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &fwrt->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
- profs[i]);
- /*
- * if one of the profiles is disabled, we
- * ignore all of them and return 1 to
- * differentiate disabled from other failures.
- */
- return 1;
- }
-
- IWL_DEBUG_INFO(fwrt,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < n_subbands; j++) {
- per_chain[i * n_subbands + j] =
- cpu_to_le16(prof->chains[i].subbands[j]);
- IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
- j, prof->chains[i].subbands[j]);
- }
- }
-
- return 0;
-}
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int i, ret = 0;
-
- for (i = 0; i < n_tables; i++) {
- ret = iwl_sar_fill_table(fwrt,
- &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
- n_subbands, prof_a, prof_b);
- if (ret)
- break;
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
-
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
int ret, tbl_rev;
@@ -613,7 +476,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -629,7 +492,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -645,7 +508,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -675,16 +538,15 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
- flags & IWL_SAR_ENABLE_MSK,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
+ num_chains, num_sub_bands);
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
@@ -702,7 +564,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -718,7 +580,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -734,7 +596,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -762,7 +624,7 @@ read_table:
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ if (n_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -771,13 +633,15 @@ read_table:
pos = 3;
for (i = 0; i < n_profiles; i++) {
+ union acpi_object *table = &wifi_pkg->package.elements[pos];
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
- ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
- &fwrt->sar_profiles[i + 1], enabled,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table,
+ &fwrt->sar_profiles[i + 1],
+ enabled, num_chains,
+ num_sub_bands);
if (ret < 0)
break;
@@ -789,9 +653,8 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
@@ -806,7 +669,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
- .min_profiles = 3,
+ .min_profiles = BIOS_GEO_MIN_PROFILE_NUM,
},
{
.revisions = BIT(2),
@@ -862,22 +725,25 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
entry = &wifi_pkg->package.elements[entry_idx];
entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
- entry->integer.value > num_profiles) {
+ entry->integer.value > num_profiles ||
+ entry->integer.value <
+ rev_data[idx].min_profiles) {
ret = -EINVAL;
goto out_free;
}
- num_profiles = entry->integer.value;
/*
- * this also validates >= min_profiles since we
- * otherwise wouldn't have gotten the data when
- * looking up in ACPI
+ * Check to see if we received package count
+ * same as max # of profiles
*/
if (wifi_pkg->package.count !=
hdr_size + profile_size * num_profiles) {
ret = -EINVAL;
goto out_free;
}
+
+ /* Number of valid profiles */
+ num_profiles = entry->integer.value;
}
goto read_table;
}
@@ -892,7 +758,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
- for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+ for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) {
union acpi_object *entry;
/*
@@ -916,7 +782,7 @@ read_table:
entry->integer.value;
}
- for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+ for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
@@ -944,123 +810,26 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
-
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- /*
- * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
- * earlier firmware versions. Unfortunately, we don't have a
- * TLV API flag to rely on, so rely on the major version which
- * is in the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles)
-{
- int i, j;
-
- if (!fwrt->geo_enabled)
- return -ENODATA;
-
- if (!iwl_sar_geo_support(fwrt))
- return -EOPNOTSUPP;
-
- for (i = 0; i < n_profiles; i++) {
- for (j = 0; j < n_bands; j++) {
- struct iwl_per_chain_offset *chain =
- &table[i * n_bands + j];
-
- chain->max_tx_power =
- cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
- chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
- chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
- IWL_DEBUG_RADIO(fwrt,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j,
- fwrt->geo_profiles[i].bands[j].chains[0],
- fwrt->geo_profiles[i].bands[j].chains[1],
- fwrt->geo_profiles[i].bands[j].max);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- u8 value;
- __le32 config_bitmap = 0;
-
- /*
- ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
- */
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2,
- &iwl_guid, &value);
-
- if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
-
- /*
- ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
- */
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_DISABLE_SRD,
- &iwl_guid, &value);
- if (!ret) {
- if (value == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
- else if (value == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
- }
-
- return config_bitmap;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands = 0;
int idx = 2;
- u8 cmd_ver;
-
- fwrt->ppag_flags = 0;
- fwrt->ppag_table_valid = false;
data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
+ if (tbl_rev >= 1 && tbl_rev <= 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
+ "Reading PPAG table (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
@@ -1083,6 +852,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto read_table;
}
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
read_table:
fwrt->ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
@@ -1092,19 +864,8 @@ read_table:
goto out_free;
}
- fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) {
- ret = -EINVAL;
- goto out_free;
- }
- if (!fwrt->ppag_flags && cmd_ver <= 3) {
- ret = 0;
- goto out_free;
- }
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
+ fwrt->ppag_ver);
/*
* read, verify gain values and save them into the PPAG table.
@@ -1122,132 +883,15 @@ read_table:
}
fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
- /* from ver 4 the fw deals with out of range values */
- if (cmd_ver >= 4)
- continue;
- if ((j == 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- ret = -EINVAL;
- goto out_free;
- }
}
}
- fwrt->ppag_table_valid = true;
ret = 0;
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
-
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size)
-{
- u8 cmd_ver;
- int i, j, num_sub_bands;
- s8 *gain;
-
- /* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
- return -EOPNOTSUPP;
-
- if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(fwrt,
- "PPAG capability not supported by FW, command not sent.\n");
- return -EINVAL;
- }
-
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) {
- IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
- return -EINVAL;
- }
-
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
- IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd->v1.gain[0];
- *cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
- /* in this case FW supports revision 0 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
- }
- } else if (cmd_ver >= 2 && cmd_ver <= 4) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
- /* in this case FW supports revisions 1 or 2 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is 0, send padded table\n");
- }
- } else {
- IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
- return -EINVAL;
- }
-
- /* ppag mode */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits were read from bios: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
- if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver == 2)) {
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
- } else {
- IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
- }
-
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits going to be sent: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- fwrt->ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
-{
-
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(fwrt,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- fwrt->ppag_flags = 0;
- return false;
- }
-
- return true;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters)
@@ -1260,7 +904,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (IS_ERR(data))
return;
- /* try to read wtas table revision 1 or revision 0*/
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
@@ -1270,13 +913,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (tbl_rev != 0)
goto out_free;
- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
- return;
+ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
tmp.filter_cfg_chains[i] =
- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
+ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
@@ -1285,3 +929,72 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
+
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_GLAI_METHOD);
+ if (IS_ERR(data))
+ return;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_GLAI_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid GLAI revision: %d\n", tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value > ACPI_GLAI_MAX_STATUS)
+ goto out_free;
+
+ fwrt->uefi_tables_lock_status =
+ wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt,
+ "Loaded UEFI WIFI GUID lock status: %d from ACPI\n",
+ fwrt->uefi_tables_lock_status);
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
+
+int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret = -ENOENT;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WBEM_METHOD);
+ if (IS_ERR(data))
+ return ret;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WBEM_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != IWL_ACPI_WBEM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI WBEM revision:%d\n",
+ tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
+
+ *value = wifi_pkg->package.elements[1].integer.value &
+ IWL_ACPI_WBEM_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from ACPI\n");
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
diff --git a/fw/acpi.h b/fw/acpi.h
index c36c62d6414d..bb88398a6987 100644
--- a/fw/acpi.h
+++ b/fw/acpi.h
@@ -7,6 +7,7 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/regulatory.h"
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
@@ -25,6 +26,8 @@
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
+#define ACPI_GLAI_METHOD "GLAI"
+#define ACPI_WBEM_METHOD "WBEM"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -56,181 +59,100 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
+#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
#define ACPI_GEO_NUM_BANDS_REV2 3
-#define ACPI_GEO_NUM_CHAINS 2
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
/*
+ * One element for domain type,
+ * and one for enablement of Wi-Fi 320MHz per MCC
+ */
+#define ACPI_WBEM_WIFI_DATA_SIZE 2
+/*
+ * One element for domain type,
+ * and one for the status
+ */
+#define ACPI_GLAI_WIFI_DATA_SIZE 2
+#define ACPI_GLAI_MAX_STATUS 2
+/*
* TAS size: 1 elelment for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
-#define APCI_WTAS_BLACK_LIST_MAX 16
-#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WTAS_ENABLED_MSK 0x1
-#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
-#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
-#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
-#define ACPI_WTAS_ENABLE_IEC_POS 0x2
-#define ACPI_WTAS_USA_UHB_MSK BIT(16)
-#define ACPI_WTAS_USA_UHB_POS 16
-
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX)
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
-/* PPAG gain value bounds in 1/8 dBm */
-#define ACPI_PPAG_MIN_LB -16
-#define ACPI_PPAG_MAX_LB 24
-#define ACPI_PPAG_MIN_HB -16
-#define ACPI_PPAG_MAX_HB 40
-#define ACPI_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1
-/*
- * The profile for revision 2 is a superset of revision 1, which is in
- * turn a superset of revision 0. So we can store all revisions
- * inside revision 2, which is what we represent here.
- */
-struct iwl_sar_profile_chain {
- u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-struct iwl_sar_profile {
- bool enabled;
- struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_geo_profile_band {
- u8 max;
- u8 chains[ACPI_GEO_NUM_CHAINS];
-};
-
-struct iwl_geo_profile {
- struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_ppag_chain {
- s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-enum iwl_dsm_funcs_rev_0 {
- DSM_FUNC_QUERY = 0,
- DSM_FUNC_DISABLE_SRD = 1,
- DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
- DSM_FUNC_ENABLE_6E = 3,
- DSM_FUNC_11AX_ENABLEMENT = 6,
- DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8,
- DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
-};
-
-enum iwl_dsm_values_srd {
- DSM_VALUE_SRD_ACTIVE,
- DSM_VALUE_SRD_PASSIVE,
- DSM_VALUE_SRD_DISABLE,
- DSM_VALUE_SRD_MAX
-};
-
-enum iwl_dsm_values_indonesia {
- DSM_VALUE_INDONESIA_DISABLE,
- DSM_VALUE_INDONESIA_ENABLE,
- DSM_VALUE_INDONESIA_RESERVED,
- DSM_VALUE_INDONESIA_MAX
-};
-
-/* DSM RFI uses a different GUID, so need separate definitions */
-
-#define DSM_RFI_FUNC_ENABLE 3
-
-enum iwl_dsm_values_rfi {
- DSM_VALUE_RFI_ENABLE,
- DSM_VALUE_RFI_DISABLE,
- DSM_VALUE_RFI_MAX
-};
+/* The Inidcator whether UEFI WIFI GUID tables are locked is read from ACPI */
+#define UEFI_WIFI_GUID_UNLOCKED 0
+
+#define ACPI_DSM_REV 0
+
+#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1))
+#define IWL_ACPI_WBEM_REVISION 0
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
extern const guid_t iwl_guid;
-extern const guid_t iwl_rfi_guid;
-
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value);
-
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @mcc: output buffer (3 bytes) that will get the MCC
*
* This function tries to read the current MCC from ACPI if available.
*/
-int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev);
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit);
/*
* iwl_acpi_get_eckv - read external clock validation from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @extl_clk: output var (2 bytes) that will get the clk indication.
*
* This function tries to read the external clock indication
* from ACPI if available.
*/
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b);
-
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles);
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
-
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters);
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
+
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value);
+
+int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -239,92 +161,66 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+static inline int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
return -ENOENT;
}
-static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+static inline int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
{
+ *dflt_pwr_limit = 0;
return 0;
}
-static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+static inline int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
return -ENOENT;
}
-static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
+static inline int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
return 1;
}
-static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- return false;
-}
-
-static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+static inline int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
{
return -ENOENT;
}
-static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- return 0;
-}
-
static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
- union iwl_ppag_table_cmd *cmd, int *cmd_size)
+/* macro since the second argument doesn't always exist */
+#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
+
+static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
- return -ENOENT;
}
-static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
- return false;
+ return -ENOENT;
}
-static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
{
+ return -ENOENT;
}
-
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/fw/api/alive.h b/fw/api/alive.h
index e00ab21e7358..ebe85fdf08d3 100644
--- a/fw/api/alive.h
+++ b/fw/api/alive.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -113,7 +113,7 @@ struct iwl_alive_ntf_v6 {
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
/**
- * enum iwl_extended_cfg_flag - commands driver may send before
+ * enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
@@ -126,7 +126,7 @@ enum iwl_extended_cfg_flags {
};
/**
- * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
* before finishing init flows
* @init_flags: values from iwl_extended_cfg_flags
*/
diff --git a/fw/api/binding.h b/fw/api/binding.h
index d9044ada6a43..2397fdc37fc5 100644
--- a/fw/api/binding.h
+++ b/fw/api/binding.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2020, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -77,7 +77,7 @@ struct iwl_time_quota_data_v1 {
} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
/**
- * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
* Note: on non-CDB the fourth one is the auxilary mac and is
diff --git a/fw/api/coex.h b/fw/api/coex.h
index 3e81e9369224..b97a43353779 100644
--- a/fw/api/coex.h
+++ b/fw/api/coex.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2023-2024 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -76,73 +77,6 @@ struct iwl_bt_coex_ci_cmd {
__le32 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_2 */
-#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
- BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
- BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
-
-enum iwl_bt_mxbox_dw0 {
- BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
- BT_MBOX(0, LE_PROF1, 3, 1),
- BT_MBOX(0, LE_PROF2, 4, 1),
- BT_MBOX(0, LE_PROF_OTHER, 5, 1),
- BT_MBOX(0, CHL_SEQ_N, 8, 4),
- BT_MBOX(0, INBAND_S, 13, 1),
- BT_MBOX(0, LE_MIN_RSSI, 16, 4),
- BT_MBOX(0, LE_SCAN, 20, 1),
- BT_MBOX(0, LE_ADV, 21, 1),
- BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
- BT_MBOX(0, OPEN_CON_1, 28, 2),
-};
-
-enum iwl_bt_mxbox_dw1 {
- BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
- BT_MBOX(1, IP_SR, 4, 1),
- BT_MBOX(1, LE_MSTR, 5, 1),
- BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
- BT_MBOX(1, MSG_TYPE, 16, 3),
- BT_MBOX(1, SSN, 19, 2),
-};
-
-enum iwl_bt_mxbox_dw2 {
- BT_MBOX(2, SNIFF_ACT, 0, 3),
- BT_MBOX(2, PAG, 3, 1),
- BT_MBOX(2, INQUIRY, 4, 1),
- BT_MBOX(2, CONN, 5, 1),
- BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
- BT_MBOX(2, DISC, 13, 1),
- BT_MBOX(2, SCO_TX_ACT, 16, 2),
- BT_MBOX(2, SCO_RX_ACT, 18, 2),
- BT_MBOX(2, ESCO_RE_TX, 20, 2),
- BT_MBOX(2, SCO_DURATION, 24, 6),
-};
-
-enum iwl_bt_mxbox_dw3 {
- BT_MBOX(3, SCO_STATE, 0, 1),
- BT_MBOX(3, SNIFF_STATE, 1, 1),
- BT_MBOX(3, A2DP_STATE, 2, 1),
- BT_MBOX(3, ACL_STATE, 3, 1),
- BT_MBOX(3, MSTR_STATE, 4, 1),
- BT_MBOX(3, OBX_STATE, 5, 1),
- BT_MBOX(3, A2DP_SRC, 6, 1),
- BT_MBOX(3, OPEN_CON_2, 8, 2),
- BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
- BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
- BT_MBOX(3, INBAND_P, 13, 1),
- BT_MBOX(3, MSG_TYPE_2, 16, 3),
- BT_MBOX(3, SSN_2, 19, 2),
- BT_MBOX(3, UPDATE_REQUEST, 21, 1),
-};
-
-#define BT_MBOX_MSG(_notif, _num, _field) \
- ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
- >> BT_MBOX##_num##_##_field##_POS)
-
-#define BT_MBOX_PRINT(_num, _field, _end) \
- pos += scnprintf(buf + pos, bufsz - pos, \
- "\t%s: %d%s", \
- #_field, \
- BT_MBOX_MSG(notif, _num, _field), \
- true ? "\n" : ", ")
enum iwl_bt_activity_grading {
BT_OFF = 0,
BT_ON_NO_CONNECTION = 1,
@@ -170,7 +104,11 @@ enum iwl_bt_ci_compliance {
* @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
* @ttc_status: is TTC enabled - one bit per PHY
* @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
+ * The following fields are only for version 5, and are reserved in version 4:
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
@@ -182,7 +120,10 @@ struct iwl_bt_coex_profile_notif {
__le32 bt_activity_grading;
u8 ttc_status;
u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+ u8 wifi_loss_low_rssi;
+ u8 wifi_loss_mid_high_rssi;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4
+ * BT_COEX_PROFILE_NTFY_API_S_VER_5
+ */
#endif /* __iwl_fw_api_coex_h__ */
diff --git a/fw/api/commands.h b/fw/api/commands.h
index 13cb0d53a1a3..7544c4cb1a30 100644
--- a/fw/api/commands.h
+++ b/fw/api/commands.h
@@ -30,6 +30,8 @@
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
* &enum iwl_regulatory_and_nvm_subcmd_ids
* @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
+ * @STATISTICS_GROUP: Statistics group, uses command IDs from
+ * &enum iwl_statistics_subcmd_ids
*/
enum iwl_mvm_command_groups {
LEGACY_GROUP = 0x0,
@@ -44,6 +46,7 @@ enum iwl_mvm_command_groups {
PROT_OFFLOAD_GROUP = 0xb,
REGULATORY_AND_NVM_GROUP = 0xc,
DEBUG_GROUP = 0xf,
+ STATISTICS_GROUP = 0x10,
};
/**
@@ -617,9 +620,36 @@ enum iwl_system_subcmd_ids {
SYSTEM_FEATURES_CONTROL_CMD = 0xd,
/**
+ * @SYSTEM_STATISTICS_CMD: &struct iwl_system_statistics_cmd
+ */
+ SYSTEM_STATISTICS_CMD = 0xf,
+
+ /**
+ * @SYSTEM_STATISTICS_END_NOTIF: &struct iwl_system_statistics_end_notif
+ */
+ SYSTEM_STATISTICS_END_NOTIF = 0xfd,
+
+ /**
* @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
*/
RFI_DEACTIVATE_NOTIF = 0xff,
};
+/**
+ * enum iwl_statistics_subcmd_ids - Statistics group command IDs
+ */
+enum iwl_statistics_subcmd_ids {
+ /**
+ * @STATISTICS_OPER_NOTIF: Notification about operational
+ * statistics &struct iwl_system_statistics_notif_oper
+ */
+ STATISTICS_OPER_NOTIF = 0x0,
+
+ /**
+ * @STATISTICS_OPER_PART1_NOTIF: Notification about operational part1
+ * statistics &struct iwl_system_statistics_part1_notif_oper
+ */
+ STATISTICS_OPER_PART1_NOTIF = 0x1,
+};
+
#endif /* __iwl_fw_api_commands_h__ */
diff --git a/fw/api/config.h b/fw/api/config.h
index 4419631604b4..1fc65469990e 100644
--- a/fw/api/config.h
+++ b/fw/api/config.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -76,7 +76,7 @@ struct iwl_phy_specific_cfg {
} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
/**
- * struct iwl_phy_cfg_cmd - Phy configuration command
+ * struct iwl_phy_cfg_cmd_v1 - Phy configuration command
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
diff --git a/fw/api/d3.h b/fw/api/d3.h
index 72d461c47323..ffee7927cf26 100644
--- a/fw/api/d3.h
+++ b/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -42,7 +42,7 @@ struct iwl_d3_manager_config {
/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
/**
- * enum iwl_d3_proto_offloads - enabled protocol offloads
+ * enum iwl_proto_offloads - enabled protocol offloads
* @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
* @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
* @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid
@@ -195,7 +195,7 @@ struct iwl_wowlan_pattern_v1 {
#define IWL_WOWLAN_MAX_PATTERNS 20
/**
- * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
+ * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd_v1 {
/**
@@ -324,7 +324,7 @@ struct iwl_wowlan_patterns_cmd {
u8 n_patterns;
/**
- * @n_patterns: sta_id
+ * @sta_id: sta_id
*/
u8 sta_id;
@@ -397,6 +397,8 @@ struct iwl_wowlan_config_cmd {
#define WOWLAN_GTK_KEYS_NUM 2
#define WOWLAN_IGTK_KEYS_NUM 2
#define WOWLAN_IGTK_MIN_INDEX 4
+#define WOWLAN_BIGTK_KEYS_NUM 2
+#define WOWLAN_BIGTK_MIN_INDEX 6
/*
* WOWLAN_TSC_RSC_PARAMS
@@ -621,9 +623,10 @@ struct iwl_wowlan_gtk_status_v3 {
* @ipn: the IGTK packet number (replay counter)
* @key_len: IGTK length, if set to 0, the key is not available
* @key_flags: information about the key:
- * bits[0]: key index assigned by the AP (0: index 4, 1: index 5)
- * bits[1:5]: IGTK index of the key in the internal DB
- * bit[6]: Set iff this is the currently used IGTK
+ * bits[0]: key index assigned by the AP (0: index 4, 1: index 5)
+ * (0: index 6, 1: index 7 with bigtk)
+ * bits[1:5]: IGTK index of the key in the internal DB
+ * bit[6]: Set iff this is the currently used IGTK
*/
struct iwl_wowlan_igtk_status {
u8 key[WOWLAN_KEY_MAX_SIZE];
@@ -808,7 +811,7 @@ struct iwl_wowlan_info_notif_v1 {
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_1 */
/**
- * struct iwl_wowlan_info_notif - WoWLAN information notification
+ * struct iwl_wowlan_info_notif_v2 - WoWLAN information notification
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
@@ -824,7 +827,7 @@ struct iwl_wowlan_info_notif_v1 {
* @station_id: station id
* @reserved2: reserved
*/
-struct iwl_wowlan_info_notif {
+struct iwl_wowlan_info_notif_v2 {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -840,6 +843,92 @@ struct iwl_wowlan_info_notif {
u8 reserved2[2];
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */
+/* MAX MLO keys of non-active links that can arrive in the notification */
+#define WOWLAN_MAX_MLO_KEYS 18
+
+/**
+ * enum iwl_wowlan_mlo_gtk_type - GTK types
+ * @WOWLAN_MLO_GTK_KEY_TYPE_GTK: GTK
+ * @WOWLAN_MLO_GTK_KEY_TYPE_IGTK: IGTK
+ * @WOWLAN_MLO_GTK_KEY_TYPE_BIGTK: BIGTK
+ * @WOWLAN_MLO_GTK_KEY_NUM_TYPES: number of key types
+ */
+enum iwl_wowlan_mlo_gtk_type {
+ WOWLAN_MLO_GTK_KEY_TYPE_GTK,
+ WOWLAN_MLO_GTK_KEY_TYPE_IGTK,
+ WOWLAN_MLO_GTK_KEY_TYPE_BIGTK,
+ WOWLAN_MLO_GTK_KEY_NUM_TYPES
+}; /* WOWLAN_MLO_GTK_KEY_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_wowlan_mlo_gtk_flag - MLO GTK flags
+ * @WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK: 0 for len 16, 1 for len 32
+ * @WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK: key id (ranges from 0 to 7)
+ * @WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK: spec link id of the key
+ * @WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK: &enum iwl_wowlan_mlo_gtk_type
+ * @WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK: is this the last given key per
+ * key-type / link-id - the currently used key
+ */
+enum iwl_wowlan_mlo_gtk_flag {
+ WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK = 0x0001,
+ WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK = 0x000E,
+ WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK = 0x00F0,
+ WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK = 0x0300,
+ WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK = 0x0400
+}; /* WOWLAN_MLO_GTK_FLAG_API_E_VER_1 */
+
+/**
+ * struct iwl_wowlan_mlo_gtk - MLO GTK info
+ * @key: key material
+ * @flags: &enum iwl_wowlan_mlo_gtk_flag
+ * @pn: packet number
+ */
+struct iwl_wowlan_mlo_gtk {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ __le16 flags;
+ u8 pn[6];
+} __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_info_notif - WoWLAN information notification
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @bigtk: BIGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched patterns
+ * @reserved1: reserved
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ * in suspend state
+ * @station_id: station id
+ * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
+ * following this notif, or reserved in version < 4
+ * @reserved2: reserved
+ * @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4
+ */
+struct iwl_wowlan_info_notif {
+ struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+ struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 reserved1;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ __le32 transmitted_ndps;
+ __le32 received_beacons;
+ u8 tid_tear_down;
+ u8 station_id;
+ u8 num_mlo_link_keys;
+ u8 reserved2;
+ struct iwl_wowlan_mlo_gtk mlo_gtks[];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */
+
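For notification version 4 the num_mlo_link_keys field gives the number of iwl_wowlan_mlo_gtk entries that follow in mlo_gtks[], and the flags word of each entry is decoded with the WOWLAN_MLO_GTK_FLAG_* masks above. A minimal, illustrative parser (shift amounts are derived from the mask values; error handling omitted):

/* Sketch: walk the MLO key array attached to the notification. */
static void example_parse_mlo_keys(const struct iwl_wowlan_info_notif *notif)
{
	int i;

	for (i = 0; i < notif->num_mlo_link_keys && i < WOWLAN_MAX_MLO_KEYS; i++) {
		const struct iwl_wowlan_mlo_gtk *mlo = &notif->mlo_gtks[i];
		u16 flags = le16_to_cpu(mlo->flags);
		u8 key_id = (flags & WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK) >> 1;
		u8 link_id = (flags & WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK) >> 4;
		u8 key_type = (flags & WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK) >> 8;
		size_t key_len = (flags & WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK) ? 32 : 16;

		pr_debug("MLO key: link %u id %u type %u len %zu\n",
			 link_id, key_id, key_type, key_len);
	}
}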
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
* @wake_packet_length: wakeup packet length
diff --git a/fw/api/datapath.h b/fw/api/datapath.h
index 751b596ea1a5..2ab38eaeb290 100644
--- a/fw/api/datapath.h
+++ b/fw/api/datapath.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2024 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
@@ -90,6 +91,12 @@ enum iwl_data_path_subcmd_ids {
SEC_KEY_CMD = 0x18,
/**
+ * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
+ * uses &struct iwl_mvm_esr_mode_notif
+ */
+ ESR_MODE_NOTIF = 0xF3,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
@@ -101,7 +108,7 @@ enum iwl_data_path_subcmd_ids {
RX_NO_DATA_NOTIF = 0xF5,
/**
- * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
+ * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
@@ -224,28 +231,33 @@ struct iwl_synced_time_rsp {
#define PTP_CTX_MAX_DATA_SIZE 128
/**
- * struct iwl_time_msmt_ptp_ctx - Vendor specific information element
+ * struct iwl_time_msmt_ptp_ctx - Vendor specific element
* to allow a space for flexibility for the userspace App
*
- * @element_id: element id of vendor specific ie
- * @length: length of vendor specific ie
- * @reserved: for alignment
- * @data: vendor specific data blob
+ * @ftm: FTM specific vendor element
+ * @ftm.element_id: element id of vendor specific ie
+ * @ftm.length: length of vendor specific ie
+ * @ftm.reserved: for alignment
+ * @ftm.data: vendor specific data blob
+ * @tm: TM specific vendor element
+ * @tm.element_id: element id of vendor specific ie
+ * @tm.length: length of vendor specific ie
+ * @tm.data: vendor specific data blob
*/
struct iwl_time_msmt_ptp_ctx {
- /* Differentiate between FTM and TM specific Vendor IEs */
+ /* Differentiate between FTM and TM specific Vendor elements */
union {
struct {
u8 element_id;
u8 length;
__le16 reserved;
u8 data[PTP_CTX_MAX_DATA_SIZE];
- } ftm; /* FTM specific vendor IE */
+ } ftm;
struct {
u8 element_id;
u8 length;
u8 data[PTP_CTX_MAX_DATA_SIZE];
- } tm; /* TM specific vendor IE */
+ } tm;
};
} __packed /* PTP_CTX_VER_1 */;
@@ -524,6 +536,10 @@ struct iwl_rx_baid_cfg_cmd_remove {
/**
* struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
* @action: the action, from &enum iwl_rx_baid_action
+ * @alloc: allocation data
+ * @modify: modify data
+ * @remove_v1: remove data (version 1)
+ * @remove: remove data
*/
struct iwl_rx_baid_cfg_cmd {
__le32 action;
@@ -558,6 +574,7 @@ enum iwl_scd_queue_cfg_operation {
/**
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u: union depending on command usage
* @u.add.sta_mask: station mask
* @u.add.tid: TID
* @u.add.reserved: reserved
@@ -627,6 +644,7 @@ enum iwl_sec_key_flags {
/**
* struct iwl_sec_key_cmd - security key command
* @action: action from &enum iwl_ctxt_action
+ * @u: union depending on command type
* @u.add.sta_mask: station mask for the new key
* @u.add.key_id: key ID (0-7) for the new key
* @u.add.key_flags: key flags per &enum iwl_sec_key_flags
diff --git a/fw/api/dbg-tlv.h b/fw/api/dbg-tlv.h
index ba538d70985f..855cd13a181e 100644
--- a/fw/api/dbg-tlv.h
+++ b/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -13,6 +13,7 @@
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
+#define IWL_FW_INI_PRESET_DISABLE 0xff
/**
* struct iwl_fw_ini_hcmd
@@ -42,6 +43,30 @@ struct iwl_fw_ini_header {
} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
/**
+ * struct iwl_fw_ini_addr_size - Base address and size that defines
+ * a chunk of memory
+ *
+ * @addr: the base address (fixed size - 4 bytes)
+ * @size: the size to read
+ */
+struct iwl_fw_ini_addr_size {
+ __le32 addr;
+ __le32 size;
+} __packed; /* FW_TLV_DEBUG_ADDR_SIZE_VER_1 */
+
+/**
+ * struct iwl_fw_ini_region_dev_addr_range - Configuration to read
+ * device address range
+ *
+ * @offset: offset to add to the base address of each chunk
+ * The addrs[] array will be treated as an array of &iwl_fw_ini_addr_size -
+ * an array of (addr, size) pairs.
+ */
+struct iwl_fw_ini_region_dev_addr_range {
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_RANGE_API_S_VER_1 */
+
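For the range-type regions the trailing addrs[] payload of the region TLV is reinterpreted as the (addr, size) pairs described above, with @offset added to every base address. A hedged sketch of that walk (num_pairs is assumed to be derived from the TLV length by the caller):

/* Sketch: iterate the (addr, size) pairs attached to a range-type region TLV. */
static void example_walk_addr_ranges(const struct iwl_fw_ini_region_tlv *reg,
				     u32 num_pairs)
{
	const struct iwl_fw_ini_addr_size *pairs = (const void *)reg->addrs;
	u32 offset = le32_to_cpu(reg->dev_addr_range.offset);
	u32 i;

	for (i = 0; i < num_pairs; i++) {
		u32 base = le32_to_cpu(pairs[i].addr) + offset;
		u32 size = le32_to_cpu(pairs[i].size);

		pr_debug("dump range: 0x%x, %u bytes\n", base, size);
	}
}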
+/**
* struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses
*
* @size: size of each memory chunk
@@ -122,28 +147,34 @@ struct iwl_fw_ini_region_internal_buffer {
* Configures parameters for region data collection
*
* @hdr: debug header
- * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @id: region id. Max id is %IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
* @sub_type: region sub type
* @sub_type_ver: region sub type version
* @reserved: not in use
* @name: region name
* @dev_addr: device address configuration. Used by
- * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
- * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
- * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
- * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
- * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
- * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
- * &IWL_FW_INI_REGION_RXF
+ * %IWL_FW_INI_REGION_DEVICE_MEMORY, %IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * %IWL_FW_INI_REGION_PERIPHERY_PHY, %IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * %IWL_FW_INI_REGION_PAGING, %IWL_FW_INI_REGION_CSR,
+ * %IWL_FW_INI_REGION_DRAM_IMR and %IWL_FW_INI_REGION_PCI_IOSF_CONFIG
+ * %IWL_FW_INI_REGION_DBGI_SRAM, %FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
+ * %IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP,
+ * @dev_addr_range: device address range configuration. Used by
+ * %IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and
+ * %IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE
+ * @fifos: fifos configuration. Used by %IWL_FW_INI_REGION_TXF and
+ * %IWL_FW_INI_REGION_RXF
* @err_table: error table configuration. Used by
- * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
- * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * %IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * %IWL_FW_INI_REGION_UMAC_ERROR_TABLE
* @internal_buffer: internal monitor buffer configuration. Used by
- * &IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * %IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * @special_mem: special device memory region, used by
+ * %IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY
* @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
- * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
- * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * Used by %IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by %IWL_FW_INI_REGION_TLV
* @addrs: array of addresses attached to the end of the region tlv
*/
struct iwl_fw_ini_region_tlv {
@@ -156,6 +187,7 @@ struct iwl_fw_ini_region_tlv {
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_dev_addr dev_addr;
+ struct iwl_fw_ini_region_dev_addr_range dev_addr_range;
struct iwl_fw_ini_region_fifos fifos;
struct iwl_fw_ini_region_err_table err_table;
struct iwl_fw_ini_region_internal_buffer internal_buffer;
@@ -261,7 +293,7 @@ struct iwl_fw_ini_addr_val {
} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */
/**
- * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory.
+ * struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory.
*
* @hdr: debug header
* @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point
@@ -289,7 +321,7 @@ struct iwl_fw_ini_conf_set_tlv {
* @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
- * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration supported
*/
enum iwl_fw_ini_config_set_type {
@@ -330,6 +362,7 @@ enum iwl_fw_ini_allocation_id {
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
* @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
+ * @IWL_FW_INI_LOCATION_NUM: number of valid locations
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
@@ -361,6 +394,9 @@ enum iwl_fw_ini_buffer_location {
* @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
* @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory
* @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM
+ * @IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE: a range of periphery registers of MAC
+ * @IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE: a range of periphery registers of PHY
+ * @IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP: periphery registers of SNPS DPHYIP
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
@@ -383,6 +419,9 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY,
IWL_FW_INI_REGION_DBGI_SRAM,
+ IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE,
+ IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE,
+ IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP,
IWL_FW_INI_REGION_NUM
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
@@ -403,6 +442,7 @@ enum iwl_fw_ini_region_device_memory_subtype {
* Hard coded time points in which the driver can send hcmd or perform dump
* collection
*
+ * @IWL_FW_INI_TIME_POINT_INVALID: invalid timepoint
* @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW
* @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif
* @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence
@@ -432,6 +472,10 @@ enum iwl_fw_ini_region_device_memory_subtype {
* @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed
* @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx
* @IWL_FW_INI_TIME_POINT_DEASSOC: de association
+ * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ: request to override preset
+ * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset
+ * request
+ * @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list
* @IWL_FW_INI_TIME_POINT_NUM: number of time points
*/
enum iwl_fw_ini_time_point {
@@ -462,6 +506,9 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
IWL_FW_INI_TIME_POINT_FAKE_TX,
IWL_FW_INI_TIME_POINT_DEASSOC,
+ IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ,
+ IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START,
+ IWL_FW_INI_TIME_SCAN_FAILURE,
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
@@ -517,7 +564,7 @@ enum iwl_fw_ini_dump_policy {
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
- * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
*/
enum iwl_fw_ini_dump_type {
diff --git a/fw/api/debug.h b/fw/api/debug.h
index 8fef38139bf6..bea0f4668cc8 100644
--- a/fw/api/debug.h
+++ b/fw/api/debug.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_debug_h__
#define __iwl_fw_api_debug_h__
+#include "dbg-tlv.h"
/**
* enum iwl_debug_cmds - debug commands
@@ -30,6 +31,11 @@ enum iwl_debug_cmds {
*/
HOST_EVENT_CFG = 0x3,
/**
+ * @INVALID_WR_PTR_CMD: invalid write pointer, set in the TFD
+ * when it's not in use
+ */
+ INVALID_WR_PTR_CMD = 0x6,
+ /**
* @DBGC_SUSPEND_RESUME:
* DBGC suspend/resume commad. Uses a single dword as data:
* 0 - resume DBGC recording
@@ -55,6 +61,12 @@ enum iwl_debug_cmds {
*/
FW_DUMP_COMPLETE_CMD = 0xB,
/**
+ * @FW_CLEAR_BUFFER:
+ * clears the firmware's internal buffer
+ * no payload
+ */
+ FW_CLEAR_BUFFER = 0xD,
+ /**
* @MFU_ASSERT_DUMP_NTF:
* &struct iwl_mfu_assert_dump_notif
*/
@@ -377,13 +389,13 @@ struct iwl_buf_alloc_cmd {
#define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF
/**
- * struct iwL_dram_info - DRAM fragments allocation struct
+ * struct iwl_dram_info - DRAM fragments allocation struct
*
* Driver will fill in the first 1K(+) of the pointed DRAM fragment
*
* @first_word: magic word value
* @second_word: magic word value
- * @framfrags: DRAM fragmentaion detail
+ * @dram_frags: DRAM fragmentation detail
*/
struct iwl_dram_info {
__le32 first_word;
@@ -517,4 +529,26 @@ enum iwl_mvm_tas_statically_disabled_reason {
TAS_DISABLED_REASON_MAX,
}; /*_TAS_STATICALLY_DISABLED_REASON_E*/
+/**
+ * enum iwl_fw_dbg_config_cmd_type - types of FW debug config command
+ * @DEBUG_TOKEN_CONFIG_TYPE: token config type
+ */
+enum iwl_fw_dbg_config_cmd_type {
+ DEBUG_TOKEN_CONFIG_TYPE = 0x2B,
+}; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */
+
+/* this token disables debug asserts in the firmware */
+#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001
+
+/**
+ * struct iwl_fw_dbg_config_cmd - configure FW debug
+ *
+ * @type: according to &enum iwl_fw_dbg_config_cmd_type
+ * @conf: FW configuration
+ */
+struct iwl_fw_dbg_config_cmd {
+ __le32 type;
+ __le32 conf;
+} __packed; /* LDBG_CFG_CMD_API_S_VER_7 */
+
#endif /* __iwl_fw_api_debug_h__ */
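A purely illustrative payload for the debug config command added above, pairing the token config type with the assert-disable token:

/* Sketch only: LDBG config payload carrying the debug token. */
static const struct iwl_fw_dbg_config_cmd example_dbg_config = {
	.type = cpu_to_le32(DEBUG_TOKEN_CONFIG_TYPE),
	.conf = cpu_to_le32(IWL_FW_DBG_CONFIG_TOKEN),
};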
diff --git a/fw/api/location.h b/fw/api/location.h
index b044990c7b87..30a54c7fa001 100644
--- a/fw/api/location.h
+++ b/fw/api/location.h
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@@ -390,10 +391,62 @@ struct iwl_tof_responder_config_cmd_v9 {
__le16 max_time_between_msr;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @bss_color: current AP bss_color
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @band: current AP band
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @r2i_ndp_params: parameters for R2I NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @i2r_ndp_params: parameters for I2R NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @min_time_between_msr: for non trigger based NDP ranging, minimum time
+ * between measurements in milliseconds.
+ * @max_time_between_msr: for non trigger based NDP ranging, maximum time
+ * between measurements in milliseconds.
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 bss_color;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 band;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+ __le16 max_time_between_msr;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_10 */
+
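The @format_bw byte packs two values, the frame format in the low nibble and the bandwidth in the high nibble, as documented above. An illustrative packing helper:

/* Sketch: pack @format_bw from the two location enums. */
static u8 example_pack_format_bw(enum iwl_location_frame_format format,
				 enum iwl_location_bw bw)
{
	return (format & 0x0f) | ((bw & 0x0f) << 4);
}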
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
- * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings
* @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
* @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
* @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if
@@ -561,6 +614,8 @@ struct iwl_tof_range_req_ap_entry_v2 {
* the responder asked for LMR feedback although the initiator did not set
* the LMR feedback bit in the FTM request. If not set, the initiator will
* continue with the session and will provide the LMR feedback.
+ * @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the
+ * first NDP exchange. This is used for testing.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -577,6 +632,7 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15),
+ IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16),
};
/**
@@ -630,6 +686,7 @@ enum iwl_location_frame_format {
* @IWL_LOCATION_BW_20MHZ: 20MHz
* @IWL_LOCATION_BW_40MHZ: 40MHz
* @IWL_LOCATION_BW_80MHZ: 80MHz
+ * @IWL_LOCATION_BW_160MHZ: 160MHz
*/
enum iwl_location_bw {
IWL_LOCATION_BW_20MHZ,
@@ -796,6 +853,7 @@ struct iwl_tof_range_req_ap_entry_v7 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
#define IWL_LOCATION_MAX_STS_POS 3
+#define IWL_LOCATION_TOTAL_LTF_POS 6
/**
* struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters
@@ -953,6 +1011,78 @@ struct iwl_tof_range_req_ap_entry_v9 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
/**
+ * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @bssid: AP's BSSID
+ * @burst_period: For EDCA based ranging: Recommended value to be sent to the
+ * AP. Measurement periodicity In units of 100ms. ignored if
+ * num_of_bursts_exp = 0.
+ * For non trigger based NDP ranging, the maximum time between
+ * measurements in units of milliseconds.
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max total LTFs. One of
+ * &enum ieee80211_range_params_max_total_ltf.
+ * @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max total LTFs. One of
+ * &enum ieee80211_range_params_max_total_ltf.
+ * @min_time_between_msr: For non trigger based NDP ranging, the minimum time
+ * between measurements in units of milliseconds
+ */
+struct iwl_tof_range_req_ap_entry_v10 {
+ __le32 initiator_ap_flags;
+ u8 band;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
+
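The @r2i_ndp_params and @i2r_ndp_params bytes follow the bit layout spelled out above; IWL_LOCATION_MAX_STS_POS and IWL_LOCATION_TOTAL_LTF_POS defined earlier in this file give the shift positions. A hedged packing sketch:

/* Sketch: pack the NDP ranging negotiation parameters. */
static u8 example_pack_ndp_params(u8 max_ltf_rep, u8 max_sts, u8 max_total_ltf)
{
	return (max_ltf_rep & 0x7) |
	       ((max_sts & 0x7) << IWL_LOCATION_MAX_STS_POS) |
	       ((max_total_ltf & 0x3) << IWL_LOCATION_TOTAL_LTF_POS);
}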
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -1229,6 +1359,34 @@ struct iwl_tof_range_req_cmd_v13 {
struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+/**
+ * struct iwl_tof_range_req_cmd_v14 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10.
+ */
+struct iwl_tof_range_req_cmd_v14 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
diff --git a/fw/api/mac-cfg.h b/fw/api/mac-cfg.h
index 184db5a6f06f..ca6fa66d1917 100644
--- a/fw/api/mac-cfg.h
+++ b/fw/api/mac-cfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -58,6 +58,14 @@ enum iwl_mac_conf_subcmd_ids {
*/
STA_DISABLE_TX_CMD = 0xD,
/**
+ * @ROC_CMD: &struct iwl_roc_req
+ */
+ ROC_CMD = 0xE,
+ /**
+ * @ROC_NOTIF: &struct iwl_roc_notif
+ */
+ ROC_NOTIF = 0xF8,
+ /**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
*/
SESSION_PROTECTION_NOTIF = 0xFB,
@@ -136,7 +144,7 @@ struct iwl_missed_vap_notif {
} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
/**
- * struct iwl_channel_switch_start_notif - Channel switch start notification
+ * struct iwl_channel_switch_start_notif_v1 - Channel switch start notification
*
* @id_and_color: ID and color of the MAC
*/
@@ -234,9 +242,9 @@ struct iwl_mac_low_latency_cmd {
* @esr_transition_timeout: the timeout required by the AP for the
* eSR transition.
* Available only from version 2 of the command.
- * This values comes from the EMLSR transition delay in the EML
+ * This value comes from the EMLSR transition delay in the EML
* Capabilities subfield.
- * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j.
+ * @medium_sync_delay: the value as it appears in P802.11be_D2.2 Figure 9-1002j.
* @assoc_id: unique ID assigned by the AP during association
* @reserved1: alignment
* @data_policy: see &enum iwl_mac_data_policy
@@ -309,7 +317,6 @@ enum iwl_mac_config_filter_flags {
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @client: client mac data
- * @go_ibss: mac data for go or ibss
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
@@ -366,7 +373,7 @@ struct iwl_mac_config_cmd {
* iwl_link_ctx_cfg_cmd::bss_color_disable
* @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask.
* This flag can be set only if the MAC that this link relates to has
- * eht_support set to true.
+ * eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -439,6 +446,7 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
* @cck_short_preamble: 1 for enabling short preamble, 0 otherwise
@@ -454,7 +462,7 @@ enum iwl_link_ctx_flags {
* @bi: beacon interval in TU, applicable only when associated
* @dtim_interval: DTIM interval in TU.
* Relevant only for GO, otherwise this is offloaded.
- * @puncture_mask: puncture mask for EHT
+ * @puncture_mask: puncture mask for EHT (removed in VER_3)
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @flags: a combination from &enum iwl_link_ctx_flags
* @flags_mask: what of %flags have changed. Also &enum iwl_link_ctx_flags
@@ -464,10 +472,10 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved: alignment
+ * @reserved2: alignment
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
- * @reserved1: reserved for future use
+ * @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
__le32 action;
@@ -478,7 +486,10 @@ struct iwl_link_config_cmd {
__le16 reserved_for_local_link_addr;
__le32 modify_mask;
__le32 active;
- __le32 listen_lmac;
+ union {
+ __le32 listen_lmac;
+ __le32 reserved1;
+ };
__le32 cck_rates;
__le32 ofdm_rates;
__le32 cck_short_preamble;
@@ -494,7 +505,7 @@ struct iwl_link_config_cmd {
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
__le32 bi;
__le32 dtim_interval;
- __le16 puncture_mask;
+ __le16 puncture_mask; /* removed in _VER_3 */
__le16 frame_time_rts_th;
__le32 flags;
__le32 flags_mask;
@@ -504,11 +515,11 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved;
+ u8 reserved2;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved1[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */
+ __le32 reserved3[8];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
@@ -631,4 +642,25 @@ struct iwl_mvm_sta_disable_tx_cmd {
__le32 disable;
} __packed; /* STA_DISABLE_TX_API_S_VER_1 */
+/**
+ * enum iwl_mvm_fw_esr_recommendation - FW recommendation code
+ * @ESR_RECOMMEND_LEAVE: recommendation to leave esr
+ * @ESR_FORCE_LEAVE: force exiting esr
+ * @ESR_RECOMMEND_ENTER: recommendation to enter esr
+ */
+enum iwl_mvm_fw_esr_recommendation {
+ ESR_RECOMMEND_LEAVE,
+ ESR_FORCE_LEAVE,
+ ESR_RECOMMEND_ENTER,
+}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode
+ *
+ * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
+ */
+struct iwl_mvm_esr_mode_notif {
+ __le32 action;
+} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
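The new ESR_MODE_NOTIF payload carries one of the recommendation codes above. A minimal, illustrative handler could dispatch on it as follows (the real driver policy for entering or leaving EMLSR is considerably more involved):

/* Sketch: react to the firmware's EMLSR recommendation. */
static void example_handle_esr_mode_notif(const struct iwl_mvm_esr_mode_notif *notif)
{
	switch (le32_to_cpu(notif->action)) {
	case ESR_RECOMMEND_ENTER:
		/* consider enabling EMLSR if the other constraints allow it */
		break;
	case ESR_RECOMMEND_LEAVE:
	case ESR_FORCE_LEAVE:
		/* leave EMLSR; a forced leave must not be ignored */
		break;
	default:
		break;
	}
}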
diff --git a/fw/api/mac.h b/fw/api/mac.h
index 55882190251c..bcbbf8c4a297 100644
--- a/fw/api/mac.h
+++ b/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -310,6 +310,13 @@ struct iwl_ac_qos {
* @filter_flags: combination of &enum iwl_mac_filter_flags
* @qos_flags: from &enum iwl_mac_qos_flags
* @ac: one iwl_mac_qos configuration for each AC
+ * @ap: AP specific config data, see &struct iwl_mac_data_ap
+ * @go: GO specific config data, see &struct iwl_mac_data_go
+ * @sta: BSS client specific config data, see &struct iwl_mac_data_sta
+ * @p2p_sta: P2P client specific config data, see &struct iwl_mac_data_p2p_sta
+ * @p2p_dev: P2P-device specific config data, see &struct iwl_mac_data_p2p_dev
+ * @pibss: Pseudo-IBSS specific data, unused; see struct iwl_mac_data_pibss
+ * @ibss: IBSS specific config data, see &struct iwl_mac_data_ibss
*/
struct iwl_mac_ctx_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
@@ -431,8 +438,8 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
-#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
+#define MAX_CHANNEL_BW_INDX_API_D_VER_1 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 5
/**
* struct iwl_he_pkt_ext_v1 - QAM thresholds
@@ -455,7 +462,7 @@ enum iwl_he_pkt_ext_constellations {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v1 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_1][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
@@ -480,7 +487,7 @@ struct iwl_he_pkt_ext_v1 {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
/**
diff --git a/fw/api/nvm-reg.h b/fw/api/nvm-reg.h
index 28bfabb399b2..d424d0126367 100644
--- a/fw/api/nvm-reg.h
+++ b/fw/api/nvm-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -21,8 +21,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* &struct iwl_lari_config_change_cmd_v2,
* &struct iwl_lari_config_change_cmd_v3,
* &struct iwl_lari_config_change_cmd_v4,
- * &struct iwl_lari_config_change_cmd_v5 or
- * &struct iwl_lari_config_change_cmd_v6
+ * &struct iwl_lari_config_change_cmd_v5,
+ * &struct iwl_lari_config_change_cmd_v6,
+ * &struct iwl_lari_config_change_cmd_v7,
+ * &struct iwl_lari_config_change_cmd_v10 or
+ * &struct iwl_lari_config_change_cmd
*/
LARI_CONFIG_CHANGE = 0x1,
@@ -44,6 +47,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
SAR_OFFSET_MAPPING_TABLE_CMD = 0x4,
/**
+ * @MCC_ALLOWED_AP_TYPE_CMD: &struct iwl_mcc_allowed_ap_type_cmd
+ */
+ MCC_ALLOWED_AP_TYPE_CMD = 0x5,
+
+ /**
* @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
*/
PNVM_INIT_COMPLETE_NTFY = 0xFE,
@@ -112,7 +120,7 @@ struct iwl_nvm_access_cmd {
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
/**
- * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
+ * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
* @length: in bytes, either how much was written or read
* @type: NVM_SECTION_TYPE_*
@@ -204,7 +212,7 @@ struct iwl_nvm_get_info_phy {
#define IWL_NUM_CHANNELS 110
/**
- * struct iwl_nvm_get_info_regulatory - regulatory information
+ * struct iwl_nvm_get_info_regulatory_v1 - regulatory information
* @lar_enabled: is LAR enabled
* @channel_profile: regulatory data of this channel
* @reserved: reserved
@@ -263,6 +271,9 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+#define IWL_MCC_US 0x5553
+#define IWL_MCC_CANADA 0x4341
+
/**
* struct iwl_mcc_update_cmd - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
@@ -429,36 +440,31 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
-#define IWL_TAS_BLOCK_LIST_MAX 16
+#define IWL_WTAS_BLACK_LIST_MAX 16
/**
- * struct iwl_tas_config_cmd_v2 - configures the TAS
+ * struct iwl_tas_config_cmd_common - configures the TAS.
+ * This is also the v2 structure.
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
*/
-struct iwl_tas_config_cmd_v2 {
+struct iwl_tas_config_cmd_common {
__le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
- * struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
@@ -466,32 +472,35 @@ struct iwl_tas_config_cmd_v3 {
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-union iwl_tas_config_cmd {
- struct iwl_tas_config_cmd_v2 v2;
- struct iwl_tas_config_cmd_v3 v3;
- struct iwl_tas_config_cmd_v4 v4;
+struct iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_common common;
+ union {
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+ };
};
+
/**
- * enum iwl_lari_configs - bit masks for the various LARI config operations
+ * enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
* @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan
* @LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK: ETSI 5.8GHz SRD disabled
* @LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK: enable 5.15/5.35GHz bands in
* Indonesia
+ * @LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK: enable 2022 china regulatory
*/
enum iwl_lari_config_masks {
LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK = BIT(0),
LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK = BIT(1),
LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK = BIT(2),
LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK = BIT(3),
+ LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK = BIT(7),
};
#define IWL_11AX_UKRAINE_MASK 3
@@ -601,6 +610,136 @@ struct iwl_lari_config_change_cmd_v6 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */
/**
+ * struct iwl_lari_config_change_cmd_v7 - change LARI configuration
+ * This structure is also used for LARI cmd versions 8 and 9.
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 4 to 8 - bits 0:3 are supported.
+ * For LARI cmd version 9 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 7 - bits 0:3 are supported.
+ * For LARI cmd version 8 - bits 0:4 are supported.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different bands.
+ */
+struct iwl_lari_config_change_cmd_v7 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_7 */
+/* LARI_CHANGE_CONF_CMD_S_VER_8 */
+/* LARI_CHANGE_CONF_CMD_S_VER_9 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v10 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 10 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 10 - bits 0:4 are supported.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different bands.
+ * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC.
+ * bit0: enable 320Mhz in Japan.
+ * bit1: enable 320Mhz in South Korea.
+ * bit 2 - 31: reserved.
+ */
+struct iwl_lari_config_change_cmd_v10 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+ __le32 oem_320mhz_allow_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_10 */
+
+/**
+ * struct iwl_lari_config_change_cmd - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 11 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 11 - bits 0:4 are supported.
+ * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are
+ * reserved. No need to mask out the reserved bits.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different bands.
+ * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC.
+ * bit0: enable 320Mhz in Japan.
+ * bit1: enable 320Mhz in South Korea.
+ * bit 2 - 31: reserved.
+ * @oem_11be_allow_bitmap: Bitmap of 11be allowed MCCs. No need to mask out the
+ * unsupported bits
+ * bit0: enable 11be in China (CB/CN).
+ * bit1: enable 11be in South Korea.
+ * bit 2 - 31: reserved.
+ */
+struct iwl_lari_config_change_cmd {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+ __le32 oem_320mhz_allow_bitmap;
+ __le32 oem_11be_allow_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_11 */
+/* LARI_CHANGE_CONF_CMD_S_VER_12 */
+
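A minimal sketch of filling the latest LARI command using the masks from enum iwl_lari_config_masks; in practice the bitmaps are populated from BIOS/ACPI data rather than hard-coded as here:

/* Illustration only: hard-coded config_bitmap for the LARI command. */
static void example_fill_lari_cmd(struct iwl_lari_config_change_cmd *cmd)
{
	u32 config = LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK |
		     LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->config_bitmap = cpu_to_le32(config);
}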
+/* Activate UNII-1 (5.2GHz) for World Wide */
+#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
+#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
@@ -608,4 +747,17 @@ struct iwl_pnvm_init_complete_ntfy {
__le32 status;
} __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */
+#define UATS_TABLE_ROW_SIZE 26
+#define UATS_TABLE_COL_SIZE 13
+
+/**
+ * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD
+ * @offset_map: mapping a mcc to UHB AP type support (UATS) allowed
+ * @reserved: reserved
+ */
+struct iwl_mcc_allowed_ap_type_cmd {
+ u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
+ __le16 reserved;
+} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */
+
#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/fw/api/offload.h b/fw/api/offload.h
index 898bf351f6e4..6a7bbfd6b2b7 100644
--- a/fw/api/offload.h
+++ b/fw/api/offload.h
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2021-2022 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@@ -18,7 +18,9 @@ enum iwl_prot_offload_subcmd_ids {
WOWLAN_WAKE_PKT_NOTIFICATION = 0xFC,
/**
- * @WOWLAN_INFO_NOTIFICATION: Notification in &struct iwl_wowlan_info_notif
+ * @WOWLAN_INFO_NOTIFICATION: Notification in
+ * &struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2,
+ * or &struct iwl_wowlan_info_notif
*/
WOWLAN_INFO_NOTIFICATION = 0xFD,
@@ -58,7 +60,7 @@ struct iwl_stored_beacon_notif_common {
} __packed;
/**
- * struct iwl_stored_beacon_notif - Stored beacon notification
+ * struct iwl_stored_beacon_notif_v2 - Stored beacon notification
*
* @common: fields common for all versions
* @data: beacon data, length in @byte_count
diff --git a/fw/api/phy-ctxt.h b/fw/api/phy-ctxt.h
index 8fe42cff1102..4d8a12799c4d 100644
--- a/fw/api/phy-ctxt.h
+++ b/fw/api/phy-ctxt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -25,8 +25,8 @@
* For legacy set bit means upper channel, otherwise lower.
* For VHT - bit-2 marks if the control is lower/upper relative to center-freq
* bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0.
- * center_freq
* For EHT - bit-3 is used for extended distance
+ * center_freq
* |
* 40Mhz |____|____|
* 80Mhz |____|____|____|____|
@@ -113,7 +113,7 @@ struct iwl_phy_context_cmd_tail {
} __packed;
/**
- * struct iwl_phy_context_cmd - config of the PHY context
+ * struct iwl_phy_context_cmd_v1 - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
* @action: action to perform, see &enum iwl_ctxt_action
@@ -142,6 +142,9 @@ struct iwl_phy_context_cmd_v1 {
* @lmac_id: the lmac id the phy context belongs to
* @ci: channel info
* @rxchain_info: ???
+ * @sbb_bandwidth: 0 disabled, 1 - 40Mhz ... 4 - 320MHz
+ * @sbb_ctrl_channel_loc: location of the control channel
+ * @puncture_mask: bitmap of punctured subchannels
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
@@ -152,9 +155,20 @@ struct iwl_phy_context_cmd {
/* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
- __le32 rxchain_info; /* reserved in _VER_4 */
+ union {
+ __le32 rxchain_info; /* reserved in _VER_4 */
+ struct { /* used for _VER_5/_VER_6 */
+ u8 sbb_bandwidth;
+ u8 sbb_ctrl_channel_loc;
+ __le16 puncture_mask; /* added in VER_6 */
+ };
+ };
__le32 dsp_cfg_flags;
__le32 reserved;
-} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
+ * PHY_CONTEXT_CMD_API_VER_4,
+ * PHY_CONTEXT_CMD_API_VER_5,
+ * PHY_CONTEXT_CMD_API_VER_6
+ */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
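From command version 5 on, the bytes that previously held rxchain_info carry the sbb_* fields, and version 6 adds the puncture mask. A hedged sketch of filling the version-dependent tail (cmd_ver is assumed to be looked up from the firmware TLVs):

/* Sketch: fill the per-version tail of the PHY context command. */
static void example_fill_phy_ctxt_tail(struct iwl_phy_context_cmd *cmd, int cmd_ver,
				       u8 bw_idx, u8 ctrl_loc, u16 puncture_mask)
{
	if (cmd_ver < 5)
		return;

	cmd->sbb_bandwidth = bw_idx;
	cmd->sbb_ctrl_channel_loc = ctrl_loc;
	if (cmd_ver >= 6)
		cmd->puncture_mask = cpu_to_le16(puncture_mask);
}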
diff --git a/fw/api/phy.h b/fw/api/phy.h
index 5a3f30e5e06d..c73d4d597857 100644
--- a/fw/api/phy.h
+++ b/fw/api/phy.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -43,6 +43,11 @@ enum iwl_phy_ops_subcmd_ids {
PER_PLATFORM_ANT_GAIN_CMD = 0x07,
/**
+ * @AP_TX_POWER_CONSTRAINTS_CMD: &struct iwl_txpower_constraints_cmd
+ */
+ AP_TX_POWER_CONSTRAINTS_CMD = 0x0C,
+
+ /**
* @CT_KILL_NOTIFICATION: &struct ct_kill_notif
*/
CT_KILL_NOTIFICATION = 0xFE,
@@ -190,7 +195,7 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
-* enum ctdp_cmd_operation - CTDP command operations
+* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
diff --git a/fw/api/power.h b/fw/api/power.h
index 85d89f559f6c..6e6a92d173cc 100644
--- a/fw/api/power.h
+++ b/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -144,6 +144,8 @@ struct iwl_powertable_cmd {
* receiver and transmitter. '0' - does not allow.
* @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK:
* Device Retention indication, '1' indicate retention is enabled.
+ * @DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK:
+ * Prevent power save until entering d3 is completed.
* @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK:
* 32Khz external slow clock valid indication, '1' indicate cloack is
* valid.
@@ -151,6 +153,7 @@ struct iwl_powertable_cmd {
enum iwl_device_power_flags {
DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1),
+ DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK = BIT(7),
DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12),
};
@@ -162,7 +165,7 @@ enum iwl_device_power_flags {
* @reserved: reserved (padding)
*/
struct iwl_device_power_cmd {
- /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ /* PM_POWER_TABLE_CMD_API_S_VER_7 */
__le16 flags;
__le16 reserved;
} __packed;
@@ -382,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 {
__le32 timer_period;
__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: indicates whether per_chain_restriction
+ * has changed since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. if expires FW will change to default
+ * BIOS values. relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ * @flags: reduce power flags.
+ * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA.
+ * Not in use.
+ */
+struct iwl_dev_tx_power_cmd_v8 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+ __le32 tpc_vlp_backoff_level;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
+
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
@@ -389,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 {
* @v4: version 4 part of the command
* @v5: version 5 part of the command
* @v6: version 6 part of the command
+ * @v7: version 7 part of the command
+ * @v8: version 8 part of the command
*/
struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_common common;
@@ -398,6 +430,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
struct iwl_dev_tx_power_cmd_v7 v7;
+ struct iwl_dev_tx_power_cmd_v8 v8;
};
};
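The multiversion command is sent with only as many bytes as the firmware's command version understands. A rough, hedged sketch of the length computation (cmd_ver assumed to be looked up from the firmware TLVs):

/* Sketch: payload length = common part + the per-version tail actually in use. */
static size_t example_tx_power_cmd_len(const struct iwl_dev_tx_power_cmd *cmd,
				       u8 cmd_ver)
{
	size_t len = sizeof(cmd->common);

	if (cmd_ver == 8)
		len += sizeof(cmd->v8);
	else if (cmd_ver == 7)
		len += sizeof(cmd->v7);
	/* ... older versions select v6/v5/v4/v3 the same way ... */

	return len;
}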
@@ -429,7 +462,7 @@ struct iwl_per_chain_offset {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
*/
@@ -439,7 +472,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -451,7 +484,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -463,7 +496,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -475,7 +508,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -503,15 +536,45 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
/**
+ * enum iwl_ppag_flags - PPAG enable masks
+ * @IWL_PPAG_ETSI_MASK: enable PPAG in ETSI
+ * @IWL_PPAG_CHINA_MASK: enable PPAG in China
+ * @IWL_PPAG_ETSI_LPI_UHB_MASK: enable LPI in ETSI for UHB
+ * @IWL_PPAG_ETSI_VLP_UHB_MASK: enable VLP in ETSI for UHB
+ * @IWL_PPAG_ETSI_SP_UHB_MASK: enable SP in ETSI for UHB
+ * @IWL_PPAG_USA_LPI_UHB_MASK: enable LPI in USA for UHB
+ * @IWL_PPAG_USA_VLP_UHB_MASK: enable VLP in USA for UHB
+ * @IWL_PPAG_USA_SP_UHB_MASK: enable SP in USA for UHB
+ * @IWL_PPAG_CANADA_LPI_UHB_MASK: enable LPI in CANADA for UHB
+ * @IWL_PPAG_CANADA_VLP_UHB_MASK: enable VLP in CANADA for UHB
+ * @IWL_PPAG_CANADA_SP_UHB_MASK: enable SP in CANADA for UHB
+ */
+enum iwl_ppag_flags {
+ IWL_PPAG_ETSI_MASK = BIT(0),
+ IWL_PPAG_CHINA_MASK = BIT(1),
+ IWL_PPAG_ETSI_LPI_UHB_MASK = BIT(2),
+ IWL_PPAG_ETSI_VLP_UHB_MASK = BIT(3),
+ IWL_PPAG_ETSI_SP_UHB_MASK = BIT(4),
+ IWL_PPAG_USA_LPI_UHB_MASK = BIT(5),
+ IWL_PPAG_USA_VLP_UHB_MASK = BIT(6),
+ IWL_PPAG_USA_SP_UHB_MASK = BIT(7),
+ IWL_PPAG_CANADA_LPI_UHB_MASK = BIT(8),
+ IWL_PPAG_CANADA_VLP_UHB_MASK = BIT(9),
+ IWL_PPAG_CANADA_SP_UHB_MASK = BIT(10),
+};
+
+/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- *
- * @flags: bit 0 - indicates enablement of PPAG for ETSI
- * bit 1 - indicates enablement of PPAG for CHINA BIOS
- * bit 1 can be used only in v3 (identical to v2)
- * @gain: table of antenna gain values per chain and sub-band
- * @reserved: reserved
+ * version 3, 4, 5 and 6 are the same structure as v2,
+ * but have a different format of the flags bitmap
+ * @v1.flags: values from &enum iwl_ppag_flags
+ * @v1.gain: table of antenna gain values per chain and sub-band
+ * @v1.reserved: reserved
+ * @v2.flags: values from &enum iwl_ppag_flags
+ * @v2.gain: table of antenna gain values per chain and sub-band
+ * @v2.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
@@ -526,6 +589,11 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+ IWL_PPAG_ETSI_LPI_UHB_MASK | \
+ IWL_PPAG_USA_LPI_UHB_MASK)
+
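The PPAG command now carries a full per-region/per-band enable bitmap rather than the two legacy ETSI/China bits; the IWL_PPAG_CMD_V4_MASK and IWL_PPAG_CMD_V5_MASK definitions above describe which of those bits each command version accepts. A minimal sketch of clamping platform-provided flags to what a v5 command understands (the bios_flags value and the helper itself are assumptions, not part of this patch):

/* Sketch: keep only the PPAG enable bits a v5 command understands.
 * "bios_flags" stands in for whatever the platform tables reported.
 */
static u32 iwl_ppag_flags_for_cmd_v5(u32 bios_flags)
{
        return bios_flags & IWL_PPAG_CMD_V5_MASK;
}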
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
@@ -667,4 +735,44 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
+
+#define DEFAULT_TPE_TX_POWER 0x7F
+
+/*
+ * Bandwidth: 20/40/80/(160/80+80)/320
+ */
+#define IWL_MAX_TX_EIRP_PWR_MAX_SIZE 5
+#define IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE 16
+
+enum iwl_6ghz_ap_type {
+ IWL_6GHZ_AP_TYPE_LPI,
+ IWL_6GHZ_AP_TYPE_SP,
+ IWL_6GHZ_AP_TYPE_VLP,
+}; /* PHY_AP_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_txpower_constraints_cmd
+ * AP_TX_POWER_CONSTRAINTS_CMD
+ * Used for VLP/LPI/AFC Access Point power constraints for 6GHz channels
+ * @link_id: linkId
+ * @ap_type: see &enum iwl_6ghz_ap_type
+ * @eirp_pwr: 8-bit 2s complement signed integer in the range
+ * -64 dBm to 63 dBm with a 0.5 dB step
+ * default &DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @psd_pwr: 8-bit 2s complement signed integer in the range
+ * -63.5 to +63 dBm/MHz with a 0.5 step
+ * value - 128 indicates that the corresponding 20
+ * MHz channel cannot be used for transmission.
+ * value +127 indicates that no maximum PSD limit
+ * is specified for the corresponding 20 MHz channel
+ * default &DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @reserved: reserved (padding)
+ */
+struct iwl_txpower_constraints_cmd {
+ __le16 link_id;
+ __le16 ap_type;
+ __s8 eirp_pwr[IWL_MAX_TX_EIRP_PWR_MAX_SIZE];
+ __s8 psd_pwr[IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE];
+ u8 reserved[3];
+} __packed; /* PHY_AP_TX_POWER_CONSTRAINTS_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_power_h__ */
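The new AP_TX_POWER_CONSTRAINTS_CMD payload above is straightforward: every EIRP/PSD slot defaults to DEFAULT_TPE_TX_POWER ("no maximum limit") and is only narrowed when the AP advertises a tighter Transmit Power Envelope. A hedged sketch of initialising it for a VLP 6 GHz AP (the link-id handling and the actual TPE parsing are assumed, not shown in this patch):

/* Sketch: fill the constraints command with "no limit" defaults. */
static void iwl_init_tpe_cmd(struct iwl_txpower_constraints_cmd *cmd,
                             u16 link_id)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->link_id = cpu_to_le16(link_id);
        cmd->ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
        /* 0x7F in every slot means "no maximum limit" per the kernel-doc */
        memset(cmd->eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd->eirp_pwr));
        memset(cmd->psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd->psd_pwr));
}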
diff --git a/fw/api/rfi.h b/fw/api/rfi.h
index 1a84a4081e7c..34d664023473 100644
--- a/fw/api/rfi.h
+++ b/fw/api/rfi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2021, 2023 Intel Corporation
*/
#ifndef __iwl_fw_api_rfi_h__
#define __iwl_fw_api_rfi_h__
@@ -25,8 +25,9 @@ struct iwl_rfi_lut_entry {
/**
* struct iwl_rfi_config_cmd - RFI configuration table
*
- * @entry: a table can have 24 frequency/channel mappings
+ * @table: a table can have 24 frequency/channel mappings
* @oem: specifies if this is the default table or set by OEM
+ * @reserved: (reserved/padding)
*/
struct iwl_rfi_config_cmd {
struct iwl_rfi_lut_entry table[IWL_RFI_LUT_SIZE];
@@ -35,7 +36,7 @@ struct iwl_rfi_config_cmd {
} __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */
/**
- * iwl_rfi_freq_table_status - status of the frequency table query
+ * enum iwl_rfi_freq_table_status - status of the frequency table query
* @RFI_FREQ_TABLE_OK: can be used
* @RFI_FREQ_TABLE_DVFS_NOT_READY: DVFS is not ready yet, should try later
* @RFI_FREQ_TABLE_DISABLED: the feature is disabled in FW
diff --git a/fw/api/rs.h b/fw/api/rs.h
index a1a272433b09..1a60f0cdf972 100644
--- a/fw/api/rs.h
+++ b/fw/api/rs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@@ -9,7 +9,7 @@
#include "mac.h"
/**
- * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
* @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
* bandwidths <= 80MHz
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
diff --git a/fw/api/rx.h b/fw/api/rx.h
index 25e2e23dce3d..691c879cb90d 100644
--- a/fw/api/rx.h
+++ b/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -371,7 +371,7 @@ enum iwl_rx_phy_eht_data1 {
IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7 = 0x0000fe00,
};
-/* goes into Metadata DW 7 */
+/* goes into Metadata DW 7 (Qu) or 8 (So or higher) */
enum iwl_rx_phy_he_data2 {
/* info type: HE MU-EXT */
/* the a1/a2/... is what the PHY/firmware calls the values */
@@ -387,7 +387,7 @@ enum iwl_rx_phy_he_data2 {
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000,
};
-/* goes into Metadata DW 8 */
+/* goes into Metadata DW 8 (Qu) or 7 (So or higher) */
enum iwl_rx_phy_he_data3 {
/* info type: HE MU-EXT */
IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
@@ -408,10 +408,9 @@ enum iwl_rx_phy_he_he_data4 {
IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
};
-/* goes into Metadata DW 7 */
+/* goes into Metadata DW 8 (Qu has no EHT) */
enum iwl_rx_phy_eht_data2 {
/* info type: EHT-MU-EXT */
- /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */
IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff,
IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00,
IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_B1 = 0x07fc0000,
@@ -420,11 +419,10 @@ enum iwl_rx_phy_eht_data2 {
IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff,
};
-/* goes into Metadata DW 8 */
+/* goes into Metadata DW 7 (Qu has no EHT) */
enum iwl_rx_phy_eht_data3 {
+ /* note: low 8 bits cannot be used */
/* info type: EHT-MU-EXT */
- /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */
- IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x000001ff,
IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C1 = 0x0003fe00,
IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_C2 = 0x07fc0000,
};
@@ -432,10 +430,10 @@ enum iwl_rx_phy_eht_data3 {
/* goes into Metadata DW 4 */
enum iwl_rx_phy_eht_data4 {
/* info type: EHT-MU-EXT */
- /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */
IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D1 = 0x000001ff,
IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_D2 = 0x0003fe00,
IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x000c0000,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_B2 = 0x1ff00000,
};
/* goes into Metadata DW 16 */
@@ -712,7 +710,15 @@ struct iwl_rx_mpdu_desc {
__le32 reorder_data;
union {
+ /**
+ * @v1: version 1 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v1
+ */
struct iwl_rx_mpdu_desc_v1 v1;
+ /**
+ * @v3: version 3 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v3
+ */
struct iwl_rx_mpdu_desc_v3 v3;
};
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
@@ -978,7 +984,7 @@ struct iwl_ba_window_status_notif {
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
/**
- * struct iwl_rfh_queue_config - RX queue configuration
+ * struct iwl_rfh_queue_data - RX queue configuration
* @q_num: Q num
* @enable: enable queue
* @reserved: alignment
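The EHT sniffer metadata layout shifts here: RU allocation B2 leaves DW 7 (whose low 8 bits can no longer be used) and reappears in the top bits of DW 4. A hedged sketch of extracting it after this change, using the kernel's standard le32_get_bits() bitfield helper (the phy_data4 value is assumed context):

/* Sketch: B2 RU allocation now lives in the high bits of metadata DW 4. */
static u32 iwl_eht_ru_alloc_b2(__le32 phy_data4)
{
        return le32_get_bits(phy_data4,
                             IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_B2);
}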
diff --git a/fw/api/scan.h b/fw/api/scan.h
index 93078f8cc08c..8598031567bb 100644
--- a/fw/api/scan.h
+++ b/fw/api/scan.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -14,6 +14,10 @@
*/
enum iwl_scan_subcmd_ids {
/**
+ * @CHANNEL_SURVEY_NOTIF: &struct iwl_umac_scan_channel_survey_notif
+ */
+ CHANNEL_SURVEY_NOTIF = 0xFB,
+ /**
* @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
*/
OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
@@ -62,6 +66,8 @@ struct iwl_ssid_ie {
#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
#define IWL_MAX_SCHED_SCAN_PLANS 2
+#define IWL_MAX_NUM_NOISE_RESULTS 22
+
enum scan_framework_client {
SCAN_CLIENT_SCHED_SCAN = BIT(0),
SCAN_CLIENT_NETDETECT = BIT(1),
@@ -143,7 +149,7 @@ struct iwl_scan_offload_profile_cfg_data {
} __packed;
/**
- * struct iwl_scan_offload_profile_cfg
+ * struct iwl_scan_offload_profile_cfg_v1 - scan offload profile config
* @profiles: profiles to search for match
* @data: the rest of the data for profile_cfg
*/
@@ -417,7 +423,7 @@ struct iwl_lmac_scan_complete_notif {
} __packed;
/**
- * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: &enum iwl_scan_offload_complete_status
@@ -437,10 +443,10 @@ struct iwl_periodic_scan_complete {
/* UMAC Scan API */
/* The maximum of either of these cannot exceed 8, because we use an
- * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ * 8-bit mask (see enum iwl_scan_status).
*/
-#define IWL_MVM_MAX_UMAC_SCANS 4
-#define IWL_MVM_MAX_LMAC_SCANS 1
+#define IWL_MAX_UMAC_SCANS 4
+#define IWL_MAX_LMAC_SCANS 1
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
@@ -642,10 +648,13 @@ enum iwl_umac_scan_general_flags {
* notification per channel or not.
* @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
* reorder optimization or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS: Enable channel statistics
+ * collection when #IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE is set.
*/
enum iwl_umac_scan_general_flags2 {
IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS = BIT(3),
};
/**
@@ -780,7 +789,7 @@ struct iwl_scan_req_umac_tail_v1 {
} __packed;
/**
- * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
+ * struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
* parameters following channels configuration array.
* @schedule: two scheduling plans.
* @delay: delay in TUs before starting the first scan iteration
@@ -1076,7 +1085,7 @@ struct iwl_scan_req_params_v12 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
- * struct iwl_scan_req_params_v16
+ * struct iwl_scan_req_params_v17 - scan request parameters (v17)
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v7
* @periodic_params: &struct iwl_scan_periodic_parms_v1
@@ -1102,7 +1111,7 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
- * struct iwl_scan_req_umac_v16
+ * struct iwl_scan_req_umac_v17 - scan request command (v17)
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
@@ -1258,4 +1267,26 @@ struct iwl_umac_scan_iter_complete_notif {
struct iwl_scan_results_notif results[];
} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
+/**
+ * struct iwl_umac_scan_channel_survey_notif - data for survey
+ * @channel: the channel scanned
+ * @band: band of channel
+ * @noise: noise floor measurements in negative dBm, invalid 0xff
+ * @reserved: for future use and alignment
+ * @active_time: time in ms the radio was turned on (on the channel)
+ * @busy_time: time in ms the channel was sensed busy, 0 for a clean channel
+ * @tx_time: time the radio spent transmitting data
+ * @rx_time: time the radio spent receiving data
+ */
+struct iwl_umac_scan_channel_survey_notif {
+ __le32 channel;
+ __le32 band;
+ u8 noise[IWL_MAX_NUM_NOISE_RESULTS];
+ u8 reserved[2];
+ __le32 active_time;
+ __le32 busy_time;
+ __le32 tx_time;
+ __le32 rx_time;
+} __packed; /* SCAN_CHANNEL_SURVEY_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_scan_h__ */
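The new CHANNEL_SURVEY_NOTIF carries per-channel airtime counters of the kind cfg80211 surveys report. A hedged sketch of decoding one notification (the pr_debug() logging and the RX plumbing around it are assumptions; only the struct layout comes from this patch):

/* Sketch: decode a channel survey notification. */
static void iwl_handle_channel_survey(const struct iwl_umac_scan_channel_survey_notif *notif)
{
        int i;

        pr_debug("survey: chan %u band %u active %u ms busy %u ms\n",
                 le32_to_cpu(notif->channel), le32_to_cpu(notif->band),
                 le32_to_cpu(notif->active_time), le32_to_cpu(notif->busy_time));

        /* noise[] entries are -dBm; 0xff marks "no measurement" */
        for (i = 0; i < IWL_MAX_NUM_NOISE_RESULTS; i++)
                if (notif->noise[i] != 0xff)
                        pr_debug("  noise[%d] = -%u dBm\n", i, notif->noise[i]);
}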
diff --git a/fw/api/sta.h b/fw/api/sta.h
index d62fed543276..d7f8a276b683 100644
--- a/fw/api/sta.h
+++ b/fw/api/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,6 +109,7 @@ enum iwl_sta_flags {
* @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_AMSDU_SPP: SPP (signaling and payload protected) A-MSDU
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_FLG_KEYID_POS: key index bit position
* @STA_KEY_NOT_VALID: key is invalid
@@ -129,6 +130,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_EN_MSK = (7 << 0),
STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_AMSDU_SPP = BIT(7),
STA_KEY_FLG_KEYID_POS = 8,
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
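The only functional addition in this hunk is the SPP A-MSDU key flag. A hedged sketch of folding it into the key flags (the host-order key_flags value and the spp_amsdu condition are assumed context, not part of this patch):

/* Sketch: mark a key as SPP (signaling and payload protected) A-MSDU capable. */
static __le16 iwl_key_flags_with_spp(u16 key_flags, bool spp_amsdu)
{
        if (spp_amsdu)
                key_flags |= STA_KEY_FLG_AMSDU_SPP;
        return cpu_to_le16(key_flags);
}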
diff --git a/fw/api/stats.h b/fw/api/stats.h
index 898e62326e6c..2271b19213fa 100644
--- a/fw/api/stats.h
+++ b/fw/api/stats.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020 - 2021, 2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_stats_h__
#define __iwl_fw_api_stats_h__
#include "mac.h"
+#include "mac-cfg.h"
struct mvm_statistics_dbg {
__le32 burst_check;
@@ -412,6 +413,49 @@ struct iwl_statistics_cmd {
#define MAX_BCAST_FILTER_NUM 8
/**
+ * enum iwl_statistics_notify_type_id - type_id used in system statistics
+ * command
+ * @IWL_STATS_NTFY_TYPE_ID_OPER: request legacy statistics
+ * @IWL_STATS_NTFY_TYPE_ID_OPER_PART1: request operational part1 statistics
+ * @IWL_STATS_NTFY_TYPE_ID_OPER_PART2: request operational part2 statistics
+ * @IWL_STATS_NTFY_TYPE_ID_OPER_PART3: request operational part3 statistics
+ * @IWL_STATS_NTFY_TYPE_ID_OPER_PART4: request operational part4 statistics
+ */
+enum iwl_statistics_notify_type_id {
+ IWL_STATS_NTFY_TYPE_ID_OPER = BIT(0),
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1 = BIT(1),
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART2 = BIT(2),
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART3 = BIT(3),
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART4 = BIT(4),
+};
+
+/**
+ * enum iwl_statistics_cfg_flags - cfg_mask used in system statistics command
+ * @IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK: 0 for enable, 1 for disable
+ * @IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK: 0 for periodic, 1 for on-demand
+ * @IWL_STATS_CFG_FLG_RESET_MSK: 0 for reset statistics after
+ * sending the notification, 1 for do not reset statistics after sending
+ * the notification
+ */
+enum iwl_statistics_cfg_flags {
+ IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK = BIT(0),
+ IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK = BIT(1),
+ IWL_STATS_CFG_FLG_RESET_MSK = BIT(2),
+};
+
+/**
+ * struct iwl_system_statistics_cmd - system statistics command
+ * @cfg_mask: configuration mask, &enum iwl_statistics_cfg_flags
+ * @config_time_sec: time in sec for periodic notification
+ * @type_id_mask: type_id masks, &enum iwl_statistics_notify_type_id
+ */
+struct iwl_system_statistics_cmd {
+ __le32 cfg_mask;
+ __le32 config_time_sec;
+ __le32 type_id_mask;
+} __packed; /* STATISTICS_FW_CMD_API_S_VER_1 */
+
+/**
* enum iwl_fw_statistics_type
*
* @FW_STATISTICS_OPERATIONAL: operational statistics
@@ -447,7 +491,49 @@ struct iwl_statistics_ntfy_hdr {
}; /* STATISTICS_NTFY_HDR_API_S_VER_1 */
/**
- * struct iwl_statistics_ntfy_per_mac
+ * struct iwl_stats_ntfy_per_link
+ *
+ * @beacon_filter_average_energy: Average energy [-dBm] of the 2
+ * antennas.
+ * @air_time: air time
+ * @beacon_counter: all beacons (both filtered and not filtered)
+ * @beacon_average_energy: Average energy [-dBm] of all beacons
+ * (both filtered and not filtered)
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @rx_bytes: RX byte count
+ */
+struct iwl_stats_ntfy_per_link {
+ __le32 beacon_filter_average_energy;
+ __le32 air_time;
+ __le32 beacon_counter;
+ __le32 beacon_average_energy;
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 rx_bytes;
+} __packed; /* STATISTICS_NTFY_PER_LINK_API_S_VER_1 */
+
+/**
+ * struct iwl_stats_ntfy_part1_per_link
+ *
+ * @rx_time: rx time
+ * @tx_time: tx time
+ * @rx_action: action frames handled by FW
+ * @tx_action: action frames generated and transmitted by FW
+ * @cca_defers: cca defer count
+ * @beacon_filtered: filtered out beacons
+ */
+struct iwl_stats_ntfy_part1_per_link {
+ __le64 rx_time;
+ __le64 tx_time;
+ __le32 rx_action;
+ __le32 tx_action;
+ __le32 cca_defers;
+ __le32 beacon_filtered;
+} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_PER_LINK_API_S_VER_1 */
+
+/**
+ * struct iwl_stats_ntfy_per_mac
*
* @beacon_filter_average_energy: Average energy [-dBm] of the 2
* antennas.
@@ -459,7 +545,7 @@ struct iwl_statistics_ntfy_hdr {
* @beacon_rssi_b: beacon RSSI on antenna B
* @rx_bytes: RX byte count
*/
-struct iwl_statistics_ntfy_per_mac {
+struct iwl_stats_ntfy_per_mac {
__le32 beacon_filter_average_energy;
__le32 air_time;
__le32 beacon_counter;
@@ -470,7 +556,7 @@ struct iwl_statistics_ntfy_per_mac {
} __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */
#define IWL_STATS_MAX_BW_INDEX 5
-/** struct iwl_statistics_ntfy_per_phy
+/** struct iwl_stats_ntfy_per_phy
* @channel_load: channel load
* @channel_load_by_us: device contribution to MCLM
* @channel_load_not_by_us: other devices' contribution to MCLM
@@ -485,7 +571,7 @@ struct iwl_statistics_ntfy_per_mac {
* per channel BW. note BACK counted as 1
* @last_tx_ch_width_indx: last txed frame channel width index
*/
-struct iwl_statistics_ntfy_per_phy {
+struct iwl_stats_ntfy_per_phy {
__le32 channel_load;
__le32 channel_load_by_us;
__le32 channel_load_not_by_us;
@@ -499,23 +585,62 @@ struct iwl_statistics_ntfy_per_phy {
} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */
/**
- * struct iwl_statistics_ntfy_per_sta
+ * struct iwl_stats_ntfy_per_sta
*
* @average_energy: in fact it is minus the energy..
*/
-struct iwl_statistics_ntfy_per_sta {
+struct iwl_stats_ntfy_per_sta {
__le32 average_energy;
} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */
-#define IWL_STATS_MAX_PHY_OPERTINAL 3
+#define IWL_STATS_MAX_PHY_OPERATIONAL 3
+#define IWL_STATS_MAX_FW_LINKS (IWL_MVM_FW_MAX_LINK_ID + 1)
+
+/**
+ * struct iwl_system_statistics_notif_oper
+ *
+ * @time_stamp: time when the notification is sent from firmware
+ * @per_link: per link statistics, &struct iwl_stats_ntfy_per_link
+ * @per_phy: per phy statistics, &struct iwl_stats_ntfy_per_phy
+ * @per_sta: per sta statistics, &struct iwl_stats_ntfy_per_sta
+ */
+struct iwl_system_statistics_notif_oper {
+ __le32 time_stamp;
+ struct iwl_stats_ntfy_per_link per_link[IWL_STATS_MAX_FW_LINKS];
+ struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
+ struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX];
+} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_API_S_VER_3 */
+
+/**
+ * struct iwl_system_statistics_part1_notif_oper
+ *
+ * @time_stamp: time when the notification is sent from firmware
+ * @per_link: per link statistics &struct iwl_stats_ntfy_part1_per_link
+ * @per_phy_crc_error_stats: per phy crc error statistics
+ */
+struct iwl_system_statistics_part1_notif_oper {
+ __le32 time_stamp;
+ struct iwl_stats_ntfy_part1_per_link per_link[IWL_STATS_MAX_FW_LINKS];
+ __le32 per_phy_crc_error_stats[IWL_STATS_MAX_PHY_OPERATIONAL];
+} __packed; /* STATISTICS_FW_NTFY_OPERATIONAL_PART1_API_S_VER_4 */
+
+/**
+ * struct iwl_system_statistics_end_notif
+ *
+ * @time_stamp: time when the notification is sent from firmware
+ */
+struct iwl_system_statistics_end_notif {
+ __le32 time_stamp;
+} __packed; /* STATISTICS_FW_NTFY_END_API_S_VER_1 */
+
/**
* struct iwl_statistics_operational_ntfy
*
* @hdr: general statistics header
* @flags: bitmap of possible notification structures
- * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac
- * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy
- * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta
+ * @per_mac: per mac statistics, &struct iwl_stats_ntfy_per_mac
+ * @per_phy: per phy statistics, &struct iwl_stats_ntfy_per_phy
+ * @per_sta: per sta statistics, &struct iwl_stats_ntfy_per_sta
* @rx_time: rx time
* @tx_time: usec the radio is transmitting.
* @on_time_rf: The total time in usec the RF is awake.
@@ -524,9 +649,9 @@ struct iwl_statistics_ntfy_per_sta {
struct iwl_statistics_operational_ntfy {
struct iwl_statistics_ntfy_hdr hdr;
__le32 flags;
- struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX];
- struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL];
- struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_stats_ntfy_per_mac per_mac[MAC_INDEX_AUX];
+ struct iwl_stats_ntfy_per_phy per_phy[IWL_STATS_MAX_PHY_OPERATIONAL];
+ struct iwl_stats_ntfy_per_sta per_sta[IWL_MVM_STATION_COUNT_MAX];
__le64 rx_time;
__le64 tx_time;
__le64 on_time_rf;
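The new system statistics command is driven entirely by the two bitmaps documented above: cfg_mask selects periodic vs. on-demand behaviour and type_id_mask selects which notification groups the firmware should emit. A hedged sketch of requesting periodic operational and part1 statistics (the 5-second period is an arbitrary assumed value):

/* Sketch: request periodic operational + part1 statistics. */
static void iwl_fill_system_stats_cmd(struct iwl_system_statistics_cmd *cmd)
{
        /* DISABLE/ON_DEMAND/RESET bits all clear: enabled, periodic,
         * counters reset after each notification
         */
        cmd->cfg_mask = cpu_to_le32(0);
        cmd->config_time_sec = cpu_to_le32(5);  /* assumed period */
        cmd->type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
                                        IWL_STATS_NTFY_TYPE_ID_OPER_PART1);
}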
diff --git a/fw/api/time-event.h b/fw/api/time-event.h
index 7cc706731d70..f4b827b58bd3 100644
--- a/fw/api/time-event.h
+++ b/fw/api/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020, 2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -335,6 +335,65 @@ struct iwl_hs20_roc_res {
__le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+/*
+ * Activity types for the ROC command
+ * @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity
+ * @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity
+ * @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity
+ * @ROC_ACTIVITY_P2P_NEG: ROC for p2p negotiation (used also for TX)
+ */
+enum iwl_roc_activity {
+ ROC_ACTIVITY_HOTSPOT,
+ ROC_ACTIVITY_P2P_DISC,
+ ROC_ACTIVITY_P2P_TXRX,
+ ROC_ACTIVITY_P2P_NEG,
+ ROC_NUM_ACTIVITIES
+}; /* ROC_ACTIVITY_API_E_VER_1 */
+
+/*
+ * ROC command
+ *
+ * Command requests the firmware to remain on a channel for a certain duration.
+ *
+ * ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE )
+ *
+ * @action: action to perform, see &enum iwl_ctxt_action
+ * @activity: type of activity, see &enum iwl_roc_activity
+ * @sta_id: station id, resumed during "Remain On Channel" activity.
+ * @channel_info: &struct iwl_fw_channel_info
+ * @node_addr: node MAC address for Rx filtering
+ * @reserved: align to a dword
+ * @max_delay: max delay the ROC can start in TU
+ * @duration: remain on channel duration in TU
+ */
+struct iwl_roc_req {
+ __le32 action;
+ __le32 activity;
+ __le32 sta_id;
+ struct iwl_fw_channel_info channel_info;
+ u8 node_addr[ETH_ALEN];
+ __le16 reserved;
+ __le32 max_delay;
+ __le32 duration;
+} __packed; /* ROC_CMD_API_S_VER_3 */
+
+/*
+ * ROC notification
+ *
+ * Notification when ROC starts and when ROC ends.
+ *
+ * ( MAC_CONF_GROUP 0x3, ROC_NOTIF 0xf8 )
+ *
+ * @success: true if ROC succeeded to start
+ * @started: true if ROC started, false if ROC ended
+ * @activity: notification to which activity - &enum iwl_roc_activity
+ */
+struct iwl_roc_notif {
+ __le32 success;
+ __le32 started;
+ __le32 activity;
+} __packed; /* ROC_NOTIF_API_S_VER_1 */
+
/**
* enum iwl_mvm_session_prot_conf_id - session protection's configurations
* @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
@@ -375,8 +434,8 @@ enum iwl_mvm_session_prot_conf_id {
/**
* struct iwl_mvm_session_prot_cmd - configure a session protection
- * @id_and_color: the id and color of the mac for which this session protection
- * is sent
+ * @id_and_color: the id and color of the link (or mac, for command version 1)
+ * for which this session protection is sent
* @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE,
* see &enum iwl_ctxt_action
* @conf_id: see &enum iwl_mvm_session_prot_conf_id
@@ -397,11 +456,15 @@ struct iwl_mvm_session_prot_cmd {
__le32 duration_tu;
__le32 repetition_count;
__le32 interval;
-} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
+} __packed;
+/* SESSION_PROTECTION_CMD_API_S_VER_1 and
+ * SESSION_PROTECTION_CMD_API_S_VER_2
+ */
/**
* struct iwl_mvm_session_prot_notif - session protection started / ended
- * @mac_id: the mac id for which the session protection started / ended
+ * @mac_link_id: the mac id (or link id, for notif ver > 2) for which the
+ * session protection started / ended
* @status: 1 means success, 0 means failure
* @start: 1 means the session protection started, 0 means it ended
* @conf_id: see &enum iwl_mvm_session_prot_conf_id
@@ -410,10 +473,13 @@ struct iwl_mvm_session_prot_cmd {
* and end even the firmware could not schedule it.
*/
struct iwl_mvm_session_prot_notif {
- __le32 mac_id;
+ __le32 mac_link_id;
__le32 status;
__le32 start;
__le32 conf_id;
-} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
+} __packed;
+/* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 and
+ * SESSION_PROTECTION_NOTIFICATION_API_S_VER_3
+ */
#endif /* __iwl_fw_api_time_event_h__ */
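The new ROC command lets the driver request a remain-on-channel period directly instead of going through a time event. A hedged sketch of filling a request for a P2P negotiation ROC (FW_CTXT_ACTION_ADD comes from the wider driver's iwl_ctxt_action enum, the channel_info fill is not shown, and the delay/duration values are assumed):

/* Sketch: build a ROC request for P2P negotiation on behalf of sta_id. */
static void iwl_fill_roc_req(struct iwl_roc_req *req, u32 sta_id,
                             const u8 *node_addr)
{
        memset(req, 0, sizeof(*req));
        req->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        req->activity = cpu_to_le32(ROC_ACTIVITY_P2P_NEG);
        req->sta_id = cpu_to_le32(sta_id);
        memcpy(req->node_addr, node_addr, ETH_ALEN);
        req->max_delay = cpu_to_le32(500);      /* TU, assumed */
        req->duration = cpu_to_le32(200);       /* TU, assumed */
        /* req->channel_info is filled from the target channel (not shown) */
}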
diff --git a/fw/api/tx.h b/fw/api/tx.h
index 842360b1e995..c5277e2f8cd4 100644
--- a/fw/api/tx.h
+++ b/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -76,6 +76,8 @@ enum iwl_tx_flags {
* to a secured STA
* @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
* selection, retry limits and BT kill
+ * @IWL_TX_FLAGS_RTS: firmware used an RTS
+ * @IWL_TX_FLAGS_CTS: firmware used CTS-to-self
*/
enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
@@ -696,6 +698,7 @@ enum iwl_mvm_ba_resp_flags {
* @query_frame_cnt: SCD query frame count
* @txed: number of frames sent in the aggregation (all-TIDs)
* @done: number of frames that were Acked by the BA (all-TIDs)
+ * @rts_retry_cnt: RTS retry count
* @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
* @tx_rate: the rate the aggregation was sent at
@@ -716,7 +719,8 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
- __le16 reserved;
+ u8 rts_retry_cnt;
+ u8 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@@ -791,7 +795,8 @@ enum iwl_mac_beacon_flags {
* @reserved: reserved
* @link_id: the firmware id of the link that will use this beacon
* @tim_idx: the offset of the tim IE in the beacon
- * @tim_size: the length of the tim IE
+ * @tim_size: the length of the tim IE (version < 14)
+ * @btwt_offset: offset to the broadcast TWT IE if present (version >= 14)
* @ecsa_offset: offset to the ECSA IE if present
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
@@ -803,14 +808,18 @@ struct iwl_mac_beacon_cmd {
__le32 reserved;
__le32 link_id;
__le32 tim_idx;
- __le32 tim_size;
+ union {
+ __le32 tim_size;
+ __le32 btwt_offset;
+ };
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[];
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10,
* BEACON_TEMPLATE_CMD_API_S_VER_11,
* BEACON_TEMPLATE_CMD_API_S_VER_12,
- * BEACON_TEMPLATE_CMD_API_S_VER_13
+ * BEACON_TEMPLATE_CMD_API_S_VER_13,
+ * BEACON_TEMPLATE_CMD_API_S_VER_14
*/
struct iwl_beacon_notif {
@@ -857,7 +866,7 @@ enum iwl_dump_control {
};
/**
- * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command
* @queues_ctl: bitmap of queues to flush
* @flush_ctl: control flags
* @reserved: reserved
@@ -884,6 +893,7 @@ struct iwl_tx_path_flush_cmd {
/**
* struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the tid to flush
* @queue_num: virtual queue id
* @read_before_flush: read pointer before flush
* @read_after_flush: read pointer after flush
@@ -897,6 +907,7 @@ struct iwl_flush_queue_info {
/**
* struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: the station for which the queue was flushed
* @num_flushed_queues: number of queues in queues array
* @queues: all flushed queues
*/
diff --git a/fw/api/txq.h b/fw/api/txq.h
index e018946310d1..e6c0f928a6bb 100644
--- a/fw/api/txq.h
+++ b/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
IWL_GEN2_TRIG_TX_FIFO_VO,
};
+enum iwl_bz_tx_fifo {
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
@@ -76,7 +86,7 @@ enum iwl_tx_queue_cfg_actions {
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
};
-#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4)
+#define IWL_DEFAULT_QUEUE_SIZE_EHT (512 * 4)
#define IWL_DEFAULT_QUEUE_SIZE_HE 1024
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
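The new BZ FIFO enumeration keeps the gen2 EDCA ordering (BK, BE, VI, VO) and adds a trigger FIFO per access category. A hedged sketch of an AC-to-FIFO lookup in mac80211 order (IEEE80211_AC_* come from mac80211; the table itself is an assumption, not part of this patch):

/* Sketch: map mac80211 ACs (VO first) onto the BZ EDCA TX FIFOs. */
static const u8 iwl_bz_ac_to_tx_fifo[] = {
        [IEEE80211_AC_VO] = IWL_BZ_EDCA_TX_FIFO_VO,
        [IEEE80211_AC_VI] = IWL_BZ_EDCA_TX_FIFO_VI,
        [IEEE80211_AC_BE] = IWL_BZ_EDCA_TX_FIFO_BE,
        [IEEE80211_AC_BK] = IWL_BZ_EDCA_TX_FIFO_BK,
};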
diff --git a/fw/dbg.c b/fw/dbg.c
index 3ab6a68f1e9f..fb2ea38e89ac 100644
--- a/fw/dbg.c
+++ b/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -19,7 +19,6 @@
* @fwrt_ptr: pointer to the buffer coming from fwrt
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
- * @trans_len: length of the valid data in trans_ptr
* @fwrt_len: length of the valid data in fwrt_ptr
*/
struct iwl_fw_dump_ptrs {
@@ -880,10 +879,10 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
cpu_to_le32(fwrt->trans->hw_rev_step);
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
sizeof(dump_info->fw_human_readable));
- strncpy(dump_info->dev_human_readable, fwrt->trans->name,
- sizeof(dump_info->dev_human_readable) - 1);
- strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
- sizeof(dump_info->bus_human_readable) - 1);
+ strscpy_pad(dump_info->dev_human_readable, fwrt->trans->name,
+ sizeof(dump_info->dev_human_readable));
+ strscpy_pad(dump_info->bus_human_readable, fwrt->dev->bus->name,
+ sizeof(dump_info->bus_human_readable));
dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
dump_info->lmac_err_id[0] =
cpu_to_le32(fwrt->dump.lmac_err_id[0]);
@@ -1021,64 +1020,78 @@ struct iwl_dump_ini_region_data {
struct iwl_fwrt_dump_data *dump_data;
};
-static int
-iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
- struct iwl_dump_ini_region_data *reg_data,
- void *range_ptr, u32 range_len, int idx)
+static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt,
+ void *range_ptr, u32 addr,
+ __le32 size)
{
- struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 prph_val;
- u32 addr = le32_to_cpu(reg->addrs[idx]) +
- le32_to_cpu(reg->dev_addr.offset);
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->dev_addr.size;
- for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
- prph_val = iwl_read_prph(fwrt->trans, addr + i);
- if (iwl_trans_is_hw_error_value(prph_val))
- return -EBUSY;
- *val++ = cpu_to_le32(prph_val);
- }
+ range->range_data_size = size;
+ for (i = 0; i < le32_to_cpu(size); i += 4)
+ *val++ = cpu_to_le32(iwl_read_prph(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
static int
-iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
+iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, u32 range_len, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 addr = le32_to_cpu(reg->addrs[idx]) +
+ le32_to_cpu(reg->dev_addr.offset);
+
+ return iwl_dump_ini_prph_mac_iter_common(fwrt, range_ptr, addr,
+ reg->dev_addr.size);
+}
+
+static int
+iwl_dump_ini_prph_mac_block_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs;
+ u32 addr = le32_to_cpu(reg->dev_addr_range.offset) +
+ le32_to_cpu(pairs[idx].addr);
+
+ return iwl_dump_ini_prph_mac_iter_common(fwrt, range_ptr, addr,
+ pairs[idx].size);
+}
+
+static int iwl_dump_ini_prph_phy_iter_common(struct iwl_fw_runtime *fwrt,
+ void *range_ptr, u32 addr,
+ __le32 size, __le32 offset)
+{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1;
u32 indirect_rd_addr = WMAL_MRSPF_1;
u32 prph_val;
- u32 addr = le32_to_cpu(reg->addrs[idx]);
u32 dphy_state;
u32 dphy_addr;
int i;
range->internal_base_addr = cpu_to_le32(addr);
- range->range_data_size = reg->dev_addr.size;
+ range->range_data_size = size;
if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
indirect_wr_addr = WMAL_INDRCT_CMD1;
- indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset);
- indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset);
+ indirect_wr_addr += le32_to_cpu(offset);
+ indirect_rd_addr += le32_to_cpu(offset);
if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;
- dphy_addr = (reg->dev_addr.offset) ? WFPM_LMAC2_PS_CTL_RW :
- WFPM_LMAC1_PS_CTL_RW;
+ dphy_addr = (offset) ? WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW;
dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr);
- for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ for (i = 0; i < le32_to_cpu(size); i += 4) {
if (dphy_state == HBUS_TIMEOUT ||
(dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) !=
WFPM_PHYRF_STATE_ON) {
@@ -1097,6 +1110,33 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int
+iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 addr = le32_to_cpu(reg->addrs[idx]);
+
+ return iwl_dump_ini_prph_phy_iter_common(fwrt, range_ptr, addr,
+ reg->dev_addr.size,
+ reg->dev_addr.offset);
+}
+
+static int
+iwl_dump_ini_prph_phy_block_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs;
+ u32 addr = le32_to_cpu(pairs[idx].addr);
+
+ return iwl_dump_ini_prph_phy_iter_common(fwrt, range_ptr, addr,
+ pairs[idx].size,
+ reg->dev_addr_range.offset);
+}
+
static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, u32 range_len, int idx)
@@ -1128,17 +1168,13 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
le32_to_cpu(reg->dev_addr.offset);
int i;
- /* we shouldn't get here if the trans doesn't have read_config32 */
- if (WARN_ON_ONCE(!trans->ops->read_config32))
- return -EOPNOTSUPP;
-
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->dev_addr.size;
for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
int ret;
u32 tmp;
- ret = trans->ops->read_config32(trans, addr + i, &tmp);
+ ret = iwl_trans_read_config32(trans, addr + i, &tmp);
if (ret < 0)
return ret;
@@ -1370,6 +1406,53 @@ out:
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int
+iwl_dump_ini_prph_snps_dphyip_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, u32 range_len, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ __le32 *val = range->data;
+ __le32 offset = reg->dev_addr.offset;
+ u32 indirect_rd_wr_addr = DPHYIP_INDIRECT;
+ u32 addr = le32_to_cpu(reg->addrs[idx]);
+ u32 dphy_state, dphy_addr, prph_val;
+ int i;
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = reg->dev_addr.size;
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans))
+ return -EBUSY;
+
+ indirect_rd_wr_addr += le32_to_cpu(offset);
+
+ dphy_addr = offset ? WFPM_LMAC2_PS_CTL_RW : WFPM_LMAC1_PS_CTL_RW;
+ dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr);
+
+ for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ if (dphy_state == HBUS_TIMEOUT ||
+ (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) !=
+ WFPM_PHYRF_STATE_ON) {
+ *val++ = cpu_to_le32(WFPM_DPHY_OFF);
+ continue;
+ }
+
+ iwl_write_prph_no_grab(fwrt->trans, indirect_rd_wr_addr,
+ addr + i);
+ /* wait a bit for value to be ready in register */
+ udelay(1);
+ prph_val = iwl_read_prph_no_grab(fwrt->trans,
+ indirect_rd_wr_addr);
+ *val++ = cpu_to_le32((prph_val & DPHYIP_INDIRECT_RD_MSK) >>
+ DPHYIP_INDIRECT_RD_SHIFT);
+ }
+
+ iwl_trans_release_nic_access(fwrt->trans);
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
struct iwl_ini_rxf_data {
u32 fifo_num;
u32 size;
@@ -1635,10 +1718,12 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
/**
* mask_apply_and_normalize - applies mask on val and normalize the result
*
- * The normalization is based on the first set bit in the mask
- *
* @val: value
* @mask: mask to apply and to normalize with
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * Returns: the extracted value
*/
static u32 mask_apply_and_normalize(u32 val, u32 mask)
{
@@ -1781,6 +1866,16 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
return iwl_tlv_array_len(reg_data->reg_tlv, reg, addrs);
}
+static u32
+iwl_dump_ini_mem_block_ranges(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ size_t size = sizeof(struct iwl_fw_ini_addr_size);
+
+ return iwl_tlv_array_len_with_size(reg_data->reg_tlv, reg, size);
+}
+
static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1867,6 +1962,25 @@ static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt,
}
static u32
+iwl_dump_ini_mem_block_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_addr_size *pairs = (void *)reg->addrs;
+ u32 ranges = iwl_dump_ini_mem_block_ranges(fwrt, reg_data);
+ u32 size = sizeof(struct iwl_fw_ini_error_dump);
+ int range;
+
+ if (!ranges)
+ return 0;
+
+ for (range = 0; range < ranges; range++)
+ size += le32_to_cpu(pairs[range].size);
+
+ return size + ranges * sizeof(struct iwl_fw_ini_error_dump_range);
+}
+
+static u32
iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -2078,15 +2192,16 @@ struct iwl_dump_ini_mem_ops {
};
/**
- * iwl_dump_ini_mem
- *
- * Creates a dump tlv and copy a memory region into it.
- * Returns the size of the current dump tlv or 0 if failed
+ * iwl_dump_ini_mem - dump memory region
*
* @fwrt: fw runtime struct
* @list: list to add the dump tlv to
* @reg_data: memory region
* @ops: memory dump operations
+ *
+ * Creates a dump tlv and copy a memory region into it.
+ *
+ * Returns: the size of the current dump tlv or 0 if failed
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_dump_ini_region_data *reg_data,
@@ -2305,9 +2420,12 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_debug_info_tlv *debug_info =
(void *)node->tlv.data;
+ BUILD_BUG_ON(sizeof(cfg_name->cfg_name) !=
+ sizeof(debug_info->debug_cfg_name));
+
cfg_name->image_type = debug_info->image_type;
cfg_name->cfg_name_len =
- cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ cpu_to_le32(sizeof(cfg_name->cfg_name));
memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
sizeof(cfg_name->cfg_name));
cfg_name++;
@@ -2413,6 +2531,18 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_prph_phy_iter,
},
+ [IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_block_ranges,
+ .get_size = iwl_dump_ini_mem_block_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_prph_mac_block_iter,
+ },
+ [IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_block_ranges,
+ .get_size = iwl_dump_ini_mem_block_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_prph_phy_block_iter,
+ },
[IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
[IWL_FW_INI_REGION_PAGING] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
@@ -2450,6 +2580,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
+ [IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP] = {
+ .get_num_of_ranges = iwl_dump_ini_mem_ranges,
+ .get_size = iwl_dump_ini_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+ .fill_range = iwl_dump_ini_prph_snps_dphyip_iter,
+ },
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
@@ -2492,7 +2628,9 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
- if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY &&
+ if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
+ reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE ||
+ reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) &&
tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) {
IWL_WARN(fwrt,
"WRT: trying to collect phy prph at time point: %d, skipping\n",
@@ -2731,7 +2869,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq, &wk_data->wk,
+ usecs_to_jiffies(delay));
return 0;
}
@@ -2933,11 +3072,10 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
- u32 policy;
- u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
+ /* also checks 'desc' for pre-ini mode, since that shadows in union */
if (!dump_data->trig) {
IWL_ERR(fwrt, "dump trigger data is not set\n");
goto out;
@@ -2965,13 +3103,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
- policy = le32_to_cpu(dump_data->trig->apply_policy);
- time_point = le32_to_cpu(dump_data->trig->time_point);
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
+ u32 time_point = le32_to_cpu(dump_data->trig->time_point);
- if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
- IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
- iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
}
+
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
@@ -3033,7 +3174,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq,
+ &fwrt->dump.wks[idx].wk,
+ usecs_to_jiffies(delay));
return 0;
}
@@ -3205,7 +3348,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
{
int ret __maybe_unused = 0;
- if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+ if (!iwl_trans_fw_running(fwrt->trans))
return;
if (fw_has_capa(&fwrt->fw->ucode_capa,
@@ -3228,3 +3371,47 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
#endif
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);
+
+void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_dbg_config_cmd cmd = {
+ .type = cpu_to_le32(DEBUG_TOKEN_CONFIG_TYPE),
+ .conf = cpu_to_le32(IWL_FW_DBG_CONFIG_TOKEN),
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LONG_GROUP, LDBG_CONFIG_CMD),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u32 preset = u32_get_bits(fwrt->trans->dbg.domains_bitmap,
+ GENMASK(31, IWL_FW_DBG_DOMAIN_POS + 1));
+
+ /* supported starting from 9000 devices */
+ if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ return;
+
+ if (fwrt->trans->dbg.yoyo_bin_loaded || (preset && preset != 1))
+ return;
+
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_disable_dbg_asserts);
+
+void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_dbg_params params = {0};
+
+ iwl_fw_dbg_stop_sync(fwrt);
+
+ if (fw_has_api(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR)) {
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DEBUG_GROUP, FW_CLEAR_BUFFER),
+ };
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ }
+
+ iwl_dbg_tlv_init_cfg(fwrt);
+ iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_clear_monitor_buf);
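iwl_fw_dbg_clear_monitor_buf() above stops recording, asks newer firmware (via FW_CLEAR_BUFFER) to clear its internal buffer, re-applies the INI configuration and restarts recording. A hedged sketch of how a debugfs-style "clear" hook might call it (the handler signature is modelled on the existing fw/debugfs.c writers and is an assumption; only the exported function comes from this patch):

/* Sketch: a debugfs-style trigger that clears the firmware monitor buffer. */
static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_fw_runtime *fwrt,
                                            char *buf, size_t count)
{
        if (!iwl_trans_fw_running(fwrt->trans))
                return -EIO;

        iwl_fw_dbg_clear_monitor_buf(fwrt);
        return count;
}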
diff --git a/fw/dbg.h b/fw/dbg.h
index 4227fbd2b977..98d56e778d99 100644
--- a/fw/dbg.h
+++ b/fw/dbg.h
@@ -306,8 +306,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
}
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
-
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
struct iwl_lmac_alive *lmac,
struct iwl_umac_alive *umac)
@@ -329,6 +327,8 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data);
+void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt);
#define IWL_FW_CHECK_FAILED(_obj, _fmt, ...) \
IWL_ERR_LIMIT(_obj, _fmt, __VA_ARGS__)
diff --git a/fw/debugfs.c b/fw/debugfs.c
index 3cdbc6ac7ae5..893b21fcaf87 100644
--- a/fw/debugfs.c
+++ b/fw/debugfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -141,7 +141,11 @@ static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
event_cfg.enabled_severities = cpu_to_le32(enabled_severities);
- ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+ if (fwrt->ops && fwrt->ops->send_hcmd)
+ ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+ else
+ ret = -EPERM;
+
IWL_INFO(fwrt,
"sent host event cfg with enabled_severities: %u, ret: %d\n",
enabled_severities, ret);
@@ -226,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
.data = { NULL, },
};
- if (fwrt->ops && fwrt->ops->fw_running &&
- !fwrt->ops->fw_running(fwrt->ops_ctx))
+ if (!iwl_trans_fw_running(fwrt->trans))
return -EIO;
if (count < header_size + 1 || count > 1024 * 4)
@@ -342,6 +345,12 @@ static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
" %d: %d\n",
IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT,
has_capa);
+ has_capa = fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT) ? 1 : 0;
+ seq_printf(seq,
+ " %d: %d\n",
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT,
+ has_capa);
seq_puts(seq, "fw_api_ver:\n");
}
diff --git a/fw/dump.c b/fw/dump.c
index 5876f917e536..8f107ceec407 100644
--- a/fw/dump.c
+++ b/fw/dump.c
@@ -182,8 +182,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
base = fwrt->fw->inst_errlog_ptr;
}
- if ((fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && !base) ||
- (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ && base < 0x400000)) {
+ if (!base) {
IWL_ERR(fwrt,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
diff --git a/fw/error-dump.h b/fw/error-dump.h
index f5e08988dc7b..e63b08b7d336 100644
--- a/fw/error-dump.h
+++ b/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2014, 2018-2024 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,7 +16,7 @@
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
- * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents
* @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
* &struct iwl_fw_error_dump_txcmd packets
* @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
@@ -24,21 +24,24 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
* @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
* &struct iwl_fw_error_dump_rb
- * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO data
* @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
* for that reason is not in use in any other place in the Linux Wi-Fi
* stack.
* @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
* which we get from the fw after ALIVE. The content is structured as
* &struct iwl_fw_error_dump_smem_cfg.
+ * @IWL_FW_ERROR_DUMP_D3_DEBUG_DATA: D3 debug data
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -59,8 +62,6 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MEM_CFG = 16,
IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
-
- IWL_FW_ERROR_DUMP_MAX,
};
/**
@@ -247,7 +248,7 @@ struct iwl_fw_error_dump_mem {
#define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24))
/**
- * struct iwl_fw_error_dump_data - data for one type
+ * struct iwl_fw_ini_error_dump_data - data for one type
* @type: &enum iwl_fw_ini_region_type
* @sub_type: sub type id
* @sub_type_ver: sub type version
@@ -277,7 +278,7 @@ struct iwl_fw_ini_dump_entry {
} __packed;
/**
- * struct iwl_fw_error_dump_file - header of dump file
+ * struct iwl_fw_ini_dump_file_hdr - header of dump file
* @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER
* @file_len: the length of all the file including the header
*/
@@ -310,9 +311,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
@@ -442,7 +443,7 @@ struct iwl_fw_ini_err_table_dump {
* struct iwl_fw_error_dump_rb - content of an Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
- * @reserved:
+ * @reserved: reserved
* @data: the content of the Receive Buffer
*/
struct iwl_fw_error_dump_rb {
@@ -488,7 +489,7 @@ struct iwl_fw_ini_special_device_memory {
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
- * @reserved:
+ * @reserved: reserved
* @data: the content of the page block
*/
struct iwl_fw_error_dump_paging {
@@ -511,6 +512,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
/**
* enum iwl_fw_dbg_trigger - triggers available
*
+ * @FW_DBG_TRIGGER_INVALID: invalid trigger value
* @FW_DBG_TRIGGER_USER: trigger log collection by user
* This should not be defined as a trigger to the driver, but a value the
* driver should set to indicate that the trigger was initiated by the
@@ -530,14 +532,15 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
- * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
- * threshold.
- * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
+ * goes above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
* @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts
* @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
* in the driver.
+ * @FW_DBG_TRIGGER_MAX: beyond triggers, number for sizing arrays etc.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
diff --git a/fw/file.h b/fw/file.h
index b36e9613a52c..ae05227b6153 100644
--- a/fw/file.h
+++ b/fw/file.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2008-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -20,7 +20,7 @@ struct iwl_ucode_header {
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
+ u8 data[]; /* in same order as sizes */
} v1;
struct {
__le32 build; /* build number */
@@ -29,7 +29,7 @@ struct iwl_ucode_header {
__le32 init_size; /* bytes of init code */
__le32 init_data_size; /* bytes of init data */
__le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
+ u8 data[]; /* in same order as sizes */
} v2;
} u;
};
@@ -216,6 +216,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* ADD_MODIFY_STA_KEY_API_S_VER_2.
* @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
@@ -239,10 +240,21 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
* STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API
+ * is supported by FW, this indicates the RTT confidence value
* @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar
* version tables.
* @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
- * SCAN_CONFIG_DB_CMD_API_S.
+ * SCAN_CONFIG_DB_CMD_API_S.
+ * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell
+ * number of APs in the 5 GHz band
+ * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification
+ * @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx
+ * logic.
+ * @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug
+ * internal buffer
+ * @IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD: Firmware doesn't need the host to
+ * configure the smart fifo
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -280,13 +292,21 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
+ /* API Set 2 */
+ IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66,
+ IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = (__force iwl_ucode_tlv_api_t)68,
-
-#ifdef __CHECKER__
- /* sparse says it cannot increment the previous enum member */
-#define NUM_IWL_UCODE_TLV_API 128
-#else
NUM_IWL_UCODE_TLV_API
+/*
+ * This construction makes both sparse (which cannot increment the previous
+ * member due to its bitwise type) and kernel-doc (which doesn't understand
+ * the ifdef/else properly) work.
+ */
+#ifdef __CHECKER__
+#define __CHECKER_NUM_IWL_UCODE_TLV_API 128
+ = (__force iwl_ucode_tlv_api_t)__CHECKER_NUM_IWL_UCODE_TLV_API,
+#define NUM_IWL_UCODE_TLV_API __CHECKER_NUM_IWL_UCODE_TLV_API
#endif
};
@@ -372,6 +392,11 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
+ * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
+ * protected) A-MSDU.
+ * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
+ * @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise
+ * passive channels
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -457,6 +482,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106,
@@ -468,12 +494,19 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_OFFLOAD_BTM_SUPPORT = (__force iwl_ucode_tlv_capa_t)113,
IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114,
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
-
-#ifdef __CHECKER__
- /* sparse says it cannot increment the previous enum member */
-#define NUM_IWL_UCODE_TLV_CAPA 128
-#else
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
+ IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
NUM_IWL_UCODE_TLV_CAPA
+/*
+ * This construction makes both sparse (which cannot increment the previous
+ * member due to its bitwise type) and kernel-doc (which doesn't understand
+ * the ifdef/else properly) work.
+ */
+#ifdef __CHECKER__
+#define __CHECKER_NUM_IWL_UCODE_TLV_CAPA 128
+ = (__force iwl_ucode_tlv_capa_t)__CHECKER_NUM_IWL_UCODE_TLV_CAPA,
+#define NUM_IWL_UCODE_TLV_CAPA __CHECKER_NUM_IWL_UCODE_TLV_CAPA
#endif
};
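For readers unfamiliar with the construction used in both TLV enums above, here is a minimal standalone sketch of the same pattern. It assumes the kernel's sparse annotations (__bitwise/__force from <linux/types.h>) are available; the names are hypothetical and only illustrate how the counter member stays visible to kernel-doc while sparse builds pin it to a fixed value through a macro.

/* Minimal sketch, assuming kernel sparse annotations; names are illustrative. */
typedef unsigned int __bitwise example_flag_t;

enum example_flags {
	EXAMPLE_FLAG_FIRST = (__force example_flag_t)0,
	EXAMPLE_FLAG_LAST = (__force example_flag_t)59,
	NUM_EXAMPLE_FLAGS
/*
 * Under sparse (__CHECKER__) the counter cannot be derived by incrementing
 * the previous bitwise member, so it is forced to a fixed value instead,
 * while kernel-doc still sees the unconditional member above.
 */
#ifdef __CHECKER__
#define __CHECKER_NUM_EXAMPLE_FLAGS 128
	= (__force example_flag_t)__CHECKER_NUM_EXAMPLE_FLAGS,
#define NUM_EXAMPLE_FLAGS __CHECKER_NUM_EXAMPLE_FLAGS
#endif
};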
@@ -549,6 +582,7 @@ enum iwl_fw_dbg_reg_operator {
* struct iwl_fw_dbg_reg_op - an operation on a register
*
* @op: &enum iwl_fw_dbg_reg_operator
+ * @reserved: reserved
* @addr: offset of the register
* @val: value
*/
@@ -595,6 +629,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
+ * @reserved: reserved
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -705,6 +740,8 @@ enum iwl_fw_dbg_trigger_vif_type {
* @trig_dis_ms: the time, in milliseconds, after an occurrence of this
* trigger in which another occurrence should be ignored.
* @flags: &enum iwl_fw_dbg_trigger_flags
+ * @reserved: reserved (for alignment)
+ * @data: trigger data
*/
struct iwl_fw_dbg_trigger_tlv {
__le32 id;
@@ -745,7 +782,7 @@ struct iwl_fw_dbg_trigger_missed_bcon {
/**
* struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
- * cmds: the list of commands to trigger the collection on
+ * @cmds: the list of commands to trigger the collection on
*/
struct iwl_fw_dbg_trigger_cmd {
struct cmd {
@@ -755,7 +792,7 @@ struct iwl_fw_dbg_trigger_cmd {
} __packed;
/**
- * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
* @stop_offset: the offset of the value to be monitored
* @stop_threshold: the threshold above which to collect
* @start_offset: the offset of the value to be monitored
@@ -965,4 +1002,6 @@ static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
_iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), \
sizeof(_struct_ptr->_memb[0]))
+#define iwl_tlv_array_len_with_size(_tlv_ptr, _struct_ptr, _size) \
+ _iwl_tlv_array_len((_tlv_ptr), sizeof(*(_struct_ptr)), _size)
#endif /* __iwl_fw_file_h__ */
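The iwl_tlv_array_len_with_size() addition above only differs from the existing iwl_tlv_array_len() in passing an explicit element size instead of deriving it from a struct member. As a rough standalone analogue (an assumption about the helper's arithmetic, not the driver's actual _iwl_tlv_array_len() body), the computation amounts to:

/* Sketch only: how many trailing array elements of elem_size bytes fit in a
 * TLV payload of tlv_data_len bytes after the fixed part of the structure.
 */
#include <stddef.h>

static inline size_t example_tlv_array_len(size_t tlv_data_len,
					   size_t fixed_size, size_t elem_size)
{
	if (tlv_data_len < fixed_size)
		return 0;
	return (tlv_data_len - fixed_size) / elem_size;
}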
diff --git a/fw/img.h b/fw/img.h
index 8d0d58d61892..96bda80632f3 100644
--- a/fw/img.h
+++ b/fw/img.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -198,7 +198,7 @@ struct iwl_dump_exclude {
struct iwl_fw {
u32 ucode_ver;
- char fw_version[64];
+ char fw_version[128];
/* ucode images */
struct fw_img img[IWL_UCODE_TYPE_MAX];
diff --git a/fw/init.c b/fw/init.c
index 135bd48bfe9f..d8b083be5b6b 100644
--- a/fw/init.c
+++ b/fw/init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2021, 2024 Intel Corporation
*/
#include "iwl-drv.h"
#include "runtime.h"
@@ -135,7 +135,9 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
struct iwl_trans_rxq_dma_data data;
cmd->data[i].q_num = i + 1;
- iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+ ret = iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+ if (ret)
+ goto out;
cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
cmd->data[i].urbd_stts_wrptr =
@@ -149,6 +151,7 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+out:
kfree(cmd);
if (ret)
diff --git a/fw/notif-wait.h b/fw/notif-wait.h
index 49e8ba11b6a8..0e49794911c1 100644
--- a/fw/notif-wait.h
+++ b/fw/notif-wait.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2023 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_notif_wait_h__
@@ -25,6 +25,7 @@ struct iwl_notif_wait_data {
* returns true, the wait is over, if it returns false then
* the waiter stays blocked. If no function is given, any
* of the listed commands will unblock the waiter.
+ * @fn_data: pointer to pass to the @fn's data argument
* @cmds: command IDs
* @n_cmds: number of command IDs
* @triggered: waiter should be woken up
diff --git a/fw/pnvm.c b/fw/pnvm.c
index 650e4bde9c17..1195e708caa9 100644
--- a/fw/pnvm.c
+++ b/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -12,6 +12,8 @@
#include "fw/api/alive.h"
#include "fw/uefi.h"
+#define IWL_PNVM_REDUCED_CAP_BIT BIT(25)
+
struct iwl_pnvm_section {
__le32 offset;
const u8 data[];
@@ -173,6 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
+ u32 rf_type;
len -= sizeof(*tlv);
tlv = (const void *)data;
@@ -201,6 +204,16 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
+ trans->reduced_cap_sku = false;
+ rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
+ if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type == IWL_CFG_RF_TYPE_FM)
+ trans->reduced_cap_sku = true;
+
+ IWL_DEBUG_FW(trans,
+ "Reduced SKU device %d\n",
+ trans->reduced_cap_sku);
+
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
@@ -239,7 +252,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
}
new_len = pnvm->size;
- *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
if (!*data)
@@ -255,21 +268,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
struct pnvm_sku_package *package;
u8 *image = NULL;
- /* First attempt to get the PNVM from BIOS */
- package = iwl_uefi_get_pnvm(trans_p, len);
- if (!IS_ERR_OR_NULL(package)) {
- if (*len >= sizeof(*package)) {
- /* we need only the data */
- *len -= sizeof(*package);
- image = kmemdup(package->data, *len, GFP_KERNEL);
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (trans_p->sku_id[2]) {
+ package = iwl_uefi_get_pnvm(trans_p, len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (*len >= sizeof(*package)) {
+ /* we need only the data */
+ *len -= sizeof(*package);
+ image = kvmemdup(package->data,
+ *len, GFP_KERNEL);
+ }
+ /*
+ * free package regardless of whether kvmemdup
+ * succeeded
+ */
+ kfree(package);
+ if (image)
+ return image;
}
- /* free package regardless of whether kmemdup succeeded */
- kfree(package);
- if (image)
- return image;
}
- /* If it's not available, try from the filesystem */
+ /* If it's not available, or for Intel SKU, try from the filesystem */
if (iwl_pnvm_get_from_fs(trans_p, &image, len))
return NULL;
return image;
@@ -314,7 +333,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
set:
iwl_trans_set_pnvm(trans, capa);
free:
- kfree(data);
+ kvfree(data);
kfree(pnvm_data);
}
diff --git a/fw/regulatory.c b/fw/regulatory.c
new file mode 100644
index 000000000000..560a91998cc4
--- /dev/null
+++ b/fw/regulatory.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/dmi.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "regulatory.h"
+#include "fw/runtime.h"
+#include "fw/uefi.h"
+
+#define GET_BIOS_TABLE(__name, ...) \
+do { \
+ int ret = -ENOENT; \
+ if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \
+ ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \
+ if (ret < 0) \
+ ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \
+ return ret; \
+} while (0)
+
+#define IWL_BIOS_TABLE_LOADER(__name) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \
+{GET_BIOS_TABLE(__name, fwrt); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \
+ data_type * data) \
+{GET_BIOS_TABLE(__name, fwrt, data); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+IWL_BIOS_TABLE_LOADER(wrds_table);
+IWL_BIOS_TABLE_LOADER(ewrd_table);
+IWL_BIOS_TABLE_LOADER(wgds_table);
+IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
+IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
+IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
+IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
+IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
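For reference, expanding IWL_BIOS_TABLE_LOADER_DATA(wbem, u32) above by hand yields roughly the following (a simplified sketch of the generated code, with the do/while wrapper flattened): the UEFI variable is tried first when the WIFI GUID variables are locked, and the ACPI table is the fallback.

int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *data)
{
	int ret = -ENOENT;

	/* prefer the UEFI variable when the WIFI GUID lock is engaged */
	if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)
		ret = iwl_uefi_get_wbem(fwrt, data);
	/* otherwise, or if the UEFI read failed, fall back to ACPI */
	if (ret < 0)
		ret = iwl_acpi_get_wbem(fwrt, data);
	return ret;
}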
+
+
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "GOOGLE-ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ { .ident = "RAZER",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id dmi_tas_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "LENOVO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "MSI",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
+{
+ int i, j;
+
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < n_profiles; i++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
+
+ chain->max_tx_power =
+ cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+ chain->chain_a =
+ fwrt->geo_profiles[i].bands[j].chains[0];
+ chain->chain_b =
+ fwrt->geo_profiles[i].bands[j].chains[1];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j,
+ fwrt->geo_profiles[i].bands[j].chains[0],
+ fwrt->geo_profiles[i].bands[j].chains[1],
+ fwrt->geo_profiles[i].bands[j].max);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
+ int i, j;
+
+ for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
+ if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
+ }
+
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < n_subbands; j++) {
+ per_chain[i * n_subbands + j] =
+ cpu_to_le16(prof->chains[i].subbands[j]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->chains[i].subbands[j]);
+ }
+ }
+
+ return 0;
+}
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
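A hypothetical caller sketch (the table and subband counts are illustrative assumptions): the per_chain buffer is laid out as [table][chain][subband], so it needs n_tables * BIOS_SAR_NUM_CHAINS * n_subbands __le16 entries, and the return value distinguishes a disabled profile (1) from hard errors (negative).

static int example_fill_sar(struct iwl_fw_runtime *fwrt)
{
	/* one table, two chains, IWL_NUM_SUB_BANDS_V2 subbands per chain */
	__le16 per_chain[1 * BIOS_SAR_NUM_CHAINS * IWL_NUM_SUB_BANDS_V2] = {};
	int ret;

	/* use BIOS SAR profile 1 for both chain A and chain B */
	ret = iwl_sar_fill_profile(fwrt, per_chain, 1,
				   IWL_NUM_SUB_BANDS_V2, 1, 1);
	if (ret < 0)
		return ret;	/* forbidden or out-of-range profile */
	if (ret == 1)
		return 0;	/* a profile is disabled, nothing to send */

	/* ... copy per_chain into the TX power command here ... */
	return 0;
}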
+
+static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
+ int subband)
+{
+ s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
+
+ if ((subband == 0 &&
+ (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
+ (subband != 0 &&
+ (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
+ return false;
+ }
+ return true;
+}
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+ bool send_ppag_always;
+
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD), 1);
+ /*
+ * Starting from ver 4, driver needs to send the PPAG CMD regardless
+ * if PPAG is enabled/disabled or valid/invalid.
+ */
+ send_ppag_always = cmd_ver > 3;
+
+ /* Don't send PPAG if it is disabled */
+ if (!send_ppag_always && !fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+
+ IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver >= 1) {
+ /* in this case FW supports revision 0 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d, send truncated table\n",
+ fwrt->ppag_ver);
+ }
+ } else if (cmd_ver >= 2 && cmd_ver <= 6) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+ /* in this case FW supports revisions 1,2 or 3 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is 0, send padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ /* ppag mode */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits were read from bios: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ if (cmd_ver == 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
+ else if (cmd_ver < 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
+
+ if ((cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
+ (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
+ }
+
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ if (!send_ppag_always &&
+ !iwl_ppag_value_valid(fwrt, i, j))
+ return -EINVAL;
+
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
+
+bool iwl_is_tas_approved(void)
+{
+ return dmi_check_system(dmi_tas_approved_list);
+}
+IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection)
+{
+ u8 override_iec = u32_get_bits(tas_selection,
+ IWL_WTAS_OVERRIDE_IEC_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
+ int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+
+ tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_data->override_tas_iec = override_iec;
+ tas_data->enable_tas_iec = enabled_iec;
+
+ return enabled;
+}
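A worked decode of the masks above, using an illustrative BIOS value:

/* tas_selection = 0x10005:
 *   bit 0  (IWL_WTAS_ENABLED_MSK)      set   -> TAS enabled (non-zero return)
 *   bit 1  (IWL_WTAS_OVERRIDE_IEC_MSK) clear -> override_tas_iec = 0
 *   bit 2  (IWL_WTAS_ENABLE_IEC_MSK)   set   -> enable_tas_iec = 1
 *   bit 16 (IWL_WTAS_USA_UHB_MSK)      set   -> usa_tas_uhb_allowed = 1
 */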
+
+static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+ __le32 config_bitmap = 0;
+
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_HR1:
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &val);
+
+ if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ break;
+ default:
+ break;
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
+ &val);
+ /*
+		 * Enable China 2022 regulatory support if the BIOS object
+		 * does not exist or if it is enabled in the BIOS.
+ */
+ if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
+ }
+
+ return config_bitmap;
+}
+
+static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
+{
+ size_t cmd_size;
+
+ switch (cmd_ver) {
+ case 12:
+ case 11:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd);
+ break;
+ case 10:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10);
+ break;
+ case 9:
+ case 8:
+ case 7:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
+ break;
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ case 5:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
+ break;
+ case 4:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
+ break;
+ case 3:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
+ break;
+ case 2:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
+ break;
+ default:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
+ return cmd_size;
+}
+
+int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size)
+{
+ int ret;
+ u32 value;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), 1);
+
+ memset(cmd, 0, sizeof(*cmd));
+ *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
+
+ cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+ if (!ret)
+ cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+ if (!ret) {
+ if (cmd_ver < 9)
+ value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8;
+ else
+ value &= DSM_UNII4_ALLOW_BITMAP;
+
+ cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ if (!ret) {
+ if (cmd_ver < 8)
+ value &= ~ACTIVATE_5G2_IN_WW_MASK;
+ if (cmd_ver < 12)
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
+
+ cmd->chan_state_active_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (!ret)
+ cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+ if (!ret)
+ cmd->force_disable_channels_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
+ if (!ret)
+ cmd->edt_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_wbem(fwrt, &value);
+ if (!ret)
+ cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+ if (!ret)
+ cmd->oem_11be_allow_bitmap = cpu_to_le32(value);
+
+ if (cmd->config_bitmap ||
+ cmd->oem_uhb_allow_bitmap ||
+ cmd->oem_11ax_allow_bitmap ||
+ cmd->oem_unii4_allow_bitmap ||
+ cmd->chan_state_active_bitmap ||
+ cmd->force_disable_channels_bitmap ||
+ cmd->edt_bitmap ||
+ cmd->oem_320mhz_allow_bitmap ||
+ cmd->oem_11be_allow_bitmap) {
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->config_bitmap),
+ le32_to_cpu(cmd->oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
+ le32_to_cpu(cmd->oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd->chan_state_active_bitmap),
+ cmd_ver);
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd->force_disable_channels_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->edt_bitmap),
+ le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_11be_allow_bitmap));
+ } else {
+ return 1;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_lari_config);
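A hypothetical caller sketch (the host-command plumbing is elided and assumed): iwl_fill_lari_config() returns 0 when at least one bitmap was populated and the command should be sent, and 1 when there is nothing to configure.

static void example_send_lari(struct iwl_fw_runtime *fwrt)
{
	struct iwl_lari_config_change_cmd cmd;
	size_t cmd_size;

	if (iwl_fill_lari_config(fwrt, &cmd, &cmd_size))
		return;	/* no BIOS-provided bitmaps, nothing to send */

	/*
	 * ... send WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE)
	 * with payload &cmd and length cmd_size ...
	 */
}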
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ GET_BIOS_TABLE(dsm, fwrt, func, value);
+}
+IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
diff --git a/fw/regulatory.h b/fw/regulatory.h
new file mode 100644
index 000000000000..e2c056f483c1
--- /dev/null
+++ b/fw/regulatory.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+
+#ifndef __fw_regulatory_h__
+#define __fw_regulatory_h__
+
+#include "fw/img.h"
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/img.h"
+#include "iwl-trans.h"
+
+#define BIOS_SAR_MAX_PROFILE_NUM 4
+/*
+ * Each SAR profile has (up to, depending on the table revision) 4 chains:
+ * chain A, chain B, chain A when in CDB, chain B when in CDB
+ */
+#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4
+#define BIOS_SAR_NUM_CHAINS 2
+#define BIOS_SAR_MAX_SUB_BANDS_NUM 11
+
+#define BIOS_GEO_NUM_CHAINS 2
+#define BIOS_GEO_MAX_NUM_BANDS 3
+#define BIOS_GEO_MAX_PROFILE_NUM 8
+#define BIOS_GEO_MIN_PROFILE_NUM 3
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+
+/* PPAG gain value bounds in 1/8 dBm */
+#define IWL_PPAG_MIN_LB -16
+#define IWL_PPAG_MAX_LB 24
+#define IWL_PPAG_MIN_HB -16
+#define IWL_PPAG_MAX_HB 40
+
+#define IWL_PPAG_ETSI_CHINA_MASK 3
+#define IWL_PPAG_REV3_MASK 0x7FF
+
+#define IWL_WTAS_ENABLED_MSK 0x1
+#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
+#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_USA_UHB_MSK BIT(16)
+
+/*
+ * The profile for revision 2 is a superset of revision 1, which is in
+ * turn a superset of revision 0. So we can store all revisions
+ * inside revision 2, which is what we represent here.
+ */
+
+/*
+ * struct iwl_sar_profile_chain - per-chain values of a SAR profile
+ * @subbands: the SAR value for each subband
+ */
+struct iwl_sar_profile_chain {
+ u8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+/*
+ * struct iwl_sar_profile - SAR profile from SAR tables
+ * @enabled: whether the profile is enabled or not
+ * @chains: per-chain SAR values
+ */
+struct iwl_sar_profile {
+ bool enabled;
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+
+/*
+ * struct iwl_geo_profile_band - per-band geo SAR offsets
+ * @max: the max tx power allowed for the band
+ * @chains: SAR offsets values for each chain
+ */
+struct iwl_geo_profile_band {
+ u8 max;
+ u8 chains[BIOS_GEO_NUM_CHAINS];
+};
+
+/*
+ * struct iwl_geo_profile - geo profile
+ * @bands: per-band table of the SAR offsets
+ */
+struct iwl_geo_profile {
+ struct iwl_geo_profile_band bands[BIOS_GEO_MAX_NUM_BANDS];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+struct iwl_tas_data {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+};
+
+/* For DSM revision 0 and 4 */
+enum iwl_dsm_funcs {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
+ DSM_FUNC_REGULATORY_CONFIG = 4,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
+ DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
+ DSM_FUNC_RFI_CONFIG = 11,
+ DSM_FUNC_ENABLE_11BE = 12,
+ DSM_FUNC_NUM_FUNCS = 13,
+};
+
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
+enum iwl_dsm_unii4_bitmap {
+ DSM_VALUE_UNII4_US_OVERRIDE_MSK = BIT(0),
+ DSM_VALUE_UNII4_US_EN_MSK = BIT(1),
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK = BIT(2),
+ DSM_VALUE_UNII4_ETSI_EN_MSK = BIT(3),
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK = BIT(4),
+ DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5),
+};
+
+#define DSM_UNII4_ALLOW_BITMAP_CMD_V8 (DSM_VALUE_UNII4_US_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_US_EN_MSK | \
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_ETSI_EN_MSK)
+#define DSM_UNII4_ALLOW_BITMAP (DSM_UNII4_ALLOW_BITMAP_CMD_V8 | \
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_CANADA_EN_MSK)
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
+ DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
+};
+
+#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
+ DSM_VALUE_RFI_DDR_DISABLE)
+
+enum iwl_dsm_masks_reg {
+ DSM_MASK_CHINA_22_REG = BIT(2)
+};
+
+struct iwl_fw_runtime;
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b);
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
+bool iwl_is_tas_approved(void);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection);
+
+int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+
+int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+
+int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
+
+int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+
+static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
+ const u8 ppag_ver)
+{
+ return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
+}
+#endif /* __fw_regulatory_h__ */
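A worked example for the iwl_bios_get_ppag_flags() helper above (the input values are illustrative): for table revisions below 3 only the ETSI/China bits survive, while revision 3 keeps the full 11-bit mode mask.

/* ppag_modes = 0x7ff, ppag_ver = 2 -> flags = 0x003 (IWL_PPAG_ETSI_CHINA_MASK) */
/* ppag_modes = 0x7ff, ppag_ver = 3 -> flags = 0x7ff (IWL_PPAG_REV3_MASK)       */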
diff --git a/fw/rs.c b/fw/rs.c
index b09e68dbf5a9..8f99e501629e 100644
--- a/fw/rs.c
+++ b/fw/rs.c
@@ -208,7 +208,6 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
iwl_rs_pretty_ant(ant),
- index == IWL_RATE_INVALID ? "BAD" :
iwl_rate_mcs(index)->mbps);
}
diff --git a/fw/runtime.h b/fw/runtime.h
index 702586945533..048877fa7c71 100644
--- a/fw/runtime.h
+++ b/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -12,13 +12,13 @@
#include "fw/api/debug.h"
#include "fw/api/paging.h"
#include "fw/api/power.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "fw/acpi.h"
+#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
- bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
bool (*d3_debug_enable)(void *ctx);
};
@@ -45,6 +45,10 @@ struct iwl_fwrt_shared_mem_cfg {
* struct iwl_fwrt_dump_data - dump data
* @trig: trigger the worker was scheduled upon
* @fw_pkt: packet received from FW
+ *
+ * Note that the decision which part of the union is used
+ * is based on iwl_trans_dbg_ini_valid(): the 'trig' part
+ * is used if it is %true, the 'desc' part otherwise.
*/
struct iwl_fwrt_dump_data {
union {
@@ -53,6 +57,7 @@ struct iwl_fwrt_dump_data {
struct iwl_rx_packet *fw_pkt;
};
struct {
+ /* must be first to be same as 'trig' */
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
};
@@ -98,6 +103,12 @@ struct iwl_txf_iter_data {
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
+ * @uats_table: AP type table
+ * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
+ * 0: Unlocked, 1 and 2: Locked.
+ * Only read the UEFI variables if locked.
+ * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
+ * @geo_profiles: geographic profiles as read from WGDS BIOS table
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -156,22 +167,21 @@ struct iwl_fw_runtime {
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-#ifdef CONFIG_ACPI
- struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM];
u8 sar_chain_a_profile;
u8 sar_chain_b_profile;
- struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
+ u8 reduced_power_flags;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u32 ppag_ver;
- bool ppag_table_valid;
+ u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- u8 reduced_power_flags;
-#endif
+ struct iwl_mcc_allowed_ap_type_cmd uats_table;
+ u8 uefi_tables_lock_status;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/fw/uefi.c b/fw/uefi.c
index 9877988db0d2..fb982d4fe851 100644
--- a/fw/uefi.c
+++ b/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -76,6 +76,42 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
+static
+void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ void *var;
+ unsigned long var_size;
+
+ var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+ &var_size);
+
+ if (IS_ERR(var)) {
+ IWL_DEBUG_RADIO(trans,
+ "%s UEFI variable not found 0x%lx\n", var_name,
+ PTR_ERR(var));
+ return var;
+ }
+
+ if (var_size < expected_size) {
+ IWL_DEBUG_RADIO(trans,
+ "Invalid %s UEFI variable len (%lu)\n",
+ var_name, var_size);
+ kfree(var);
+ return ERR_PTR(-EINVAL);
+ }
+
+ IWL_DEBUG_RADIO(trans, "%s from UEFI with size %lu\n", var_name,
+ var_size);
+
+ if (size)
+ *size = var_size;
+ return var;
+}
+
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -230,26 +266,13 @@ u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
unsigned long package_size;
u8 *data;
- package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME,
- &IWL_EFI_VAR_GUID, &package_size);
-
- if (IS_ERR(package)) {
- IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
- PTR_ERR(package), package_size);
+ package = iwl_uefi_get_verified_variable(trans,
+ IWL_UEFI_REDUCED_POWER_NAME,
+ "Reduced Power",
+ sizeof(*package),
+ &package_size);
+ if (IS_ERR(package))
return ERR_CAST(package);
- }
-
- if (package_size < sizeof(*package)) {
- IWL_DEBUG_FW(trans,
- "Invalid Reduced Power UEFI variable len (%lu)\n",
- package_size);
- kfree(package);
- return ERR_PTR(-EINVAL);
- }
-
- IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
- package_size);
IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);
@@ -283,32 +306,15 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
void iwl_uefi_get_step_table(struct iwl_trans *trans)
{
struct uefi_cnv_common_step_data *data;
- unsigned long package_size;
int ret;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
-
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "STEP UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid STEP table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return;
- }
-
- IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_step_parse(data, trans);
if (ret < 0)
@@ -318,7 +324,6 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
-#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
{
@@ -355,31 +360,15 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_sgom_data *data;
- unsigned long package_size;
int ret;
if (!fwrt->geo_enabled)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
- return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid SGOM table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_SGOM_NAME,
+ "SGOM", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_sgom_parse(data, fwrt);
if (ret < 0)
@@ -388,4 +377,355 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
-#endif /* CONFIG_ACPI */
+
+static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data,
+ struct iwl_fw_runtime *fwrt)
+{
+ if (uats_data->revision != 1)
+ return -EINVAL;
+
+ memcpy(fwrt->uats_table.offset_map, uats_data->offset_map,
+ sizeof(fwrt->uats_table.offset_map));
+ return 0;
+}
+
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_wlan_uats_data *data;
+ int ret;
+
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
+ "UATS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ ret = iwl_uefi_uats_parse(data, fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_FW(trans, "Cannot read UATS table. rev is invalid\n");
+ kfree(data);
+ return ret;
+ }
+
+ kfree(data);
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
+
+static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
+ struct uefi_sar_profile *uefi_sar_prof,
+ u8 prof_index, bool enabled)
+{
+ memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof,
+ sizeof(struct uefi_sar_profile));
+
+ fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK;
+}
+
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wrds *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME,
+ "WRDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode);
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ewrd *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME,
+ "EWRD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_EWRD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < data->num_profiles; i++)
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1,
+ data->mode);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wgds *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME,
+ "WGDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WGDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM ||
+ data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Invalid number of profiles in WGDS: %d\n",
+ data->num_profiles);
+ goto out;
+ }
+
+ fwrt->geo_rev = data->revision;
+ for (i = 0; i < data->num_profiles; i++)
+ memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i],
+ sizeof(struct iwl_geo_profile));
+
+ fwrt->geo_num_profiles = data->num_profiles;
+ fwrt->geo_enabled = true;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ppag *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME,
+ "PPAG", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision < IWL_UEFI_MIN_PPAG_REV ||
+ data->revision > IWL_UEFI_MAX_PPAG_REV) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ fwrt->ppag_ver = data->revision;
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
+ fwrt->ppag_ver);
+
+ BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
+ memcpy(&fwrt->ppag_chains, &data->ppag_chains,
+ sizeof(data->ppag_chains));
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
+{
+ struct uefi_cnv_var_wtas *uefi_tas;
+ int ret = 0, enabled, i;
+
+ uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
+ "WTAS", sizeof(*uefi_tas), NULL);
+ if (IS_ERR(uefi_tas))
+ return -EINVAL;
+
+ if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
+ uefi_tas->revision);
+ goto out;
+ }
+
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ uefi_tas->tas_selection);
+ if (!enabled) {
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
+ uefi_tas->revision);
+ if (uefi_tas->black_list_size > IWL_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %d\n",
+ uefi_tas->black_list_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
+
+ for (i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] =
+ cpu_to_le32(uefi_tas->black_list[i]);
+ IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
+ uefi_tas->black_list[i]);
+ }
+out:
+ kfree(uefi_tas);
+ return ret;
+}
+
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ struct uefi_cnv_var_splc *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME,
+ "SPLC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_SPLC_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI SPLC revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *dflt_pwr_limit = data->default_pwr_limit;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ struct uefi_cnv_var_wrdd *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDD_NAME,
+ "WRDD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->mcc != UEFI_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
+ goto out;
+ }
+
+ mcc[0] = (data->mcc >> 8) & 0xff;
+ mcc[1] = data->mcc & 0xff;
+ mcc[2] = '\0';
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ struct uefi_cnv_var_eckv *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_ECKV_REVISION) {
+ ret = -EINVAL;
+		IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI ECKV revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *extl_clk = data->ext_clock_valid;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ struct uefi_cnv_wlan_wbem_data *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WBEM_NAME,
+ "WBEM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WBEM_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WBEM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *value = data->wbem_320mhz_per_mcc & IWL_UEFI_WBEM_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from UEFI\n");
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ struct uefi_cnv_var_general_cfg *data;
+ int ret = -EINVAL;
+
+ /* Not supported function index */
+ if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
+ return -EOPNOTSUPP;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME,
+ "DSM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
+ goto out;
+ }
+
+ *value = data->functions[func];
+ ret = 0;
+out:
+ kfree(data);
+ return ret;
+}
diff --git a/fw/uefi.h b/fw/uefi.h
index 1369cc4855c3..1f8884ca8997 100644
--- a/fw/uefi.h
+++ b/fw/uefi.h
@@ -1,16 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
+#include "fw/regulatory.h"
+
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
+#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS"
+#define IWL_UEFI_WRDS_NAME L"UefiCnvWlanWRDS"
+#define IWL_UEFI_EWRD_NAME L"UefiCnvWlanEWRD"
+#define IWL_UEFI_WGDS_NAME L"UefiCnvWlanWGDS"
+#define IWL_UEFI_PPAG_NAME L"UefiCnvWlanPPAG"
+#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
+#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
+#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
+
#define IWL_SGOM_MAP_SIZE 339
+#define IWL_UATS_MAP_SIZE 339
+
+#define IWL_UEFI_WRDS_REVISION 2
+#define IWL_UEFI_EWRD_REVISION 2
+#define IWL_UEFI_WGDS_REVISION 3
+#define IWL_UEFI_MIN_PPAG_REV 1
+#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_SPLC_REVISION 0
+#define IWL_UEFI_WRDD_REVISION 0
+#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_WBEM_REVISION 0
+#define IWL_UEFI_DSM_REVISION 4
struct pnvm_sku_package {
u8 rev;
@@ -25,6 +52,11 @@ struct uefi_cnv_wlan_sgom_data {
u8 offset_map[IWL_SGOM_MAP_SIZE - 1];
} __packed;
+struct uefi_cnv_wlan_uats_data {
+ u8 revision;
+ u8 offset_map[IWL_UATS_MAP_SIZE - 1];
+} __packed;
+
struct uefi_cnv_common_step_data {
u8 revision;
u8 step_mode;
@@ -35,6 +67,134 @@ struct uefi_cnv_common_step_data {
} __packed;
/*
+ * struct uefi_sar_profile - a SAR profile as defined in UEFI
+ *
+ * @chains: a per-chain table of SAR values
+ */
+struct uefi_sar_profile {
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI
+ *
+ * @revision: the revision of the table
+ * @mode: is WRDS enabled/disabled
+ * @sar_profile: sar profile #1
+ */
+struct uefi_cnv_var_wrds {
+ u8 revision;
+ u32 mode;
+ struct uefi_sar_profile sar_profile;
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mode: is WRDS enabled/disabled
+ * @num_profiles: how many additional profiles we have in this table (0-3)
+ * @sar_profiles: the additional SAR profiles (#2-#4)
+ */
+struct uefi_cnv_var_ewrd {
+ u8 revision;
+ u32 mode;
+ u32 num_profiles;
+ struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI
+ * @revision: the revision of the table
+ * @num_profiles: the number of geo profiles we have in the table.
+ *	The first 3 are mandatory, and there can be up to 8.
+ * @geo_profiles: a per-profile table of the offsets to add to SAR values.
+ */
+struct uefi_cnv_var_wgds {
+ u8 revision;
+ u8 num_profiles;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI
+ * @revision: the revision of the table
+ * @ppag_modes: values from &enum iwl_ppag_flags
+ * @ppag_chains: the PPAG values per chain and band
+ */
+struct uefi_cnv_var_ppag {
+ u8 revision;
+ u32 ppag_modes;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+} __packed;
+
+/* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI
+ * @revision: the revision of the table
+ * @tas_selection: different options of TAS enablement.
+ * @black_list_size: the number of defined entries in the black list
+ * @black_list: a list of countries that are not allowed to use the TAS feature
+ */
+struct uefi_cnv_var_wtas {
+ u8 revision;
+ u32 tas_selection;
+ u8 black_list_size;
+ u16 black_list[IWL_WTAS_BLACK_LIST_MAX];
+} __packed;
+
+/* struct uefi_cnv_var_splc - SPLC table as defined in UEFI
+ * @revision: the revision of the table
+ * @default_pwr_limit: The default maximum power per device
+ */
+struct uefi_cnv_var_splc {
+ u8 revision;
+ u32 default_pwr_limit;
+} __packed;
+
+#define UEFI_MCC_CHINA 0x434e
+
+/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mcc: country identifier as defined in ISO 3166-1 alpha-2 code
+ */
+struct uefi_cnv_var_wrdd {
+ u8 revision;
+ u32 mcc;
+} __packed;
+
+/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
+ * @revision: the revision of the table
+ * @ext_clock_valid: indicates if external 32KHz clock is valid
+ */
+struct uefi_cnv_var_eckv {
+ u8 revision;
+ u32 ext_clock_valid;
+} __packed;
+
+#define UEFI_MAX_DSM_FUNCS 32
+
+/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
+ * @revision: the revision of the table
+ * @functions: payload of the different DSM functions
+ */
+struct uefi_cnv_var_general_cfg {
+ u8 revision;
+ u32 functions[UEFI_MAX_DSM_FUNCS];
+} __packed;
+
+#define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1))
+/* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined
+ * in UEFI
+ * @revision: the revision of the table
+ * @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC
+ * bit 0 - if set, 320MHz is enabled for Japan
+ * bit 1 - if set, 320MHz is enabled for South Korea
+ *	bits 2-31 - reserved
+ */
+struct uefi_cnv_wlan_wbem_data {
+ u8 revision;
+ u32 wbem_320mhz_per_mcc;
+} __packed;
+
+/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
* disable the feature in old kernels.
@@ -48,6 +208,22 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -78,14 +254,71 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
{
return 0;
}
-#endif /* CONFIG_EFI */
-#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
-void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-#else
+static inline int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ *dflt_pwr_limit = 0;
+ return 0;
+}
+
+static inline int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
}
-#endif
+
+static inline
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt)
+{
+ return 0;
+}
+#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
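All of the tables added above follow the same contract: a UEFI variable, looked up by its L"..." name, starts with a one-byte revision that must match the expected IWL_UEFI_*_REVISION before the rest of the packed payload may be trusted. A minimal sketch of that validation step for the WBEM blob, assuming it has already been read from EFI (the helper below is illustrative and not part of this import):

/* Illustrative only: validate a WBEM blob before using it. */
static int example_parse_wbem(const void *blob, size_t len, u32 *value)
{
	const struct uefi_cnv_wlan_wbem_data *data = blob;

	if (len < sizeof(*data))
		return -EINVAL;		/* truncated variable */
	if (data->revision != IWL_UEFI_WBEM_REVISION)
		return -EINVAL;		/* unknown layout */

	/* only bits 0-1 are defined in revision 0 */
	*value = data->wbem_320mhz_per_mcc & IWL_UEFI_WBEM_REV0_MASK;
	return 0;
}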
diff --git a/iwl-config.h b/iwl-config.h
index 241a9e3f2a1a..b2abd4fd1944 100644
--- a/iwl-config.h
+++ b/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -11,7 +11,9 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include <linux/mod_devicetable.h>
#include "iwl-csr.h"
+#include "iwl-drv.h"
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -86,10 +88,7 @@ enum iwl_nvm_type {
#define IWL_DEFAULT_MAX_TX_POWER 22
#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
NETIF_F_TSO | NETIF_F_TSO6)
-#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
-#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | \
- IWL_TX_CSUM_NETIF_FLAGS_BZ | \
- NETIF_F_RXCSUM)
+#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM)
/* Antenna presence definitions */
#define ANT_NONE 0x0
@@ -242,7 +241,7 @@ enum iwl_cfg_trans_ltr_delay {
};
/**
- * struct iwl_cfg_trans - information needed to start the trans
+ * struct iwl_cfg_trans_params - information needed to start the trans
*
* These values are specific to the device ID and do not change when
 * multiple configs are used for a single device ID. The values are
@@ -250,7 +249,6 @@ enum iwl_cfg_trans_ltr_delay {
* RFID can be read before deciding the remaining parameters to use.
*
* @base_params: pointer to basic parameters
- * @csr: csr flags and addresses that are different across devices
* @device_family: the device family
* @umac_prph_offset: offset to add to UMAC periphery address
* @xtal_latency: power up latency to get the xtal stabilized
@@ -260,6 +258,7 @@ enum iwl_cfg_trans_ltr_delay {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
+ * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/
@@ -319,7 +318,6 @@ struct iwl_fw_mon_regs {
* @non_shared_ant: the antenna that is for WiFi only
* @nvm_ver: NVM version
* @nvm_calib_ver: NVM calibration version
- * @lib: pointer to the lib ops
* @ht_params: point to ht parameters
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
@@ -344,15 +342,12 @@ struct iwl_fw_mon_regs {
* @nvm_type: see &enum iwl_nvm_type
* @d3_debug_data_base_addr: base address where D3 debug data is stored
* @d3_debug_data_length: length of the D3 debug data
- * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @uhb_supported: ultra high band channels supported
* @min_ba_txq_size: minimum number of slots required in a TX queue which
 *	is based on hardware support (HE - 256, EHT - 1K).
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
- * @mac_addr_csr_base: CSR base register for MAC address access, if not set
- * assume 0x380
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -385,7 +380,6 @@ struct iwl_cfg {
u16 nvm_calib_ver;
u32 rx_with_siso_diversity:1,
tx_with_siso_diversity:1,
- bt_shared_single_ant:1,
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
high_temp:1,
@@ -427,6 +421,9 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_MAC_TYPE_SC 0x48
+#define IWL_CFG_MAC_TYPE_SC2 0x49
+#define IWL_CFG_MAC_TYPE_SC2F 0x4A
+#define IWL_CFG_MAC_TYPE_BZ_W 0x4B
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -435,8 +432,6 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR2 0x10A
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
-#define IWL_CFG_RF_TYPE_MR 0x110
-#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_TYPE_WH 0x113
@@ -451,6 +446,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_160 0x1
#define IWL_CFG_160 0x0
+#define IWL_CFG_NO_320 0x1
+#define IWL_CFG_320 0x0
+
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -480,6 +478,16 @@ struct iwl_dev_info {
const char *name;
};
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_dev_info iwl_dev_info_table[];
+extern const unsigned int iwl_dev_info_table_size;
+const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+ u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+extern const struct pci_device_id iwl_hw_card_ids[];
+#endif
+
/*
* This list declares the config structures for all devices.
*/
@@ -535,7 +543,12 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_fm_name[];
+extern const char iwl_gl_name[];
+extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
+extern const char iwl_sc2_name[];
+extern const char iwl_sc2f_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -641,6 +654,8 @@ extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
+extern const struct iwl_cfg iwl_cfg_sc2;
+extern const struct iwl_cfg iwl_cfg_sc2f;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
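The KUnit-only declarations above expose the PCIe device matching table and its lookup helper to tests. A sketch of how that lookup might be exercised; the device and subsystem IDs below are made up, and the helper is only declared when CONFIG_IWLWIFI_KUNIT_TESTS is enabled (a real test would more likely iterate iwl_hw_card_ids and iwl_dev_info_table than hard-code values):

static const char *example_lookup_name(void)
{
	const struct iwl_dev_info *info;

	/* hypothetical IDs, for illustration only */
	info = iwl_pci_find_dev_info(0x7E40, 0x0000,
				     IWL_CFG_MAC_TYPE_SC2, SILICON_A_STEP,
				     IWL_CFG_RF_TYPE_FM, 0 /* cdb */,
				     0 /* jacket */, 0 /* rf_id */,
				     IWL_CFG_160, IWL_CFG_CORES_BT,
				     SILICON_A_STEP);
	return info ? info->name : "unknown";
}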
diff --git a/iwl-context-info-gen3.h b/iwl-context-info-gen3.h
index 96bf353469b8..5b62933134cf 100644
--- a/iwl-context-info-gen3.h
+++ b/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2022 Intel Corporation
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -44,7 +44,7 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
* multicomm.
* @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
- * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
* @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
* completion descriptor, 1 for responses (legacy)
* @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
@@ -56,6 +56,8 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
+ * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: instruct the fw to set SCU_FORCE_ACTIVE
+ *	upon reset.
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
@@ -71,6 +73,7 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
};
/*
@@ -187,11 +190,15 @@ struct iwl_prph_scratch_ctrl_cfg {
* struct iwl_prph_scratch - peripheral scratch mapping
* @ctrl_cfg: control and configuration of prph scratch
* @dram: firmware images addresses in DRAM
+ * @fseq_override: FSEQ override parameters
+ * @step_analog_params: STEP analog calibration values
* @reserved: reserved
*/
struct iwl_prph_scratch {
struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
- __le32 reserved[10];
+ __le32 fseq_override;
+ __le32 step_analog_params;
+ __le32 reserved[8];
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
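The two named fields are carved out of what used to be reserved[10], so the scratch area must keep its overall size. A compile-time guard expressing that invariant would look roughly like this (hypothetical, not part of the import):

/* two new __le32 fields plus reserved[8] still total ten __le32 words */
static_assert(sizeof(struct iwl_prph_scratch) ==
	      sizeof(struct iwl_prph_scratch_ctrl_cfg) +
	      10 * sizeof(__le32) +
	      sizeof(struct iwl_context_info_dram));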
diff --git a/iwl-csr.h b/iwl-csr.h
index 587368a0ad4a..98563757ce2c 100644
--- a/iwl-csr.h
+++ b/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -304,15 +304,14 @@
#define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
#define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
-/**
- * hw_rev values
- */
+/* hw_rev values */
enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
SILICON_C_STEP,
SILICON_D_STEP,
SILICON_E_STEP,
+ SILICON_TC_STEP = 0xe,
SILICON_Z_STEP = 0xf,
};
@@ -353,6 +352,8 @@ enum {
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
#define CSR_HW_RF_ID_TYPE_MS (0x00111000)
+#define CSR_HW_RF_ID_TYPE_FM (0x00112000)
+#define CSR_HW_RF_ID_TYPE_WP (0x00113000)
/* HW_RF CHIP STEP */
#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
@@ -618,6 +619,7 @@ enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
MSIX_HW_INT_CAUSES_REG_IML = BIT(1),
MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2),
+ MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR = BIT(3),
MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
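The new RF ID register values appear to line up with the IWL_CFG_RF_TYPE_* constants in iwl-config.h once the type field is extracted; a small sketch, assuming CSR_HW_RFID_TYPE() pulls the type out of bits 12-23 as elsewhere in the driver:

/* illustrative: 0x00112000 with the type field extracted yields 0x112 */
static bool example_rf_is_fm(u32 hw_rf_id)
{
	return CSR_HW_RFID_TYPE(hw_rf_id) == IWL_CFG_RF_TYPE_FM;
}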
diff --git a/iwl-dbg-tlv.c b/iwl-dbg-tlv.c
index ef5baee6c9c5..08d990ba8a79 100644
--- a/iwl-dbg-tlv.c
+++ b/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/firmware.h>
#include "iwl-drv.h"
@@ -64,21 +64,22 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
-static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
- struct list_head *list)
+/* add a new TLV node, returning it so it can be modified */
+static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
+ struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
- node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
if (!node)
- return -ENOMEM;
+ return NULL;
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
- return 0;
+ return &node->tlv;
}
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
@@ -103,10 +104,18 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
+ /* we use this as a string, ensure input was NUL terminated */
+ if (strnlen(debug_info->debug_cfg_name,
+ sizeof(debug_info->debug_cfg_name)) ==
+ sizeof(debug_info->debug_cfg_name))
+ return -EINVAL;
+
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
- return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
@@ -175,7 +184,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
@@ -212,12 +223,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EINVAL;
}
- if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
- !trans->ops->read_config32) {
- IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
- return -EOPNOTSUPP;
- }
-
if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
trans->dbg.imr_data.sram_addr =
le32_to_cpu(reg->internal_buffer.base_addr);
@@ -246,11 +251,9 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
- struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
- struct iwl_ucode_tlv *dup = NULL;
- int ret;
+ struct iwl_ucode_tlv *new_tlv;
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
@@ -267,20 +270,18 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
+
+ new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ if (!new_tlv)
+ return -ENOMEM;
+
if (!le32_to_cpu(trig->occurrences)) {
- dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
- GFP_KERNEL);
- if (!dup)
- return -ENOMEM;
- dup_trig = (void *)dup->data;
- dup_trig->occurrences = cpu_to_le32(-1);
- tlv = dup;
- }
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data;
- ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
- kfree(dup);
+ new_trig->occurrences = cpu_to_le32(-1);
+ }
- return ret;
+ return 0;
}
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
@@ -304,7 +305,9 @@ static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list))
+ return -ENOMEM;
+ return 0;
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
@@ -509,6 +512,8 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
if (res)
return;
+ trans->dbg.yoyo_bin_loaded = true;
+
iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);
release_firmware(fw);
@@ -1094,7 +1099,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
node_trig = (void *)node_tlv->data;
}
- memcpy(node_trig->data + offset, trig->data, trig_data_len);
+ memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
node_tlv->length = cpu_to_le32(size);
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
@@ -1146,7 +1151,9 @@ iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
if (!match) {
IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
- return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ if (!iwl_dbg_tlv_add(trig_tlv, trig_list))
+ return -ENOMEM;
+ return 0;
}
return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
@@ -1232,38 +1239,27 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
}
- fwrt->trans->dbg.restart_required = FALSE;
- IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
- tp, dump_data.trig->reset_fw);
- IWL_DEBUG_FW(fwrt,
- "WRT: restart_required %d, last_tp_resetfw %d\n",
- fwrt->trans->dbg.restart_required,
- fwrt->trans->dbg.last_tp_resetfw);
+ fwrt->trans->dbg.restart_required = false;
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
- IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
- IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- IWL_DEBUG_FW(fwrt,
- "WRT: stop only and no reload firmware\n");
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_NOTHING) {
- IWL_DEBUG_FW(fwrt,
- "WRT: nothing need to be done after debug collection\n");
+ /* nothing */
} else {
IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
le32_to_cpu(dump_data.trig->reset_fw));
@@ -1272,7 +1268,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
return 0;
}
-static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
+void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
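The reworked iwl_dbg_tlv_add() returns the node's private TLV copy (or NULL on allocation failure), which is what lets iwl_dbg_tlv_alloc_trigger() drop the kmemdup() of the firmware-provided TLV and patch the stored copy instead. A compressed sketch of that caller pattern, mirroring the hunk above (illustrative only):

static int example_add_unlimited_trigger(struct iwl_trans *trans,
					 const struct iwl_ucode_tlv *tlv,
					 u32 tp)
{
	struct iwl_ucode_tlv *copy;
	struct iwl_fw_ini_trigger_tlv *trig;

	copy = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	if (!copy)
		return -ENOMEM;

	/* mutate the private copy, never the TLV handed in by the caller */
	trig = (void *)copy->data;
	trig->occurrences = cpu_to_le32(-1);	/* fire without limit */
	return 0;
}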
diff --git a/iwl-dbg-tlv.h b/iwl-dbg-tlv.h
index 128059ca77e6..7ed6329fd8ca 100644
--- a/iwl-dbg-tlv.h
+++ b/iwl-dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
@@ -10,7 +10,8 @@
#include <fw/file.h>
#include <fw/api/dbg-tlv.h>
-#define IWL_DBG_TLV_MAX_PRESET 15
+#define IWL_DBG_TLV_MAX_PRESET 15
+#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
/**
* struct iwl_dbg_tlv_node - debug TLV node
@@ -56,6 +57,7 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data,
bool sync);
+void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt);
static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
diff --git a/iwl-drv.c b/iwl-drv.c
index 3d87d26845e7..aaaabd67f959 100644
--- a/iwl-drv.c
+++ b/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.ucode_capa.cmd_versions);
kfree(drv->fw.phy_integration_ver);
kfree(drv->trans->dbg.pc_data);
+ drv->trans->dbg.pc_data = NULL;
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -162,6 +163,8 @@ static inline char iwl_drv_get_step(int step)
{
if (step == SILICON_Z_STEP)
return 'z';
+ if (step == SILICON_TC_STEP)
+ return 'a';
return 'a' + step;
}
@@ -178,25 +181,28 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
mac_step = iwl_drv_get_step(trans->hw_rev_step);
+ rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id));
+
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
+ rf_step = 'b';
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
break;
- case IWL_CFG_RF_TYPE_MR:
- rf = "mr";
- break;
- case IWL_CFG_RF_TYPE_MS:
- rf = "ms";
- break;
case IWL_CFG_RF_TYPE_FM:
rf = "fm";
break;
case IWL_CFG_RF_TYPE_WH:
- rf = "wh";
+ if (SILICON_Z_STEP ==
+ CSR_HW_RFID_STEP(trans->hw_rf_id)) {
+ rf = "whtc";
+ rf_step = 'a';
+ } else {
+ rf = "wh";
+ }
break;
default:
return "unknown-rf";
@@ -204,8 +210,6 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
cdb = CSR_HW_RFID_IS_CDB(trans->hw_rf_id) ? "4" : "";
- rf_step = iwl_drv_get_step(CSR_HW_RFID_STEP(trans->hw_rf_id));
-
scnprintf(buf, FW_NAME_PRE_BUFSIZE,
"iwlwifi-%s-%c0-%s%s-%c0",
trans->cfg->fw_name_mac, mac_step,
@@ -978,16 +982,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
- if (major >= 35)
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version),
- "%u.%08x.%u %s", major, minor,
- local_comp, iwl_reduced_fw_name(drv));
- else
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version),
- "%u.%u.%u %s", major, minor,
- local_comp, iwl_reduced_fw_name(drv));
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%08x.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1303,10 +1301,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_CURRENT_PC:
if (tlv_len < sizeof(struct iwl_pc_data))
goto invalid_tlv_len;
- drv->trans->dbg.num_pc =
- tlv_len / sizeof(struct iwl_pc_data);
drv->trans->dbg.pc_data =
kmemdup(tlv_data, tlv_len, GFP_KERNEL);
+ if (!drv->trans->dbg.pc_data)
+ return -ENOMEM;
+ drv->trans->dbg.num_pc =
+ tlv_len / sizeof(struct iwl_pc_data);
break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
@@ -1413,35 +1413,34 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
- int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
- for (retry = 0; retry <= max_retry; retry++) {
+ /* also protects start/stop from racing against each other */
+ lockdep_assert_held(&iwlwifi_opmode_table_mtx);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg,
- &drv->fw, dbgfs_dir);
-
- if (op_mode)
- return op_mode;
-
- IWL_ERR(drv, "retry init count %d\n", retry);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+ if (op_mode)
+ return op_mode;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- debugfs_remove_recursive(drv->dbgfs_op_mode);
- drv->dbgfs_op_mode = NULL;
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
#endif
- }
return NULL;
}
static void _iwl_op_mode_stop(struct iwl_drv *drv)
{
+ /* also protects start/stop from racing against each other */
+ lockdep_assert_held(&iwlwifi_opmode_table_mtx);
+
/* op_mode can be NULL if its start failed */
if (drv->op_mode) {
iwl_op_mode_stop(drv->op_mode);
@@ -1473,7 +1472,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
- bool load_module = false;
bool usniffer_images = false;
bool failure = true;
@@ -1721,24 +1719,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto out_unbind;
}
} else {
- load_module = true;
+ request_module_nowait("%s", op->name);
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
- /*
- * Complete the firmware request last so that
- * a driver unbind (stop) doesn't run while we
- * are doing the start() above.
- */
complete(&drv->request_firmware_complete);
- /*
- * Load the module last so we don't block anything
- * else from proceeding if the module fails to load
- * or hangs loading.
- */
- if (load_module)
- request_module("%s", op->name);
failure = false;
goto free;
@@ -1795,6 +1781,22 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
#endif
drv->trans->dbg.domains_bitmap = IWL_TRANS_FW_DBG_DOMAIN(drv->trans);
+ if (iwlwifi_mod_params.enable_ini != ENABLE_INI) {
+ /* We have a non-default value in the module parameter,
+ * take its value
+ */
+ drv->trans->dbg.domains_bitmap &= 0xffff;
+ if (iwlwifi_mod_params.enable_ini != IWL_FW_INI_PRESET_DISABLE) {
+ if (iwlwifi_mod_params.enable_ini > ENABLE_INI) {
+ IWL_ERR(trans,
+ "invalid enable_ini module parameter value: max = %d, using 0 instead\n",
+ ENABLE_INI);
+ iwlwifi_mod_params.enable_ini = 0;
+ }
+ drv->trans->dbg.domains_bitmap =
+ BIT(IWL_FW_DBG_DOMAIN_POS + iwlwifi_mod_params.enable_ini);
+ }
+ }
ret = iwl_request_firmware(drv, true);
if (ret) {
@@ -1807,8 +1809,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_dbg_tlv_free(drv->trans);
#endif
+ iwl_dbg_tlv_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
@@ -1818,11 +1820,12 @@ void iwl_drv_stop(struct iwl_drv *drv)
{
wait_for_completion(&drv->request_firmware_complete);
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+
_iwl_op_mode_stop(drv);
iwl_dealloc_ucode(drv);
- mutex_lock(&iwlwifi_opmode_table_mtx);
/*
* List is empty (this item wasn't added)
* when firmware loading failed -- in that
@@ -1833,7 +1836,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
mutex_unlock(&iwlwifi_opmode_table_mtx);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->trans->ops->debugfs_cleanup(drv->trans);
+ iwl_trans_debugfs_cleanup(drv->trans);
debugfs_remove_recursive(drv->dbgfs_drv);
#endif
@@ -1843,8 +1846,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
kfree(drv);
}
-#define ENABLE_INI (IWL_DBG_TLV_MAX_PRESET + 1)
-
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
.fw_restart = true,
@@ -1964,38 +1965,7 @@ module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
-static int enable_ini_set(const char *arg, const struct kernel_param *kp)
-{
- int ret = 0;
- bool res;
- __u32 new_enable_ini;
-
- /* in case the argument type is a number */
- ret = kstrtou32(arg, 0, &new_enable_ini);
- if (!ret) {
- if (new_enable_ini > ENABLE_INI) {
- pr_err("enable_ini cannot be %d, in range 0-16\n", new_enable_ini);
- return -EINVAL;
- }
- goto out;
- }
-
- /* in case the argument type is boolean */
- ret = kstrtobool(arg, &res);
- if (ret)
- return ret;
- new_enable_ini = (res ? ENABLE_INI : 0);
-
-out:
- iwlwifi_mod_params.enable_ini = new_enable_ini;
- return 0;
-}
-
-static const struct kernel_param_ops enable_ini_ops = {
- .set = enable_ini_set
-};
-
-module_param_cb(enable_ini, &enable_ini_ops, &iwlwifi_mod_params.enable_ini, 0644);
+module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, uint, 0444);
MODULE_PARM_DESC(enable_ini,
"0:disable, 1-15:FW_DBG_PRESET Values, 16:enabled without preset value defined,"
"Debug INI TLV FW debug infrastructure (default: 16)");
diff --git a/iwl-drv.h b/iwl-drv.h
index 6c19989e4ab7..1549ff429549 100644
--- a/iwl-drv.h
+++ b/iwl-drv.h
@@ -6,6 +6,7 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
#include <linux/export.h>
+#include <kunit/visibility.h>
/* for all modules */
#define DRV_NAME "iwlwifi"
@@ -56,7 +57,7 @@ struct iwl_cfg;
/**
* iwl_drv_start - start the drv
*
- * @trans_ops: the ops of the transport
+ * @trans: the transport
*
* starts the driver: fetches the firmware. This should be called by bus
* specific system flows implementations. For example, the bus specific probe
@@ -89,8 +90,13 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
-/* max retry for init flow */
-#define IWL_MAX_INIT_RETRY 2
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT
+#else
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT static
+#endif
#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
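The two new helpers that replace the init-retry definitions give a symbol dual visibility: static in normal builds, exported to tests when CONFIG_IWLWIFI_KUNIT_TESTS is set. Usage would look roughly like the following sketch (table contents elided):

/* in a .c file: visible to KUnit builds, static otherwise */
VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
	/* ... per-device match entries ... */
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);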
diff --git a/iwl-eeprom-parse.c b/iwl-eeprom-parse.c
deleted file mode 100644
index f12b86563728..000000000000
--- a/iwl-eeprom-parse.c
+++ /dev/null
@@ -1,875 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
- * Copyright (C) 2015 Intel Mobile Communications GmbH
- */
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include "iwl-drv.h"
-#include "iwl-modparams.h"
-#include "iwl-eeprom-parse.h"
-
-/* EEPROM offset definitions */
-
-/* indirect access definitions */
-#define ADDRESS_MSK 0x0000FFFF
-#define INDIRECT_TYPE_MSK 0x000F0000
-#define INDIRECT_HOST 0x00010000
-#define INDIRECT_GENERAL 0x00020000
-#define INDIRECT_REGULATORY 0x00030000
-#define INDIRECT_CALIBRATION 0x00040000
-#define INDIRECT_PROCESS_ADJST 0x00050000
-#define INDIRECT_OTHERS 0x00060000
-#define INDIRECT_TXP_LIMIT 0x00070000
-#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
-#define INDIRECT_ADDRESS 0x00100000
-
-/* corresponding link offsets in EEPROM */
-#define EEPROM_LINK_HOST (2*0x64)
-#define EEPROM_LINK_GENERAL (2*0x65)
-#define EEPROM_LINK_REGULATORY (2*0x66)
-#define EEPROM_LINK_CALIBRATION (2*0x67)
-#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
-#define EEPROM_LINK_OTHERS (2*0x69)
-#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
-#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* calibration */
-struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- __le16 voltage;
-} __packed;
-
-#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
-#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
-
-/* temperature */
-#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
-#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
-
-/* SKU Capabilities (actual values from EEPROM definition) */
-enum eeprom_sku_bits {
- EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
- EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
- EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
- EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
- EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
-};
-
-/* radio config bits (actual values from EEPROM definition) */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-
-/*
- * EEPROM bands
- * These are the channel numbers from each band in the order
- * that they are stored in the EEPROM band information. Note
- * that EEPROM bands aren't the same as mac80211 bands, and
- * there are even special "ht40 bands" in the EEPROM.
- */
-static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
- ARRAY_SIZE(iwl_eeprom_band_2) + \
- ARRAY_SIZE(iwl_eeprom_band_3) + \
- ARRAY_SIZE(iwl_eeprom_band_4) + \
- ARRAY_SIZE(iwl_eeprom_band_5))
-
-/* rate data (static) */
-static struct ieee80211_rate iwl_cfg80211_rates[] = {
- { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
- { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
- { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
- { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
- .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
- { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
- { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
- { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
- { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
- { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
- { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
- { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
- { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
-};
-#define RATES_24_OFFS 0
-#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
-#define RATES_52_OFFS 4
-#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
-
-/* EEPROM reading functions */
-
-static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
-{
- if (WARN_ON(offset + sizeof(u16) > eeprom_size))
- return 0;
- return le16_to_cpup((__le16 *)(eeprom + offset));
-}
-
-static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
- u32 address)
-{
- u16 offset = 0;
-
- if ((address & INDIRECT_ADDRESS) == 0)
- return address;
-
- switch (address & INDIRECT_TYPE_MSK) {
- case INDIRECT_HOST:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_HOST);
- break;
- case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_GENERAL);
- break;
- case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_REGULATORY);
- break;
- case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_TXP_LIMIT);
- break;
- case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_TXP_LIMIT_SIZE);
- break;
- case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_CALIBRATION);
- break;
- case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_PROCESS_ADJST);
- break;
- case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_LINK_OTHERS);
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- /* translate the offset from words to byte */
- return (address & ADDRESS_MSK) + (offset << 1);
-}
-
-static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
- u32 offset)
-{
- u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
-
- if (WARN_ON(address >= eeprom_size))
- return NULL;
-
- return &eeprom[address];
-}
-
-static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
- struct iwl_nvm_data *data)
-{
- struct iwl_eeprom_calib_hdr *hdr;
-
- hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_CALIB_ALL);
- if (!hdr)
- return -ENODATA;
- data->calib_version = hdr->version;
- data->calib_voltage = hdr->voltage;
-
- return 0;
-}
-
-/**
- * enum iwl_eeprom_channel_flags - channel flags in EEPROM
- * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
- * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
- * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
- * @EEPROM_CHANNEL_RADAR: radar detection required
- * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
- * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
- */
-enum iwl_eeprom_channel_flags {
- EEPROM_CHANNEL_VALID = BIT(0),
- EEPROM_CHANNEL_IBSS = BIT(1),
- EEPROM_CHANNEL_ACTIVE = BIT(3),
- EEPROM_CHANNEL_RADAR = BIT(4),
- EEPROM_CHANNEL_WIDE = BIT(5),
- EEPROM_CHANNEL_DFS = BIT(7),
-};
-
-/**
- * struct iwl_eeprom_channel - EEPROM channel data
- * @flags: %EEPROM_CHANNEL_* flags
- * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
- */
-struct iwl_eeprom_channel {
- u8 flags;
- s8 max_power_avg;
-} __packed;
-
-
-enum iwl_eeprom_enhanced_txpwr_flags {
- IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
- IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
- IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
- IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
- IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
- IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
- IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
-};
-
-/**
- * struct iwl_eeprom_enhanced_txpwr
- * @flags: entry flags
- * @channel: channel number
- * @chain_a_max: chain a max power in 1/2 dBm
- * @chain_b_max: chain b max power in 1/2 dBm
- * @chain_c_max: chain c max power in 1/2 dBm
- * @delta_20_in_40: 20-in-40 deltas (hi/lo)
- * @mimo2_max: mimo2 max power in 1/2 dBm
- * @mimo3_max: mimo3 max power in 1/2 dBm
- *
- * This structure presents the enhanced regulatory tx power limit layout
- * in an EEPROM image.
- */
-struct iwl_eeprom_enhanced_txpwr {
- u8 flags;
- u8 channel;
- s8 chain_a_max;
- s8 chain_b_max;
- s8 chain_c_max;
- u8 delta_20_in_40;
- s8 mimo2_max;
- s8 mimo3_max;
-} __packed;
-
-static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
- struct iwl_eeprom_enhanced_txpwr *txp)
-{
- s8 result = 0; /* (.5 dBm) */
-
- /* Take the highest tx power from any valid chains */
- if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
- result = txp->chain_a_max;
-
- if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
- result = txp->chain_b_max;
-
- if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
- result = txp->chain_c_max;
-
- if ((data->valid_tx_ant == ANT_AB ||
- data->valid_tx_ant == ANT_BC ||
- data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
- result = txp->mimo2_max;
-
- if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
- result = txp->mimo3_max;
-
- return result;
-}
-
-#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
-#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
-#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
-
-#define TXP_CHECK_AND_PRINT(x) \
- ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
-
-static void
-iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
- struct iwl_eeprom_enhanced_txpwr *txp,
- int n_channels, s8 max_txpower_avg)
-{
- int ch_idx;
- enum nl80211_band band;
-
- band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
-
- for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
- struct ieee80211_channel *chan = &data->channels[ch_idx];
-
- /* update matching channel or from common data only */
- if (txp->channel != 0 && chan->hw_value != txp->channel)
- continue;
-
- /* update matching band only */
- if (band != chan->band)
- continue;
-
- if (chan->max_power < max_txpower_avg &&
- !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
- chan->max_power = max_txpower_avg;
- }
-}
-
-static void iwl_eeprom_enhanced_txpower(struct device *dev,
- struct iwl_nvm_data *data,
- const u8 *eeprom, size_t eeprom_size,
- int n_channels)
-{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
- int idx, entries;
- __le16 *txp_len;
- s8 max_txp_avg_halfdbm;
-
- BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
-
- /* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_TXP_SZ_OFFS);
- entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
-
- txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_TXP_OFFS);
-
- for (idx = 0; idx < entries; idx++) {
- txp = &txp_array[idx];
- /* skip invalid entries */
- if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
- continue;
-
- IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
- (txp->channel && (txp->flags &
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
- "Common " : (txp->channel) ?
- "Channel" : "Common",
- (txp->channel),
- TXP_CHECK_AND_PRINT(VALID),
- TXP_CHECK_AND_PRINT(BAND_52G),
- TXP_CHECK_AND_PRINT(OFDM),
- TXP_CHECK_AND_PRINT(40MHZ),
- TXP_CHECK_AND_PRINT(HT_AP),
- TXP_CHECK_AND_PRINT(RES1),
- TXP_CHECK_AND_PRINT(RES2),
- TXP_CHECK_AND_PRINT(COMMON_TYPE),
- txp->flags);
- IWL_DEBUG_EEPROM(dev,
- "\t\t chain_A: %d chain_B: %d chain_C: %d\n",
- txp->chain_a_max, txp->chain_b_max,
- txp->chain_c_max);
- IWL_DEBUG_EEPROM(dev,
- "\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
- txp->mimo2_max, txp->mimo3_max,
- ((txp->delta_20_in_40 & 0xf0) >> 4),
- (txp->delta_20_in_40 & 0x0f));
-
- max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
-
- iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
- DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
-
- if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
- data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
- }
-}
-
-static void iwl_init_band_reference(const struct iwl_cfg *cfg,
- const u8 *eeprom, size_t eeprom_size,
- int eeprom_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **ch_info,
- const u8 **eeprom_ch_array)
-{
- u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
-
- offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
-
- *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
-
- switch (eeprom_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
- *eeprom_ch_array = iwl_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
- *eeprom_ch_array = iwl_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
- *eeprom_ch_array = iwl_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
- *eeprom_ch_array = iwl_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
- *eeprom_ch_array = iwl_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
- *eeprom_ch_array = iwl_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
- *eeprom_ch_array = iwl_eeprom_band_7;
- break;
- default:
- *eeprom_ch_count = 0;
- *eeprom_ch_array = NULL;
- WARN_ON(1);
- }
-}
-
-#define CHECK_AND_PRINT(x) \
- ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
-
-static void iwl_mod_ht40_chan_info(struct device *dev,
- struct iwl_nvm_data *data, int n_channels,
- enum nl80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct ieee80211_channel *chan = NULL;
- int i;
-
- for (i = 0; i < n_channels; i++) {
- if (data->channels[i].band != band)
- continue;
- if (data->channels[i].hw_value != channel)
- continue;
- chan = &data->channels[i];
- break;
- }
-
- if (!chan)
- return;
-
- IWL_DEBUG_EEPROM(dev,
- "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
- channel,
- band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
- !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
- : "not ");
-
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- chan->flags &= ~clear_ht40_extension_channel;
-}
-
-#define CHECK_AND_PRINT_I(x) \
- ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
-
-static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- const u8 *eeprom, size_t eeprom_size)
-{
- int band, ch_idx;
- const struct iwl_eeprom_channel *eeprom_ch_info;
- const u8 *eeprom_ch_array;
- int eeprom_ch_count;
- int n_channels = 0;
-
- /*
- * Loop through the 5 EEPROM bands and add them to the parse list
- */
- for (band = 1; band <= 5; band++) {
- struct ieee80211_channel *channel;
-
- iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
- &eeprom_ch_count, &eeprom_ch_info,
- &eeprom_ch_array);
-
- /* Loop through each band adding each of the channels */
- for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
- const struct iwl_eeprom_channel *eeprom_ch;
-
- eeprom_ch = &eeprom_ch_info[ch_idx];
-
- if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
- IWL_DEBUG_EEPROM(dev,
- "Ch. %d Flags %x [%sGHz] - No traffic\n",
- eeprom_ch_array[ch_idx],
- eeprom_ch_info[ch_idx].flags,
- (band != 1) ? "5.2" : "2.4");
- continue;
- }
-
- channel = &data->channels[n_channels];
- n_channels++;
-
- channel->hw_value = eeprom_ch_array[ch_idx];
- channel->band = (band == 1) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
- channel->center_freq =
- ieee80211_channel_to_frequency(
- channel->hw_value, channel->band);
-
- /* set no-HT40, will enable as appropriate later */
- channel->flags = IEEE80211_CHAN_NO_HT40;
-
- if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
- channel->flags |= IEEE80211_CHAN_NO_IR;
-
- if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
- channel->flags |= IEEE80211_CHAN_NO_IR;
-
- if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
- channel->flags |= IEEE80211_CHAN_RADAR;
-
- /* Initialize regulatory-based run-time data */
- channel->max_power =
- eeprom_ch_info[ch_idx].max_power_avg;
- IWL_DEBUG_EEPROM(dev,
- "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
- channel->hw_value,
- (band != 1) ? "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch_idx].flags,
- eeprom_ch_info[ch_idx].max_power_avg,
- ((eeprom_ch_info[ch_idx].flags &
- EEPROM_CHANNEL_IBSS) &&
- !(eeprom_ch_info[ch_idx].flags &
- EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
- }
- }
-
- if (cfg->eeprom_params->enhanced_txpower) {
- /*
- * for newer device (6000 series and up)
- * EEPROM contain enhanced tx power information
- * driver need to process addition information
- * to determine the max channel tx power limits
- */
- iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
- n_channels);
- } else {
- /* All others use data from channel map */
- int i;
-
- data->max_tx_pwr_half_dbm = -128;
-
- for (i = 0; i < n_channels; i++)
- data->max_tx_pwr_half_dbm =
- max_t(s8, data->max_tx_pwr_half_dbm,
- data->channels[i].max_power * 2);
- }
-
- /* Check if we do have HT40 channels */
- if (cfg->eeprom_params->regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- cfg->eeprom_params->regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return n_channels;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum nl80211_band ieeeband;
-
- iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
- &eeprom_ch_count, &eeprom_ch_info,
- &eeprom_ch_array);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband = (band == 6) ? NL80211_BAND_2GHZ
- : NL80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
- /* Set up driver's info for lower half */
- iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
- eeprom_ch_array[ch_idx],
- &eeprom_ch_info[ch_idx],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
- eeprom_ch_array[ch_idx] + 4,
- &eeprom_ch_info[ch_idx],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- return n_channels;
-}
-
-int iwl_init_sband_channels(struct iwl_nvm_data *data,
- struct ieee80211_supported_band *sband,
- int n_channels, enum nl80211_band band)
-{
- struct ieee80211_channel *chan = &data->channels[0];
- int n = 0, idx = 0;
-
- while (idx < n_channels && chan->band != band)
- chan = &data->channels[++idx];
-
- sband->channels = &data->channels[idx];
-
- while (idx < n_channels && chan->band == band) {
- chan = &data->channels[++idx];
- n++;
- }
-
- sband->n_channels = n;
-
- return n;
-}
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-
-void iwl_init_ht_hw_capab(struct iwl_trans *trans,
- struct iwl_nvm_data *data,
- struct ieee80211_sta_ht_cap *ht_info,
- enum nl80211_band band,
- u8 tx_chains, u8 rx_chains)
-{
- const struct iwl_cfg *cfg = trans->cfg;
- int max_bit_rate = 0;
-
- tx_chains = hweight8(tx_chains);
- if (cfg->rx_with_siso_diversity)
- rx_chains = 1;
- else
- rx_chains = hweight8(rx_chains);
-
- if (!(data->sku_cap_11n_enable) ||
- (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
- !cfg->ht_params) {
- ht_info->ht_supported = false;
- return;
- }
-
- if (data->sku_cap_mimo_disabled)
- rx_chains = 1;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
-
- if (cfg->ht_params->stbc) {
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
- if (tx_chains > 1)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
- }
-
- if (cfg->ht_params->ldpc)
- ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
- if (trans->trans_cfg->mq_rx_supported ||
- iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- if (cfg->ht_params->ht_greenfield_support)
- ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
-
- if (cfg->ht_params->ht40_bands & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains != rx_chains) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
-
-static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- const u8 *eeprom, size_t eeprom_size)
-{
- struct device *dev = trans->dev;
- int n_channels = iwl_init_channel_map(dev, cfg, data,
- eeprom, eeprom_size);
- int n_used = 0;
- struct ieee80211_supported_band *sband;
-
- sband = &data->bands[NL80211_BAND_2GHZ];
- sband->band = NL80211_BAND_2GHZ;
- sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
- sband->n_bitrates = N_RATES_24;
- n_used += iwl_init_sband_channels(data, sband, n_channels,
- NL80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ,
- data->valid_tx_ant, data->valid_rx_ant);
-
- sband = &data->bands[NL80211_BAND_5GHZ];
- sband->band = NL80211_BAND_5GHZ;
- sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
- sband->n_bitrates = N_RATES_52;
- n_used += iwl_init_sband_channels(data, sband, n_channels,
- NL80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ,
- data->valid_tx_ant, data->valid_rx_ant);
-
- if (n_channels != n_used)
- IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
- n_used, n_channels);
-}
-
-/* EEPROM data functions */
-
-struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
- const u8 *eeprom, size_t eeprom_size)
-{
- struct iwl_nvm_data *data;
- struct device *dev = trans->dev;
- const void *tmp;
- u16 radio_cfg, sku;
-
- if (WARN_ON(!cfg || !cfg->eeprom_params))
- return NULL;
-
- data = kzalloc(struct_size(data, channels, IWL_NUM_CHANNELS),
- GFP_KERNEL);
- if (!data)
- return NULL;
-
- /* get MAC address(es) */
- tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
- if (!tmp)
- goto err_free;
- memcpy(data->hw_addr, tmp, ETH_ALEN);
- data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_NUM_MAC_ADDRESS);
-
- if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
- goto err_free;
-
- tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
- if (!tmp)
- goto err_free;
- memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
-
- tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_RAW_TEMPERATURE);
- if (!tmp)
- goto err_free;
- data->raw_temperature = *(__le16 *)tmp;
-
- tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
- EEPROM_KELVIN_TEMPERATURE);
- if (!tmp)
- goto err_free;
- data->kelvin_temperature = *(__le16 *)tmp;
- data->kelvin_voltage = *((__le16 *)tmp + 1);
-
- radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_RADIO_CONFIG);
- data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
- data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
- data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
- data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
- data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
-
- sku = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_SKU_CAP);
- data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
- data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
- data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
- data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
- data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
- data->sku_cap_11n_enable = false;
-
- data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_VERSION);
-
- /* check overrides (some devices have wrong EEPROM) */
- if (cfg->valid_tx_ant)
- data->valid_tx_ant = cfg->valid_tx_ant;
- if (cfg->valid_rx_ant)
- data->valid_rx_ant = cfg->valid_rx_ant;
-
- if (!data->valid_tx_ant || !data->valid_rx_ant) {
- IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
- data->valid_tx_ant, data->valid_rx_ant);
- goto err_free;
- }
-
- iwl_init_sbands(trans, cfg, data, eeprom, eeprom_size);
-
- return data;
- err_free:
- kfree(data);
- return NULL;
-}
-IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
diff --git a/iwl-eeprom-read.c b/iwl-eeprom-read.c
deleted file mode 100644
index 5f386bb1a353..000000000000
--- a/iwl-eeprom-read.c
+++ /dev/null
@@ -1,394 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
- */
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "iwl-drv.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom-read.h"
-#include "iwl-io.h"
-#include "iwl-prph.h"
-#include "iwl-csr.h"
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum of 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
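A minimal illustrative sketch of the per-word read protocol described above, using the iwl_write32()/iwl_poll_bit()/iwl_read32() helpers this file already relies on (error handling trimmed; a sketch only, not part of the patch):

	static int eeprom_read_word_sketch(struct iwl_trans *trans, u16 addr,
					   __le16 *out)
	{
		u32 r;
		int ret;

		/* write the byte address << 1, then poll for READ_VALID */
		iwl_write32(trans, CSR_EEPROM_REG,
			    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
		ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
				   CSR_EEPROM_REG_READ_VALID_MSK,
				   CSR_EEPROM_REG_READ_VALID_MSK,
				   IWL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0)
			return ret;

		/* the 16-bit datum is returned in bits 31-16 of CSR_EEPROM_REG */
		r = iwl_read32(trans, CSR_EEPROM_REG);
		*out = cpu_to_le16(r >> 16);
		return 0;
	}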
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- IWL_EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_EEPROM(trans->dev,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
-{
- iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-}
-
-static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
-{
- u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
-
- IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
-
- switch (gp) {
- case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (!nvm_is_otp) {
- IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
- gp);
- return -ENOENT;
- }
- return 0;
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (nvm_is_otp) {
- IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
- return -ENOENT;
- }
- return 0;
- case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
- default:
- IWL_ERR(trans,
- "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
- nvm_is_otp ? "OTP" : "EEPROM", gp);
- return -ENOENT;
- }
-}
-
-/******************************************************************************
- *
- * OTP related functions
- *
-******************************************************************************/
-
-static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
-{
- iwl_read32(trans, CSR_OTP_GP_REG);
-
- iwl_clear_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
-}
-
-static int iwl_nvm_is_otp(struct iwl_trans *trans)
-{
- u32 otpgp;
-
- /* OTP only valid for CP/PP and after */
- switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(trans, "Unknown hardware type\n");
- return -EIO;
- case CSR_HW_REV_TYPE_5300:
- case CSR_HW_REV_TYPE_5350:
- case CSR_HW_REV_TYPE_5100:
- case CSR_HW_REV_TYPE_5150:
- return 0;
- default:
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
- return 1;
- return 0;
- }
-}
-
-static int iwl_init_otp_access(struct iwl_trans *trans)
-{
- int ret;
-
- ret = iwl_finish_nic_init(trans);
- if (ret)
- return ret;
-
- iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- /*
- * CSR auto clock gate disable bit -
- * this is only applicable for HW with OTP shadow RAM
- */
- if (trans->trans_cfg->base_params->shadow_ram_support)
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
-
- return 0;
-}
-
-static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
- __le16 *eeprom_data)
-{
- int ret = 0;
- u32 r;
- u32 otpgp;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
- return ret;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- /* check for ECC errors: */
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
- /* stop in this case */
- /* set the uncorrectable OTP ECC bit for acknowledgment */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
- return -EINVAL;
- }
- if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
- /* continue in this case */
- /* set the correctable OTP ECC bit for acknowledgment */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
- }
- *eeprom_data = cpu_to_le16(r >> 16);
- return 0;
-}
-
-/*
- * iwl_is_otp_empty: check for empty OTP
- */
-static bool iwl_is_otp_empty(struct iwl_trans *trans)
-{
- u16 next_link_addr = 0;
- __le16 link_value;
- bool is_empty = false;
-
- /* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
- if (!link_value) {
- IWL_ERR(trans, "OTP is empty\n");
- is_empty = true;
- }
- } else {
- IWL_ERR(trans, "Unable to read first block of OTP list.\n");
- is_empty = true;
- }
-
- return is_empty;
-}
-
-
-/*
- * iwl_find_otp_image: find the EEPROM image in OTP
- * Find the OTP block that contains the EEPROM image.
- * The last valid block on the link list (the block _before_ the last block)
- * is the block we should read and use to configure the device.
- * If all the available OTP blocks are full, the last block is the block
- * we should read and use to configure the device.
- * Only perform this operation if shadow RAM is disabled.
- */
-static int iwl_find_otp_image(struct iwl_trans *trans,
- u16 *validblockaddr)
-{
- u16 next_link_addr = 0, valid_addr;
- __le16 link_value = 0;
- int usedblocks = 0;
-
- /* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access_absolute(trans);
-
- /* checking for empty OTP or error */
- if (iwl_is_otp_empty(trans))
- return -EINVAL;
-
- /*
- * start traverse link list
- * until reach the max number of OTP blocks
- * different devices have different number of OTP blocks
- */
- do {
- /* save current valid block address
- * check for more block on the link list
- */
- valid_addr = next_link_addr;
- next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
- usedblocks, next_link_addr);
- if (iwl_read_otp_word(trans, next_link_addr, &link_value))
- return -EINVAL;
- if (!link_value) {
- /*
- * reach the end of link list, return success and
- * set address point to the starting address
- * of the image
- */
- *validblockaddr = valid_addr;
- /* skip first 2 bytes (link list pointer) */
- *validblockaddr += 2;
- return 0;
- }
- /* more in the link list, continue */
- usedblocks++;
- } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
-
- /* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
- return -EINVAL;
-}
-
-/*
- * iwl_read_eeprom - read EEPROM contents
- *
- * Load the EEPROM contents from adapter and return it
- * and its size.
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
-{
- __le16 *e;
- u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
- u16 validblockaddr = 0;
- u16 cache_addr = 0;
- int nvm_is_otp;
-
- if (!eeprom || !eeprom_size)
- return -EINVAL;
-
- nvm_is_otp = iwl_nvm_is_otp(trans);
- if (nvm_is_otp < 0)
- return nvm_is_otp;
-
- sz = trans->trans_cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
-
- e = kmalloc(sz, GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
- if (ret < 0) {
- IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- goto err_free;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(trans);
- if (ret < 0) {
- IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
- goto err_free;
- }
-
- if (nvm_is_otp) {
- ret = iwl_init_otp_access(trans);
- if (ret) {
- IWL_ERR(trans, "Failed to initialize OTP access.\n");
- goto err_unlock;
- }
-
- iwl_write32(trans, CSR_EEPROM_GP,
- iwl_read32(trans, CSR_EEPROM_GP) &
- ~CSR_EEPROM_GP_IF_OWNER_MSK);
-
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- /* traversing the linked list if no shadow ram supported */
- if (!trans->trans_cfg->base_params->shadow_ram_support) {
- ret = iwl_find_otp_image(trans, &validblockaddr);
- if (ret)
- goto err_unlock;
- }
- for (addr = validblockaddr; addr < validblockaddr + sz;
- addr += sizeof(u16)) {
- __le16 eeprom_data;
-
- ret = iwl_read_otp_word(trans, addr, &eeprom_data);
- if (ret)
- goto err_unlock;
- e[cache_addr / 2] = eeprom_data;
- cache_addr += sizeof(u16);
- }
- } else {
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans,
- "Time out reading EEPROM[%d]\n", addr);
- goto err_unlock;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
- }
-
- IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
- nvm_is_otp ? "OTP" : "EEPROM");
-
- iwl_eeprom_release_semaphore(trans);
-
- *eeprom_size = sz;
- *eeprom = (u8 *)e;
- return 0;
-
- err_unlock:
- iwl_eeprom_release_semaphore(trans);
- err_free:
- kfree(e);
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/iwl-eeprom-read.h b/iwl-eeprom-read.h
deleted file mode 100644
index 63b8e6c6659b..000000000000
--- a/iwl-eeprom-read.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2005-2014 Intel Corporation
- */
-#ifndef __iwl_eeprom_h__
-#define __iwl_eeprom_h__
-
-#include "iwl-trans.h"
-
-int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
-
-#endif /* __iwl_eeprom_h__ */
diff --git a/iwl-fh.h b/iwl-fh.h
index 4e4a60ddf9b2..5c8f1868db64 100644
--- a/iwl-fh.h
+++ b/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -15,7 +15,7 @@
/* Flow Handler Definitions */
/****************************/
-/**
+/*
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
@@ -24,7 +24,7 @@
#define FH_MEM_LOWER_BOUND_GEN2 (0xa06000)
#define FH_MEM_UPPER_BOUND_GEN2 (0xa08000)
-/**
+/*
* Keep-Warm (KW) buffer base address.
*
* Driver must allocate a 4KByte buffer that is for keeping the
@@ -44,7 +44,7 @@
#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
-/**
+/*
* TFD Circular Buffers Base (CBBC) addresses
*
* Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
@@ -143,7 +143,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
-/**
+/*
* Rx SRAM Control and Status Registers (RSCSR)
*
* These registers provide handshake between driver and device for the Rx queue
@@ -216,21 +216,21 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
-/**
+/*
* Physical base address of 8-byte Rx Status buffer.
* Bit fields:
 *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
*/
#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-/**
+/*
* Physical base address of Rx Buffer Descriptor Circular Buffer.
* Bit fields:
* 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
*/
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-/**
+/*
* Rx write pointer (index, really!).
* Bit fields:
* 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
@@ -242,7 +242,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
-/**
+/*
* Rx Config/Status Registers (RCSR)
* Rx Config Reg for channel 0 (only channel used)
*
@@ -300,7 +300,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-/**
+/*
* Rx Shared Status Registers (RSSR)
*
* After stopping Rx DMA channel (writing 0 to
@@ -356,7 +356,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
-/**
+/*
* RFH Status Register
*
* Bit fields:
@@ -440,7 +440,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-/**
+/*
* Transmit DMA Channel Control/Status Registers (TCSR)
*
* Device has one configuration register for each of 8 Tx DMA/FIFO channels
@@ -501,7 +501,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
-/**
+/*
* Tx Shared Status Registers (TSSR)
*
* After stopping Tx DMA channel (writing 0 to
@@ -518,7 +518,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-/**
+/*
* Bit fields for TSSR(Tx Shared Status & Control) error status register:
* 31: Indicates an address error when accessed to internal memory
* uCode/driver must write "1" in order to clear this flag
@@ -565,21 +565,24 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
+#define IWL_DEFAULT_RX_QUEUE 0
+
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * @closed_rb_num: [0:11] Indicates the index of the RB which was closed
+ * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed
+ * @finished_rb_num: [0:11] Indicates the index of the current RB
* in which the last frame was written to
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * @finished_fr_num: [0:11] Indicates the index of the RX Frame
* which was transferred
+ * @__spare: reserved
*/
struct iwl_rb_status {
__le16 closed_rb_num;
__le16 closed_fr_num;
__le16 finished_rb_num;
- __le16 finished_fr_nam;
+ __le16 finished_fr_num;
__le32 __spare;
} __packed;
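Since the index fields above are only 12 bits wide ([0:11]), a consumer of this status area masks the value before using it. A minimal sketch, where rb_stts is an illustrative pointer to a struct iwl_rb_status and the 0x0FFF mask follows directly from the documented field width:

	/* keep only the 12 valid bits of the closed RB index */
	u16 closed_rb = le16_to_cpu(rb_stts->closed_rb_num) & 0x0FFF;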
@@ -631,7 +634,7 @@ enum iwl_tfd_tb_hi_n_len {
};
/**
- * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
*
* This structure contains dma address and length of transmission address
*
@@ -645,19 +648,19 @@ struct iwl_tfd_tb {
} __packed;
/**
- * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
* This structure contains dma address and length of transmission address
*
- * @tb_len length of the tx buffer
- * @addr 64 bits dma address
+ * @tb_len: length of the tx buffer
+ * @addr: 64 bits dma address
*/
struct iwl_tfh_tb {
__le16 tb_len;
__le64 addr;
} __packed;
-/**
+/*
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
@@ -679,12 +682,13 @@ struct iwl_tfh_tb {
/**
* struct iwl_tfd - Transmit Frame Descriptor (TFD)
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
+ * @__reserved1: reserved
+ * @num_tbs:
+ * 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
*/
struct iwl_tfd {
u8 __reserved1[3];
@@ -695,10 +699,11 @@ struct iwl_tfd {
/**
* struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
- * @ num_tbs 0-4 number of active tbs
- * 5 -15 reserved
- * @ tbs[25] transmit frame buffer descriptors
- * @ __pad padding
+ * @num_tbs:
+ * 0-4 number of active tbs
+ * 5-15 reserved
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
*/
struct iwl_tfh_tfd {
__le16 num_tbs;
@@ -712,13 +717,15 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_schedq_bc_tbl scheduler byte count table
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * @tfd_offset:
+ * For devices up to 22000:
+ * 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * For 22000:
+ * 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
@@ -727,7 +734,7 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
- * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
diff --git a/iwl-io.c b/iwl-io.c
index c60f9466c5fd..060becfd64f3 100644
--- a/iwl-io.c
+++ b/iwl-io.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@@ -460,7 +460,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
*/
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
} else {
diff --git a/iwl-modparams.h b/iwl-modparams.h
index 1cf26ab4f488..21eabfc3ffc8 100644
--- a/iwl-modparams.h
+++ b/iwl-modparams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022, 2024 Intel Corporation
*/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__
@@ -106,4 +106,23 @@ static inline bool iwl_enable_tx_ampdu(void)
return true;
}
+/* Verify amsdu_size module parameter and convert it to a rxb size */
+static inline enum iwl_amsdu_size
+iwl_amsdu_size_to_rxb_size(void)
+{
+ switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_8K:
+ return IWL_AMSDU_8K;
+ case IWL_AMSDU_12K:
+ return IWL_AMSDU_12K;
+ default:
+ pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
+ iwlwifi_mod_params.amsdu_size);
+ fallthrough;
+ case IWL_AMSDU_DEF:
+ case IWL_AMSDU_4K:
+ return IWL_AMSDU_4K;
+ }
+}
+
#endif /* #__iwl_modparams_h__ */
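An illustrative caller of the new helper (everything but the helper itself is an assumption, not taken from this patch): converting the validated module parameter into a receive-buffer byte size:

	size_t rb_size;

	switch (iwl_amsdu_size_to_rxb_size()) {
	case IWL_AMSDU_12K:
		rb_size = 12 * 1024;
		break;
	case IWL_AMSDU_8K:
		rb_size = 8 * 1024;
		break;
	default:	/* IWL_AMSDU_4K is returned for all other values */
		rb_size = 4 * 1024;
		break;
	}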
diff --git a/iwl-nvm-parse.c b/iwl-nvm-parse.c
index 8c23f57f5c89..d902121da009 100644
--- a/iwl-nvm-parse.c
+++ b/iwl-nvm-parse.c
@@ -38,16 +38,13 @@ enum nvm_offsets {
N_HW_ADDRS = 3,
NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
- /* NVM calibration section offset (in words) definitions */
- NVM_CALIB_SECTION = 0x2B8,
- XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
-
/* NVM REGULATORY -Section offset (in words) definitions */
NVM_CHANNELS_SDP = 0,
};
enum ext_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
+
MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
/* NVM SW-Section offset (in words) definitions */
@@ -156,6 +153,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_80MHZ: 80 MHz channel okay
* @NVM_CHANNEL_160MHZ: 160 MHz channel okay
* @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
*/
enum iwl_nvm_channel_flags {
NVM_CHANNEL_VALID = BIT(0),
@@ -170,6 +169,8 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_80MHZ = BIT(10),
NVM_CHANNEL_160MHZ = BIT(11),
NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
/**
@@ -309,7 +310,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
/* Note: already can print up to 101 characters, 110 is the limit! */
IWL_DEBUG_DEV(dev, level,
- "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
chan, flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
@@ -322,7 +323,9 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
CHECK_AND_PRINT_I(40MHZ),
CHECK_AND_PRINT_I(80MHZ),
CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH));
+ CHECK_AND_PRINT_I(DC_HIGH),
+ CHECK_AND_PRINT_I(VLP),
+ CHECK_AND_PRINT_I(AFC));
#undef CHECK_AND_PRINT_I
}
@@ -366,6 +369,14 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
(flags & IEEE80211_CHAN_NO_IR))
flags |= IEEE80211_CHAN_IR_CONCURRENT;
+ /* Set the AP type for the UHB case. */
+ if (nvm_flags & NVM_CHANNEL_VLP)
+ flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
+ else
+ flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
+
return flags;
}
@@ -380,11 +391,14 @@ static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
return NL80211_BAND_2GHZ;
}
-static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+static int iwl_init_channel_map(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
u32 sbands_flags, bool v4)
{
+ const struct iwl_cfg *cfg = trans->cfg;
+ struct device *dev = trans->dev;
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
@@ -466,11 +480,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
else
channel->flags = 0;
- /* TODO: Don't put limitations on UHB devices as we still don't
- * have NVM for them
- */
- if (cfg->uhb_supported)
- channel->flags = 0;
+ if (fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS))
+ channel->flags |= IEEE80211_CHAN_CAN_MONITOR;
+
iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
channel->hw_value, ch_flags);
IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
@@ -585,7 +598,8 @@ static const u8 iwl_vendor_caps[] = {
static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -668,10 +682,10 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
+ IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
+ IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC,
.phy_cap_info[0] =
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
@@ -695,10 +709,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
.phy_cap_info[5] =
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US) |
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP,
.phy_cap_info[6] =
IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
@@ -732,19 +747,22 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
@@ -792,7 +810,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
@@ -800,7 +817,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
.phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US),
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
@@ -828,6 +846,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -888,11 +909,13 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
u8 tx_chains, u8 rx_chains,
const struct iwl_fw *fw)
{
- bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+ bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO));
bool no_320;
- no_320 = !trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB;
+ no_320 = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
+ trans->reduced_cap_sku;
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -962,6 +985,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
}
}
} else {
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp =
+ &iftype_data->he_cap.he_mcs_nss_supp;
+
if (iftype_data->eht_cap.has_eht) {
struct ieee80211_eht_mcs_nss_supp *mcs_nss =
&iftype_data->eht_cap.eht_mcs_nss_supp;
@@ -980,6 +1006,19 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
IEEE80211_HE_PHY_CAP7_MAX_NC_1;
}
+
+ he_mcs_nss_supp->rx_mcs_80 |=
+ cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+ he_mcs_nss_supp->tx_mcs_80 |=
+ cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+ he_mcs_nss_supp->rx_mcs_160 |=
+ cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+ he_mcs_nss_supp->tx_mcs_160 |=
+ cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+ he_mcs_nss_supp->rx_mcs_80p80 |=
+ cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
+ he_mcs_nss_supp->tx_mcs_80p80 |=
+ cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << 2);
}
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210 && !is_ap)
@@ -988,8 +1027,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
- case IWL_CFG_RF_TYPE_MR:
- case IWL_CFG_RF_TYPE_MS:
+ case IWL_CFG_RF_TYPE_FM:
+ case IWL_CFG_RF_TYPE_WH:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
if (!is_ap)
@@ -1001,8 +1040,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
- ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
@@ -1010,7 +1048,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
- IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK);
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] &=
~(IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO |
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP);
@@ -1039,6 +1078,26 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
}
+
+ if (trans->step_urm) {
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs11_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
+ }
+
+ if (trans->no_160)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (trans->reduced_cap_sku) {
+ memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
+ sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
+ ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
}
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
@@ -1050,10 +1109,6 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
struct ieee80211_sband_iftype_data *iftype_data;
int i;
- /* should only initialize once */
- if (WARN_ON(sband->iftype_data))
- return;
-
BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_eht_capa));
BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_eht_capa));
BUILD_BUG_ON(sizeof(data->iftd.uhb) != sizeof(iwl_he_eht_capa));
@@ -1075,8 +1130,8 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
memcpy(iftype_data, iwl_he_eht_capa, sizeof(iwl_he_eht_capa));
- sband->iftype_data = iftype_data;
- sband->n_iftype_data = ARRAY_SIZE(iwl_he_eht_capa);
+ _ieee80211_set_sband_iftype_data(sband, iftype_data,
+ ARRAY_SIZE(iwl_he_eht_capa));
for (i = 0; i < sband->n_iftype_data; i++)
iwl_nvm_fixup_sband_iftd(trans, data, sband, &iftype_data[i],
@@ -1085,6 +1140,37 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
}
+void iwl_reinit_cab(struct iwl_trans *trans, struct iwl_nvm_data *data,
+ u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw)
+{
+ struct ieee80211_supported_band *sband;
+
+ sband = &data->bands[NL80211_BAND_2GHZ];
+ iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ,
+ tx_chains, rx_chains);
+
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+ fw);
+
+ sband = &data->bands[NL80211_BAND_5GHZ];
+ iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ,
+ tx_chains, rx_chains);
+ if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
+ iwl_init_vht_hw_capab(trans, data, &sband->vht_cap,
+ tx_chains, rx_chains);
+
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+ fw);
+
+ sband = &data->bands[NL80211_BAND_6GHZ];
+ if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+ iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
+ fw);
+}
+IWL_EXPORT_SYMBOL(iwl_reinit_cab);
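A hedged usage sketch for iwl_reinit_cab() (the surrounding variable names are illustrative): once the effective antenna masks change, the caller re-derives the chain masks and rebuilds the per-band capabilities:

	/* illustrative only: antenna masks were narrowed, e.g. by an override */
	u8 tx_chains = nvm_data->valid_tx_ant & fw->valid_tx_ant & set_tx_ant;
	u8 rx_chains = nvm_data->valid_rx_ant & fw->valid_rx_ant & set_rx_ant;

	iwl_reinit_cab(trans, nvm_data, tx_chains, rx_chains, fw);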
+
static void iwl_init_sbands(struct iwl_trans *trans,
struct iwl_nvm_data *data,
const void *nvm_ch_flags, u8 tx_chains,
@@ -1092,12 +1178,11 @@ static void iwl_init_sbands(struct iwl_trans *trans,
const struct iwl_fw *fw)
{
struct device *dev = trans->dev;
- const struct iwl_cfg *cfg = trans->cfg;
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
- n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+ n_channels = iwl_init_channel_map(trans, fw, data, nvm_ch_flags,
sbands_flags, v4);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
@@ -1363,7 +1448,7 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *
iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
- const struct iwl_fw *fw)
+ const struct iwl_fw *fw, u8 tx_ant, u8 rx_ant)
{
struct iwl_nvm_data *data;
u32 sbands_flags = 0;
@@ -1390,6 +1475,10 @@ iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
tx_chains &= data->valid_tx_ant;
if (data->valid_rx_ant)
rx_chains &= data->valid_rx_ant;
+ if (tx_ant)
+ tx_chains &= tx_ant;
+ if (rx_ant)
+ rx_chains &= rx_ant;
data->sku_cap_mimo_disabled = false;
data->sku_cap_band_24ghz_enable = true;
@@ -1484,9 +1573,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
&regulatory[NVM_CHANNELS_SDP] :
&nvm_sw[NVM_CHANNELS];
- /* in family 8000 Xtal calibration values moved to OTP */
- data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
- data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
@@ -1534,11 +1620,15 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_HT40PLUS;
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
flags &= ~NL80211_RRF_NO_HT40MINUS;
- } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
+ } else if (ch_idx < NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS &&
+ nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~NL80211_RRF_NO_HT40PLUS;
else
flags &= ~NL80211_RRF_NO_HT40MINUS;
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
+ flags &= ~NL80211_RRF_NO_HT40PLUS;
+ flags &= ~NL80211_RRF_NO_HT40MINUS;
}
if (!(nvm_flags & NVM_CHANNEL_80MHZ))
@@ -1558,9 +1648,26 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
/* Set the GO concurrent flag only in case that NO_IR is set.
* Otherwise it is meaningless
*/
- if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
- (flags & NL80211_RRF_NO_IR))
- flags |= NL80211_RRF_GO_CONCURRENT;
+ if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT)) {
+ if (flags & NL80211_RRF_NO_IR)
+ flags |= NL80211_RRF_GO_CONCURRENT;
+ if (flags & NL80211_RRF_DFS) {
+ flags |= NL80211_RRF_DFS_CONCURRENT;
+			/* Our device doesn't set the active bit for DFS
+			 * channels; however, once a channel is marked as DFS,
+			 * NO_IR is no longer needed.
+			 */
+ flags &= ~NL80211_RRF_NO_IR;
+ }
+ }
+
+ /* Set the AP type for the UHB case. */
+ if (nvm_flags & NVM_CHANNEL_VLP)
+ flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
+ else
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
/*
* reg_capa is per regulatory domain so apply it for every channel
@@ -1624,7 +1731,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
const u16 *nvm_chan;
struct ieee80211_regdomain *regd, *copy_rd;
struct ieee80211_reg_rule *rule;
- enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
@@ -1668,8 +1774,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_capa = iwl_get_reg_capa(cap, resp_ver);
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ enum nl80211_band band =
+ iwl_nl80211_band_from_channel_idx(ch_idx);
+
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
- band = iwl_nl80211_band_from_channel_idx(ch_idx);
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
@@ -1955,7 +2063,8 @@ out:
IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
- const struct iwl_fw *fw)
+ const struct iwl_fw *fw,
+ u8 set_tx_ant, u8 set_rx_ant)
{
struct iwl_nvm_get_info cmd = {};
struct iwl_nvm_data *nvm;
@@ -1969,6 +2078,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
+ u8 tx_ant;
+ u8 rx_ant;
+
/*
* All the values in iwl_nvm_get_info_rsp v4 are the same as
* in v3, except for the channel profile part of the
@@ -2038,7 +2150,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
@@ -2056,10 +2168,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
channel_profile = v4 ? (void *)rsp->regulatory.channel_profile :
(void *)rsp_v3->regulatory.channel_profile;
- iwl_init_sbands(trans, nvm,
- channel_profile,
- nvm->valid_tx_ant & fw->valid_tx_ant,
- nvm->valid_rx_ant & fw->valid_rx_ant,
+ tx_ant = nvm->valid_tx_ant & fw->valid_tx_ant;
+ rx_ant = nvm->valid_rx_ant & fw->valid_rx_ant;
+
+ if (set_tx_ant)
+ tx_ant &= set_tx_ant;
+ if (set_rx_ant)
+ rx_ant &= set_rx_ant;
+
+ iwl_init_sbands(trans, nvm, channel_profile, tx_ant, rx_ant,
sbands_flags, v4, fw);
iwl_free_resp(&hcmd);
diff --git a/iwl-nvm-parse.h b/iwl-nvm-parse.h
index c79f72d54482..0c6c3fb8c6dd 100644
--- a/iwl-nvm-parse.h
+++ b/iwl-nvm-parse.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2015, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2015, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
#define __iwl_nvm_parse_h__
#include <net/cfg80211.h>
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "mei/iwl-mei.h"
/**
@@ -21,7 +21,7 @@ enum iwl_nvm_sbands_flags {
IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1),
};
-/**
+/*
* iwl_parse_nvm_data - parse NVM data and return values
*
* This function parses all NVM values we need and then
@@ -38,7 +38,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains);
/**
- * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
+ * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
* MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain,
@@ -73,21 +73,28 @@ int iwl_read_external_nvm(struct iwl_trans *trans,
void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
unsigned int len);
-/**
+/*
* iwl_get_nvm - retrieve NVM data from firmware
*
* Allocates a new iwl_nvm_data structure, fills it with
* NVM data, and returns it to caller.
*/
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
- const struct iwl_fw *fw);
+ const struct iwl_fw *fw,
+ u8 set_tx_ant, u8 set_rx_ant);
-/**
+/*
* iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data
*/
struct iwl_nvm_data *
iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_mei_nvm *mei_nvm,
- const struct iwl_fw *fw);
+ const struct iwl_fw *fw, u8 set_tx_ant, u8 set_rx_ant);
+
+/*
+ * iwl_reinit_cab - to be called when the tx_chains or rx_chains are modified
+ */
+void iwl_reinit_cab(struct iwl_trans *trans, struct iwl_nvm_data *data,
+ u8 tx_chains, u8 rx_chains, const struct iwl_fw *fw);
#endif /* __iwl_nvm_parse_h__ */
diff --git a/iwl-nvm-utils.c b/iwl-nvm-utils.c
new file mode 100644
index 000000000000..b3c25acd3691
--- /dev/null
+++ b/iwl-nvm-utils.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2015 Intel Mobile Communications GmbH
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-utils.h"
+
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum nl80211_band band)
+{
+ struct ieee80211_channel *chan = &data->channels[0];
+ int n = 0, idx = 0;
+
+ while (idx < n_channels && chan->band != band)
+ chan = &data->channels[++idx];
+
+ sband->channels = &data->channels[idx];
+
+ while (idx < n_channels && chan->band == band) {
+ chan = &data->channels[++idx];
+ n++;
+ }
+
+ sband->n_channels = n;
+
+ return n;
+}
+IWL_EXPORT_SYMBOL(iwl_init_sband_channels);
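Typical use of iwl_init_sband_channels(), mirroring the iwl_init_sbands() callers elsewhere in this import: carve the contiguous per-band slice out of the flat channel array, then fill in the HT capabilities for that band:

	sband = &data->bands[NL80211_BAND_2GHZ];
	sband->band = NL80211_BAND_2GHZ;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_2GHZ);
	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ,
			     tx_chains, rx_chains);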
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+
+void iwl_init_ht_hw_capab(struct iwl_trans *trans,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum nl80211_band band,
+ u8 tx_chains, u8 rx_chains)
+{
+ const struct iwl_cfg *cfg = trans->cfg;
+ int max_bit_rate = 0;
+
+ tx_chains = hweight8(tx_chains);
+ if (cfg->rx_with_siso_diversity)
+ rx_chains = 1;
+ else
+ rx_chains = hweight8(rx_chains);
+
+ if (!(data->sku_cap_11n_enable) ||
+ (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
+ !cfg->ht_params) {
+ ht_info->ht_supported = false;
+ return;
+ }
+
+ if (data->sku_cap_mimo_disabled)
+ rx_chains = 1;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+
+ if (cfg->ht_params->stbc) {
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (tx_chains > 1)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ }
+
+ if (cfg->ht_params->ldpc)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (trans->trans_cfg->mq_rx_supported ||
+ iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ ht_info->mcs.rx_mask[1] = 0x00;
+ ht_info->mcs.rx_mask[2] = 0x00;
+
+ if (rx_chains >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ if (cfg->ht_params->ht_greenfield_support)
+ ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+
+ if (cfg->ht_params->ht40_bands & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains != rx_chains) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_init_ht_hw_capab);
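As a worked example of the rx_highest computation above, using the MAX_BIT_RATE_* defines from this file: a device with two Rx chains on an HT40-capable band advertises 2 * 150 = 300 Mbps, which fits within IEEE80211_HT_MCS_RX_HIGHEST_MASK:

	/* 2 Rx chains, HT40 allowed on this band */
	max_bit_rate = MAX_BIT_RATE_40_MHZ * 2;			/* 300 Mbps */
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);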
diff --git a/iwl-eeprom-parse.h b/iwl-nvm-utils.h
index 0e8ca761d24b..ac0a29a1c31f 100644
--- a/iwl-eeprom-parse.h
+++ b/iwl-nvm-utils.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018, 2020-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018, 2020-2023 Intel Corporation
* Copyright (C) 2015 Intel Mobile Communications GmbH
*/
#ifndef __iwl_eeprom_parse_h__
@@ -58,23 +58,6 @@ struct iwl_nvm_data {
struct ieee80211_channel channels[];
};
-/**
- * iwl_parse_eeprom_data - parse EEPROM data and return values
- *
- * @dev: device pointer we're parsing for, for debug only
- * @cfg: device configuration for parsing and overrides
- * @eeprom: the EEPROM data
- * @eeprom_size: length of the EEPROM data
- *
- * This function parses all EEPROM values we need and then
- * returns a (newly allocated) struct containing all the
- * relevant values for driver use. The struct must be freed
- * later with iwl_free_nvm_data().
- */
-struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
- const u8 *eeprom, size_t eeprom_size);
-
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum nl80211_band band);
diff --git a/iwl-op-mode.h b/iwl-op-mode.h
index af5f9b210f22..8ef5ed2db051 100644
--- a/iwl-op-mode.h
+++ b/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -64,15 +64,15 @@ struct iwl_cfg;
* received on the RSS queue(s). The queue parameter indicates which of the
* RSS queues received this frame; it will always be non-zero.
* This method must not sleep.
- * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set
- * completes. Must be atomic.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
- * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
+ * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
+ * Note that this must not return %true for newer devices using gen2 PCIe
+ * transport.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
@@ -85,6 +85,10 @@ struct iwl_cfg;
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
* @time_point: called when transport layer wants to collect debug data
+ * @device_powered_off: called upon resume from hibernation (and in other
+ *	cases where the device may have lost power). The op_mode needs to reset
+ *	its internal state because the device did not survive the system state
+ *	transition: the firmware is no longer running, etc.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -96,8 +100,6 @@ struct iwl_op_mode_ops {
struct iwl_rx_cmd_buffer *rxb);
void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
- void (*async_cb)(struct iwl_op_mode *op_mode,
- const struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -109,6 +111,7 @@ struct iwl_op_mode_ops {
void (*time_point)(struct iwl_op_mode *op_mode,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
+ void (*device_powered_off)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -147,13 +150,6 @@ static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}
-static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode,
- const struct iwl_device_cmd *cmd)
-{
- if (op_mode->ops->async_cb)
- op_mode->ops->async_cb(op_mode, cmd);
-}
-
static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
int queue)
{
@@ -194,7 +190,8 @@ static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
might_sleep();
- op_mode->ops->nic_config(op_mode);
+ if (op_mode->ops->nic_config)
+ op_mode->ops->nic_config(op_mode);
}
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
@@ -212,4 +209,11 @@ static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
op_mode->ops->time_point(op_mode, tp_id, tp_data);
}
+static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
+ return;
+ op_mode->ops->device_powered_off(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
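An illustrative caller of the new hook (the condition flag is an assumption, not taken from this patch): a transport resume path informing the op mode that the device lost power across the system transition:

	/* device_was_powered_off is a hypothetical flag set by the resume path */
	if (device_was_powered_off)
		iwl_op_mode_device_powered_off(trans->op_mode);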
diff --git a/iwl-prph.h b/iwl-prph.h
index 6dd381ff0f9e..dc171c29eb7b 100644
--- a/iwl-prph.h
+++ b/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -96,7 +96,7 @@
#define DTSC_PTAT_AVG (0x00a10650)
-/**
+/*
* Tx Scheduler
*
* The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
@@ -169,7 +169,7 @@
*/
#define SCD_MEM_LOWER_BOUND (0x0000)
-/**
+/*
* Max Tx window size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
@@ -348,8 +348,8 @@
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
#define WFPM_OTP_CFG1_ADDR 0x00a03098
-#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
-#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
+#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5)
+#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4)
#define WFPM_OTP_BZ_BNJ_JACKET_BIT 5
#define WFPM_OTP_BZ_BNJ_CDB_BIT 4
#define WFPM_OTP_CFG1_IS_JACKET(_val) (((_val) & 0x00000020) >> WFPM_OTP_BZ_BNJ_JACKET_BIT)
@@ -365,16 +365,25 @@
#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF
enum {
- ENABLE_WFPM = BIT(31),
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
};
-#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_GL 0x910
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_I 0x900
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_W 0x901
+
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_PMU_STEP_FLOW 0xA2D588
+#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
+
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
@@ -383,7 +392,7 @@ enum {
#define PREG_PRPH_WPROT_22000 0xA04D00
#define SB_MODIFY_CFG_FLAG 0xA03088
-#define SB_CFG_RESIDES_IN_OTP_MASK 0x10
+#define SB_CFG_RESIDES_IN_ROM 0x80
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0
@@ -424,14 +433,14 @@ enum {
* reserved: bits 12-18
* slave_exist: bit 19
* dash: bits 20-23
- * step: bits 24-26
- * flavor: bits 27-31
+ * step: bits 24-27
+ * flavor: bits 28-31
*/
#define REG_CRF_ID_TYPE(val) (((val) & 0x00000FFF) >> 0)
#define REG_CRF_ID_SLAVE(val) (((val) & 0x00080000) >> 19)
#define REG_CRF_ID_DASH(val) (((val) & 0x00F00000) >> 20)
-#define REG_CRF_ID_STEP(val) (((val) & 0x07000000) >> 24)
-#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF8000000) >> 27)
+#define REG_CRF_ID_STEP(val) (((val) & 0x0F000000) >> 24)
+#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF0000000) >> 28)
#define UREG_CHICK (0xA05C00)
#define UREG_CHICK_MSI_ENABLE BIT(24)
@@ -447,11 +456,8 @@ enum {
#define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501
#define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532
#define REG_CRF_ID_TYPE_GF 0x410
-#define REG_CRF_ID_TYPE_GF_TC 0xF08
-#define REG_CRF_ID_TYPE_MR 0x810
#define REG_CRF_ID_TYPE_FM 0x910
-#define REG_CRF_ID_TYPE_FMI 0x930
-#define REG_CRF_ID_TYPE_FMR 0x900
+#define REG_CRF_ID_TYPE_WHP 0xA10
#define HPM_DEBUG 0xA03440
#define PERSISTENCE_BIT BIT(12)
@@ -516,4 +522,8 @@ enum {
#define WFPM_LMAC2_PD_NOTIFICATION 0xA033CC
#define WFPM_LMAC2_PD_RE_READ BIT(31)
+#define DPHYIP_INDIRECT 0xA2D800
+#define DPHYIP_INDIRECT_RD_MSK 0xFF000000
+#define DPHYIP_INDIRECT_RD_SHIFT 24
+
#endif /* __iwl_prph_h__ */
diff --git a/iwl-trans.c b/iwl-trans.c
index 4bd759432d44..3c9d91496c82 100644
--- a/iwl-trans.c
+++ b/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -11,13 +11,13 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
-#include "queue/tx.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
+#include "pcie/internal.h"
+#include "iwl-context-info-gen3.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
@@ -37,22 +37,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#endif
trans->dev = dev;
- trans->ops = ops;
trans->num_rx_queues = 1;
- WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
-
- if (trans->trans_cfg->gen2) {
- trans->txqs.tfd.addr_size = 64;
- trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
- } else {
- trans->txqs.tfd.addr_size = 36;
- trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
-
return trans;
}
@@ -78,31 +64,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
- else
- trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
- /*
- * For gen2 devices, we use a single allocation for each byte-count
- * table, but they're pretty small (1k) so use a DMA pool that we
- * allocate here.
- */
- if (trans->trans_cfg->gen2) {
- trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
- trans->txqs.bc_tbl_size,
- 256, 0);
- if (!trans->txqs.bc_pool)
- return -ENOMEM;
- }
-
- /* Some things must not change even if the config does */
- WARN_ON(trans->txqs.tfd.addr_size !=
- (trans->trans_cfg->gen2 ? 64 : 36));
-
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
@@ -112,12 +73,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (!trans->dev_cmd_pool)
return -ENOMEM;
- trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
- if (!trans->txqs.tso_hdr_page) {
- kmem_cache_destroy(trans->dev_cmd_pool);
- return -ENOMEM;
- }
-
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans->wait_command_queue);
@@ -126,20 +81,6 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
- int i;
-
- if (trans->txqs.tso_hdr_page) {
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans->txqs.tso_hdr_page, i);
-
- if (p && p->page)
- __free_page(p->page);
- }
-
- free_percpu(trans->txqs.tso_hdr_page);
- }
-
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -167,14 +108,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
- if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
return -EIO;
- }
-
- if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) &&
- !(cmd->flags & CMD_ASYNC)))
- return -EINVAL;
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@@ -184,7 +120,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
cmd->id = DEF_ID(cmd->id);
}
- ret = iwl_trans_txq_send_hcmd(trans, cmd);
+ ret = iwl_trans_pcie_send_hcmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -251,3 +187,379 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+
+void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ trans->op_mode = trans_cfg->op_mode;
+
+ iwl_trans_pcie_configure(trans, trans_cfg);
+ WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+}
+IWL_EXPORT_SYMBOL(iwl_trans_configure);
+
+int iwl_trans_start_hw(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_start_hw(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
+
+void iwl_trans_op_mode_leave(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ iwl_trans_pcie_op_mode_leave(trans);
+
+ trans->op_mode = NULL;
+
+ trans->state = IWL_TRANS_NO_FW;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);
+
+void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ iwl_trans_pcie_write8(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write8);
+
+void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_trans_pcie_write32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write32);
+
+u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_trans_pcie_read32(trans, ofs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read32);
+
+u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_trans_pcie_read_prph(trans, ofs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_prph);
+
+void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ return iwl_trans_pcie_write_prph(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_prph);
+
+int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_mem);
+
+int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
+{
+ return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_mem);
+
+void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
+{
+ if (state)
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
+ else
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);
+
+int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
+{
+ return iwl_trans_pcie_sw_reset(trans, retake_ownership);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);
+
+struct iwl_trans_dump_data *
+iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx)
+{
+ return iwl_trans_pcie_dump_data(trans, dump_mask,
+ sanitize_ops, sanitize_ctx);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
+
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_d3_suspend(trans, test, reset);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
+
+int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test, bool reset)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_d3_resume(trans, status, test, reset);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
+
+void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
+{
+ iwl_trans_pci_interrupts(trans, enable);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_interrupts);
+
+void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_sync_nmi(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
+
+int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ u64 src_addr, u32 byte_cnt)
+{
+ return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);
+
+void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);
+
+int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
+{
+ return iwl_trans_pcie_read_config32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_config32);
+
+bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
+{
+ return iwl_trans_pcie_grab_nic_access(trans);
+}
+IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);
+
+void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_release_nic_access(trans);
+ __release(nic_access);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
+
+void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ might_sleep();
+
+ trans->state = IWL_TRANS_FW_ALIVE;
+
+ if (trans->trans_cfg->gen2)
+ iwl_trans_pcie_gen2_fw_alive(trans);
+ else
+ iwl_trans_pcie_fw_alive(trans, scd_addr);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
+
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill)
+{
+ int ret;
+
+ might_sleep();
+
+ WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
+ clear_bit(STATUS_FW_ERROR, &trans->status);
+
+ if (trans->trans_cfg->gen2)
+ ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
+ else
+ ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
+
+ if (ret == 0)
+ trans->state = IWL_TRANS_FW_STARTED;
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_start_fw);
+
+void iwl_trans_stop_device(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ if (trans->trans_cfg->gen2)
+ iwl_trans_pcie_gen2_stop_device(trans);
+ else
+ iwl_trans_pcie_stop_device(trans);
+
+ trans->state = IWL_TRANS_NO_FW;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_stop_device);
+
+int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
+{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ if (trans->trans_cfg->gen2)
+ return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);
+
+ return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_tx);
+
+void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_reclaim);
+
+void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd)
+{
+ iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);
+
+bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout)
+{
+ might_sleep();
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return false;
+
+ return iwl_trans_pcie_txq_enable(trans, queue, ssn,
+ cfg, queue_wdg_timeout);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
+
+int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_trans_pcie_wait_txq_empty(trans, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);
+
+int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);
+
+void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);
+
+void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int txq_id, bool shared_mode)
+{
+ iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_debugfs_cleanup(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
+#endif
+
+void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_set_q_ptrs(trans, queue, ptr);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);
+
+int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int wdg_timeout)
+{
+ might_sleep();
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
+ size, wdg_timeout);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);
+
+void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
+{
+ iwl_txq_dyn_free(trans, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_free);
+
+int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
+
+int iwl_trans_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ const struct iwl_ucode_capabilities *capa)
+{
+ return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
+
+void iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
+
+int iwl_trans_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
+{
+ return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
+ capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
+
+void iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
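The exported wrappers added above replace the old indirect ops dispatch with direct PCIe calls. A minimal usage sketch, assuming ECHO_CMD as a no-payload command id (illustrative, not taken from this patch):

static int example_send_echo(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = ECHO_CMD,	/* assumed: a command with no payload */
		/* no CMD_ASYNC flag, so the call sleeps for the response */
	};

	/* Returns -EIO (with a one-time warning) unless the firmware is
	 * alive; otherwise the command is handed straight to
	 * iwl_trans_pcie_send_hcmd(). */
	return iwl_trans_send_cmd(trans, &cmd);
}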
diff --git a/iwl-trans.h b/iwl-trans.h
index d02943d0ea62..0ef48effeefb 100644
--- a/iwl-trans.h
+++ b/iwl-trans.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -26,11 +26,9 @@
* DOC: Transport layer - what is it ?
*
* The transport layer is the layer that deals with the HW directly. It provides
- * an abstraction of the underlying HW to the upper layer. The transport layer
- * doesn't provide any policy, algorithm or anything of this kind, but only
- * mechanisms to make the HW do something. It is not completely stateless but
- * close to it.
- * We will have an implementation for each different supported bus.
+ * the PCIe access to the underlying hardware. The transport layer doesn't
+ * provide any policy, algorithm or anything of this kind, but only mechanisms
+ * to make the HW do something. It is not completely stateless but close to it.
*/
/**
@@ -56,6 +54,10 @@
* 6) Eventually, the free function will be called.
*/
+/* default preset 0 (start from bit 16) */
+#define IWL_FW_DBG_DOMAIN_POS 16
+#define IWL_FW_DBG_DOMAIN BIT(IWL_FW_DBG_DOMAIN_POS)
+
#define IWL_TRANS_FW_DBG_DOMAIN(trans) IWL_FW_INI_DOMAIN_ALWAYS_ON
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
@@ -105,8 +107,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
* the response. The caller needs to call iwl_free_resp when done.
- * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
- * called after this command completes. Valid only with CMD_ASYNC.
+ * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
+ * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
* @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
* SUSPEND and RESUME commands. We are in D3 mode when we set
* trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
@@ -115,9 +117,10 @@ enum CMD_MODE {
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
- CMD_WANT_ASYNC_CALLBACK = BIT(3),
+ CMD_BLOCK_TXQS = BIT(3),
CMD_SEND_IN_D3 = BIT(4),
};
+#define CMD_MODE_BITS 5
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -127,6 +130,11 @@ enum CMD_MODE {
* For allocation of the command and tx queues, this establishes the overall
* size of the largest command we send to uCode, except for commands that
* aren't fully copied and use other TFD space.
+ *
+ * @hdr: command header
+ * @payload: payload for the command
+ * @hdr_wide: wide command header
+ * @payload_wide: payload for the wide command
*/
struct iwl_device_cmd {
union {
@@ -163,12 +171,6 @@ struct iwl_device_tx_cmd {
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
-
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
@@ -274,10 +276,10 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
-#define IWL_9000_MAX_RX_HW_QUEUES 6
+#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
- * enum iwl_wowlan_status - WoWLAN image/device status
+ * enum iwl_d3_status - WoWLAN image/device status
* @IWL_D3_STATUS_ALIVE: firmware is still running after resume
* @IWL_D3_STATUS_RESET: device was reset while suspended
*/
@@ -295,9 +297,6 @@ enum iwl_d3_status {
* @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
- * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
- * are sent
- * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
@@ -310,8 +309,6 @@ enum iwl_trans_status {
STATUS_RFKILL_HW,
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
- STATUS_TRANS_GOING_IDLE,
- STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
@@ -478,182 +475,6 @@ struct iwl_pnvm_image {
};
/**
- * struct iwl_trans_ops - transport specific operations
- *
- * All the handlers MUST be implemented
- *
- * @start_hw: starts the HW. From that point on, the HW can send interrupts.
- * May sleep.
- * @op_mode_leave: Turn off the HW RF kill indication if on
- * May sleep
- * @start_fw: allocates and inits all the resources for the transport
- * layer. Also kick a fw image.
- * May sleep
- * @fw_alive: called when the fw sends alive notification. If the fw provides
- * the SCD base address in SRAM, then provide it here, or 0 otherwise.
- * May sleep
- * @stop_device: stops the whole device (embedded CPU put to reset) and stops
- * the HW. From that point on, the HW will be stopped but will still issue
- * an interrupt if the HW RF kill switch is triggered.
- * This callback must do the right thing and not crash even if %start_hw()
- * was called but not &start_fw(). May sleep.
- * @d3_suspend: put the device into the correct mode for WoWLAN during
- * suspend. This is optional, if not implemented WoWLAN will not be
- * supported. This callback may sleep.
- * @d3_resume: resume the device after WoWLAN, enabling the opmode to
- * talk to the WoWLAN image to get its status. This is optional, if not
- * implemented WoWLAN will not be supported. This callback may sleep.
- * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
- * If RFkill is asserted in the middle of a SYNC host command, it must
- * return -ERFKILL straight away.
- * May sleep only if CMD_ASYNC is not set
- * @tx: send an skb. The transport relies on the op_mode to zero the
- * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
- * the CSUM will be taken care of (TCP CSUM and IP header in case of
- * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
- * header if it is IPv4.
- * Must be atomic
- * @reclaim: free packet until ssn. Returns a list of freed packets.
- * Must be atomic
- * @txq_enable: setup a queue. To setup an AC queue, use the
- * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
- * this one. The op_mode must not configure the HCMD queue. The scheduler
- * configuration may be %NULL, in which case the hardware will not be
- * configured. If true is returned, the operation mode needs to increment
- * the sequence number of the packets routed to this queue because of a
- * hardware scheduler bug. May sleep.
- * @txq_disable: de-configure a Tx queue to send AMPDUs
- * Must be atomic
- * @txq_set_shared_mode: change Tx queue shared/unshared marking
- * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
- * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
- * @freeze_txq_timer: prevents the timer of the queue from firing until the
- * queue is set to awake. Must be atomic.
- * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
- * that the transport needs to refcount the calls since this function
- * will be called several times with block = true, and then the queues
- * need to be unblocked only after the same number of calls with
- * block = false.
- * @write8: write a u8 to a register at offset ofs from the BAR
- * @write32: write a u32 to a register at offset ofs from the BAR
- * @read32: read a u32 register at offset ofs from the BAR
- * @read_prph: read a DWORD from a periphery register
- * @write_prph: write a DWORD to a periphery register
- * @read_mem: read device's SRAM in DWORD
- * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
- * will be zeroed.
- * @read_config32: read a u32 value from the device's config space at
- * the given offset.
- * @configure: configure parameters required by the transport layer from
- * the op_mode. May be called several times before start_fw, can't be
- * called after that.
- * @set_pmi: set the power pmi state
- * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
- * Sleeping is not allowed between grab_nic_access and
- * release_nic_access.
- * @release_nic_access: let the NIC go to sleep. The "flags" parameter
- * must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask - set SRAM register according to value and mask.
- * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
- * TX'ed commands and similar. The buffer will be vfree'd by the caller.
- * Note that the transport must fill in the proper file headers.
- * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
- * of the trans debugfs
- * @load_pnvm: save the pnvm data in DRAM
- * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
- * context info.
- * @load_reduce_power: copy reduce power table to the corresponding DRAM memory
- * @set_reduce_power: set reduce power table addresses in the sratch buffer
- * @interrupts: disable/enable interrupts to transport
- */
-struct iwl_trans_ops {
-
- int (*start_hw)(struct iwl_trans *iwl_trans);
- void (*op_mode_leave)(struct iwl_trans *iwl_trans);
- int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill);
- void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
- void (*stop_device)(struct iwl_trans *trans);
-
- int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
- int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset);
-
- int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-
- int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue);
- void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
- struct sk_buff_head *skbs);
-
- void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
-
- bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int queue_wdg_timeout);
- void (*txq_disable)(struct iwl_trans *trans, int queue,
- bool configure_scd);
- /* 22000 functions */
- int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int queue_wdg_timeout);
- void (*txq_free)(struct iwl_trans *trans, int queue);
- int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data);
-
- void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
- bool shared);
-
- int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
- int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
- void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
- void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
-
- void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
- void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
- u32 (*read32)(struct iwl_trans *trans, u32 ofs);
- u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
- void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
- int (*read_mem)(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
- int (*write_mem)(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
- int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
- void (*configure)(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
- void (*set_pmi)(struct iwl_trans *trans, bool state);
- int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
- bool (*grab_nic_access)(struct iwl_trans *trans);
- void (*release_nic_access)(struct iwl_trans *trans);
- void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
- u32 value);
-
- struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
- u32 dump_mask,
- const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx);
- void (*debugfs_cleanup)(struct iwl_trans *trans);
- void (*sync_nmi)(struct iwl_trans *trans);
- int (*load_pnvm)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa);
- void (*set_pnvm)(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
- int (*load_reduce_power)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa);
- void (*set_reduce_power)(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-
- void (*interrupts)(struct iwl_trans *trans, bool enable);
- int (*imr_dma_data)(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr,
- u32 byte_cnt);
-
-};
-
-/**
* enum iwl_trans_state - state of the transport layer
*
* @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
@@ -734,6 +555,7 @@ struct iwl_dram_data {
};
/**
+ * struct iwl_dram_regions - DRAM regions container structure
* @drams: array of several DRAM areas that contains the pnvm and power
* reduction table payloads.
* @n_regions: number of DRAM regions that were allocated
@@ -776,7 +598,7 @@ struct iwl_self_init_dram {
* @imr_size: imr dram size received from fw
* @sram_addr: sram address from debug tlv
* @sram_size: sram size from debug tlv
- * @imr2sram_remainbyte`: size remained after each dma transfer
+ * @imr2sram_remainbyte: size remaining after each dma transfer
* @imr_curr_addr: current dst address used during dma transfer
* @imr_base_addr: imr address received from fw
*/
@@ -823,16 +645,21 @@ struct iwl_pc_data {
* @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @unsupported_region_msk: unsupported regions out of active_regions
* @active_regions: active regions
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
* @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
* @ucode_preset: preset based on ucode
+ * @restart_required: indicates debug restart is required
+ * @last_tp_resetfw: last handling of reset during debug timepoint
+ * @imr_data: IMR debug data allocation
* @dump_file_name_ext: dump file name extension
* @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
* @pc_data: details of the program counter
+ * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
*/
struct iwl_trans_debug {
u8 n_dest_reg;
@@ -862,8 +689,7 @@ struct iwl_trans_debug {
u64 unsupported_region_msk;
struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
struct list_head debug_info_tlv_list;
- struct iwl_dbg_tlv_time_point_data
- time_point[IWL_FW_INI_TIME_POINT_NUM];
+ struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM];
struct list_head periodic_trig_list;
u32 domains_bitmap;
@@ -875,6 +701,7 @@ struct iwl_trans_debug {
bool dump_file_name_ext_valid;
u32 num_pc;
struct iwl_pc_data *pc_data;
+ bool yoyo_bin_loaded;
};
struct iwl_dma_ptr {
@@ -886,7 +713,9 @@ struct iwl_dma_ptr {
struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
- u32 flags;
+ u32 flags: CMD_MODE_BITS;
+ /* sg_offset is valid if it is non-zero */
+ u32 sg_offset: PAGE_SHIFT;
u32 tbs;
};
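The flags member above now carries only CMD_MODE_BITS bits, and sg_offset fits a page offset. A hedged compile-time check (not in the patch) would document the assumption that every CMD_MODE flag, up to CMD_SEND_IN_D3 = BIT(4), still fits in the narrowed field:

/* sketch only: catches a future CMD_MODE flag overflowing the bit-field */
static_assert(CMD_SEND_IN_D3 < BIT(CMD_MODE_BITS),
	      "CMD_MODE flags must fit in iwl_cmd_meta::flags");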
@@ -916,7 +745,6 @@ struct iwl_pcie_first_tb_buf {
/**
* struct iwl_txq - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
* @tfds: transmit frame descriptors (DMA memory)
* @first_tb_bufs: start of command headers, including scratch buffers, for
* the writeback -- this is DMA memory and an array holding one buffer
@@ -924,6 +752,7 @@ struct iwl_pcie_first_tb_buf {
* @first_tb_dma: DMA address for the first_tb_bufs start
* @entries: transmit entries (driver state)
* @lock: queue lock
+ * @reclaim_lock: reclaim lock
* @stuck_timer: timer that fires if queue gets stuck
* @trans: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
@@ -931,6 +760,7 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @block: queue is blocked
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
@@ -939,6 +769,8 @@ struct iwl_pcie_first_tb_buf {
* @id: queue id
* @low_mark: low watermark, resume queue if free space more than this
* @high_mark: high watermark, stop queue if free space less than this
+ * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
+ * @overflow_tx: need to transmit from overflow
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -963,6 +795,8 @@ struct iwl_txq {
struct iwl_pcie_txq_entry *entries;
/* lock for syncing changes on the queue */
spinlock_t lock;
+ /* lock to prevent concurrent reclaim */
+ spinlock_t reclaim_lock;
unsigned long frozen_expiry_remainder;
struct timer_list stuck_timer;
struct iwl_trans *trans;
@@ -986,68 +820,36 @@ struct iwl_txq {
};
/**
- * struct iwl_trans_txqs - transport tx queues data
- *
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
- * @page_offs: offset from skb->cb to mac header page pointer
- * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used - bit mask of used queues
- * @queue_stopped - bit mask of stopped queues
- * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @queue_alloc_cmd_ver: queue allocation command version
- */
-struct iwl_trans_txqs {
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- struct dma_pool *bc_pool;
- size_t bc_tbl_size;
- bool bc_table_dword;
- u8 page_offs;
- u8 dev_cmd_offs;
- struct iwl_tso_hdr_page __percpu *tso_hdr_page;
-
- struct {
- u8 fifo;
- u8 q_id;
- unsigned int wdg_timeout;
- } cmd;
-
- struct {
- u8 max_tbs;
- u16 size;
- u8 addr_size;
- } tfd;
-
- struct iwl_dma_ptr scd_bc_tbls;
-
- u8 queue_alloc_cmd_ver;
-};
-
-/**
* struct iwl_trans - transport common data
*
- * @csme_own - true if we couldn't get ownership on the device
- * @ops - pointer to iwl_trans_ops
- * @op_mode - pointer to the op_mode
+ * @csme_own: true if we couldn't get ownership on the device
+ * @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
- * @cfg - pointer to the configuration
- * @drv - pointer to iwl_drv
+ * @cfg: pointer to the configuration
+ * @drv: pointer to iwl_drv
+ * @state: current device state
* @status: a bit-mask of transport status flags
- * @dev - pointer to struct device * that represents the device
+ * @dev: pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id a u32 with the device RF ID
- * @hw_crf_id a u32 with the device CRF ID
- * @hw_wfpm_id a u32 with the device wfpm ID
+ * @hw_rf_id: a u32 with the device RF ID
+ * @hw_cnv_id: a u32 with the device CNV ID
+ * @hw_crf_id: a u32 with the device CRF ID
+ * @hw_wfpm_id: a u32 with the device wfpm ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @sku_id: the SKU identifier (for PNVM matching)
+ * @pnvm_loaded: indicates PNVM was loaded
+ * @hw_rev: the revision data of the HW
* @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
+ * @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
+ * @command_groups: pointer to command group name list array
+ * @command_groups_size: array size of @command_groups
* @wide_cmd_header: true when ucode supports wide command header format
* @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
@@ -1056,23 +858,31 @@ struct iwl_trans_txqs {
* @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @dbgfs_dir: iwlwifi debugfs base dir for this device
+ * @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
- * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
+ * @dbg: additional debug data, see &struct iwl_trans_debug
+ * @init_dram: FW initialization DMA data
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
- * @iwl_trans_txqs: transport tx queues data.
+ * @name: the device name
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
* only valid for discrete (not integrated) NICs
+ * @invalid_tx_cmd: invalid TX command buffer
+ * @reduced_cap_sku: reduced capability supported SKU
+ * @no_160: device not supporting 160 MHz
+ * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @trans_specific: data for the specific transport this is allocated for/with
*/
struct iwl_trans {
bool csme_own;
- const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;
const struct iwl_cfg *cfg;
@@ -1091,6 +901,8 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
u32 sku_id[3];
+ bool reduced_cap_sku;
+ u8 no_160:1, step_urm:1;
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
@@ -1127,12 +939,13 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
const char *name;
- struct iwl_trans_txqs txqs;
u32 mbx_addr_0_step;
u32 mbx_addr_1_step;
u8 pcie_link_speed;
+ struct iwl_dma_ptr invalid_tx_cmd;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[] __aligned(sizeof(void *));
@@ -1141,101 +954,29 @@ struct iwl_trans {
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
-static inline void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
-{
- trans->op_mode = trans_cfg->op_mode;
-
- trans->ops->configure(trans, trans_cfg);
- WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
-}
-
-static inline int iwl_trans_start_hw(struct iwl_trans *trans)
-{
- might_sleep();
-
- return trans->ops->start_hw(trans);
-}
-
-static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
-{
- might_sleep();
-
- if (trans->ops->op_mode_leave)
- trans->ops->op_mode_leave(trans);
-
- trans->op_mode = NULL;
-
- trans->state = IWL_TRANS_NO_FW;
-}
-
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
-{
- might_sleep();
-
- trans->state = IWL_TRANS_FW_ALIVE;
-
- trans->ops->fw_alive(trans, scd_addr);
-}
-
-static inline int iwl_trans_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw,
- bool run_in_rfkill)
-{
- int ret;
-
- might_sleep();
-
- WARN_ON_ONCE(!trans->rx_mpdu_cmd);
-
- clear_bit(STATUS_FW_ERROR, &trans->status);
- ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
- if (ret == 0)
- trans->state = IWL_TRANS_FW_STARTED;
-
- return ret;
-}
+void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
-{
- might_sleep();
+int iwl_trans_start_hw(struct iwl_trans *trans);
- trans->ops->stop_device(trans);
+void iwl_trans_op_mode_leave(struct iwl_trans *trans);
- trans->state = IWL_TRANS_NO_FW;
-}
+void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
-static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
-{
- might_sleep();
- if (!trans->ops->d3_suspend)
- return -EOPNOTSUPP;
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill);
- return trans->ops->d3_suspend(trans, test, reset);
-}
+void iwl_trans_stop_device(struct iwl_trans *trans);
-static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
-{
- might_sleep();
- if (!trans->ops->d3_resume)
- return -EOPNOTSUPP;
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
- return trans->ops->d3_resume(trans, status, test, reset);
-}
+int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test, bool reset);
-static inline struct iwl_trans_dump_data *
+struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx)
-{
- if (!trans->ops->dump_data)
- return NULL;
- return trans->ops->dump_data(trans, dump_mask,
- sanitize_ops, sanitize_ctx);
-}
+ void *sanitize_ctx);
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
@@ -1251,108 +992,31 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
-static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue)
-{
- if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
- return -EIO;
-
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
-
- return trans->ops->tx(trans, skb, dev_cmd, queue);
-}
-
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
- int ssn, struct sk_buff_head *skbs)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- trans->ops->reclaim(trans, queue, ssn, skbs);
-}
-
-static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
- int ptr)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- trans->ops->set_q_ptrs(trans, queue, ptr);
-}
-
-static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
- bool configure_scd)
-{
- trans->ops->txq_disable(trans, queue, configure_scd);
-}
-
-static inline bool
-iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int queue_wdg_timeout)
-{
- might_sleep();
-
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return false;
- }
-
- return trans->ops->txq_enable(trans, queue, ssn,
- cfg, queue_wdg_timeout);
-}
+int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
-static inline int
-iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
-{
- if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
- return -ENOTSUPP;
+void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
- return trans->ops->rxq_dma_data(trans, queue, data);
-}
-
-static inline void
-iwl_trans_txq_free(struct iwl_trans *trans, int queue)
-{
- if (WARN_ON_ONCE(!trans->ops->txq_free))
- return;
+void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);
- trans->ops->txq_free(trans, queue);
-}
+void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd);
-static inline int
-iwl_trans_txq_alloc(struct iwl_trans *trans,
- u32 flags, u32 sta_mask, u8 tid,
- int size, unsigned int wdg_timeout)
-{
- might_sleep();
+bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout);
- if (WARN_ON_ONCE(!trans->ops->txq_alloc))
- return -ENOTSUPP;
+int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+void iwl_trans_txq_free(struct iwl_trans *trans, int queue);
- return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
- size, wdg_timeout);
-}
+int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int wdg_timeout);
-static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
- int queue, bool shared_mode)
-{
- if (trans->ops->txq_set_shared_mode)
- trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
-}
+void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int txq_id, bool shared_mode);
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
@@ -1385,90 +1049,32 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
-static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
- unsigned long txqs,
- bool freeze)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- if (trans->ops->freeze_txq_timer)
- trans->ops->freeze_txq_timer(trans, txqs, freeze);
-}
-
-static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
- bool block)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- if (trans->ops->block_txq_ptrs)
- trans->ops->block_txq_ptrs(trans, block);
-}
-
-static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
- u32 txqs)
-{
- if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
- return -ENOTSUPP;
+void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
- /* No need to wait if the firmware is not alive */
- if (trans->state != IWL_TRANS_FW_ALIVE) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);
- return trans->ops->wait_tx_queues_empty(trans, txqs);
-}
-
-static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
-{
- if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
- return -ENOTSUPP;
+int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);
- return trans->ops->wait_txq_empty(trans, queue);
-}
+void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);
-static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
-{
- trans->ops->write8(trans, ofs, val);
-}
+u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);
-static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
-{
- trans->ops->write32(trans, ofs, val);
-}
+u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);
-static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
-{
- return trans->ops->read32(trans, ofs);
-}
+void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
-static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
-{
- return trans->ops->read_prph(trans, ofs);
-}
+int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
-static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
- u32 val)
-{
- return trans->ops->write_prph(trans, ofs, val);
-}
+int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
-static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
-{
- return trans->ops->read_mem(trans, addr, buf, dwords);
-}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
+#endif
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
do { \
@@ -1477,30 +1083,21 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
-static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr,
- u32 byte_cnt)
-{
- if (trans->ops->imr_dma_data)
- return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
- return 0;
-}
+int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ u64 src_addr, u32 byte_cnt);
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
u32 value;
- if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
+ if (iwl_trans_read_mem(trans, addr, &value, 1))
return 0xa5a5a5a5;
return value;
}
-static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- return trans->ops->write_mem(trans, addr, buf, dwords);
-}
+int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
u32 val)
@@ -1508,36 +1105,21 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
return iwl_trans_write_mem(trans, addr, &val, 1);
}
-static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
-{
- if (trans->ops->set_pmi)
- trans->ops->set_pmi(trans, state);
-}
+void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);
-static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
- bool retake_ownership)
-{
- if (trans->ops->sw_reset)
- return trans->ops->sw_reset(trans, retake_ownership);
- return 0;
-}
+int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership);
-static inline void
-iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
- trans->ops->set_bits_mask(trans, reg, mask, value);
-}
+void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+
+bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
#define iwl_trans_grab_nic_access(trans) \
__cond_lock(nic_access, \
- likely((trans)->ops->grab_nic_access(trans)))
+ likely(_iwl_trans_grab_nic_access(trans)))
-static inline void __releases(nic_access)
-iwl_trans_release_nic_access(struct iwl_trans *trans)
-{
- trans->ops->release_nic_access(trans);
- __release(nic_access);
-}
+void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans);
static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
@@ -1546,8 +1128,8 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
- iwl_op_mode_nic_error(trans->op_mode, sync);
trans->state = IWL_TRANS_NO_FW;
+ iwl_op_mode_nic_error(trans->op_mode, sync);
}
}
@@ -1556,44 +1138,24 @@ static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
return trans->state == IWL_TRANS_FW_ALIVE;
}
-static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
-{
- if (trans->ops->sync_nmi)
- trans->ops->sync_nmi(trans);
-}
+void iwl_trans_sync_nmi(struct iwl_trans *trans);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit);
-static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_data,
- const struct iwl_ucode_capabilities *capa)
-{
- return trans->ops->load_pnvm(trans, pnvm_data, capa);
-}
+int iwl_trans_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ const struct iwl_ucode_capabilities *capa);
-static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->ops->set_pnvm)
- trans->ops->set_pnvm(trans, capa);
-}
+void iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
-static inline int iwl_trans_load_reduce_power
- (struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa)
-{
- return trans->ops->load_reduce_power(trans, payloads, capa);
-}
+int iwl_trans_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
-static inline void
-iwl_trans_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->ops->set_reduce_power)
- trans->ops->set_reduce_power(trans, capa);
-}
+void iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
@@ -1601,18 +1163,13 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}
-static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
-{
- if (trans->ops->interrupts)
- trans->ops->interrupts(trans, enable);
-}
+void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
/*****************************************************
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
@@ -1623,10 +1180,13 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
}
/*****************************************************
-* driver (transport) register/unregister functions
-******************************************************/
+ * PCIe handling
+ *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+
#endif /* __iwl_trans_h__ */
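The header now exposes _iwl_trans_grab_nic_access() and the release helper as real functions behind the existing iwl_trans_grab_nic_access() macro. A minimal pairing sketch, where the helper name and PRPH offset are placeholders:

static u32 example_read_prph_locked(struct iwl_trans *trans, u32 reg)
{
	u32 val = 0;

	/* grab_nic_access wakes the NIC; no sleeping is allowed before the
	 * matching release call */
	if (iwl_trans_grab_nic_access(trans)) {
		val = iwl_trans_read_prph(trans, reg);
		iwl_trans_release_nic_access(trans);
	}

	return val;
}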
diff --git a/mvm/coex.c b/mvm/coex.c
index 5a5b1128e75c..ad3e14a0d043 100644
--- a/mvm/coex.c
+++ b/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -116,11 +116,6 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
ret = BT_COEX_TX_DIS_LUT;
- if (mvm->cfg->bt_shared_single_ant) {
- rcu_read_unlock();
- return ret;
- }
-
phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
secondary_ch_phy_id =
@@ -186,6 +181,9 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
return 0;
@@ -221,15 +219,13 @@ struct iwl_bt_iterator_data {
static inline
void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link_info,
bool enable, int rssi)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->bf_data.last_bt_coex_event = rssi;
- mvmvif->bf_data.bt_coex_max_thold =
+ link_info->bf_data.last_bt_coex_event = rssi;
+ link_info->bf_data.bt_coex_max_thold =
enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
- mvmvif->bf_data.bt_coex_min_thold =
+ link_info->bf_data.bt_coex_min_thold =
enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
@@ -257,6 +253,77 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
+/*
+ * This function receives the LB link id and checks if eSR should be
+ * enabled or disabled (due to BT coex)
+ */
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s32 link_rssi,
+ bool primary)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool have_wifi_loss_rate =
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ BT_PROFILE_NOTIFICATION, 0) > 4;
+ u8 wifi_loss_rate;
+
+ if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ return true;
+
+ if (primary)
+ return false;
+
+ /* The feature is not supported */
+ if (!have_wifi_loss_rate)
+ return true;
+
+
+ /*
+ * In case we don't know the RSSI - take the lower wifi loss,
+ * so we will more likely enter eSR, and if RSSI is low -
+ * we will get an update on this and exit eSR.
+ */
+ if (!link_rssi)
+ wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ else if (mvmvif->esr_active)
+ /* RSSI needs to get really low to disable eSR... */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ else
+ /* ...And really high before we enable it back */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+}
+
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
+
+ if (!ieee80211_vif_is_mld(vif) ||
+ !iwl_mvm_vif_from_mac80211(vif)->authorized ||
+ WARN_ON(!link))
+ return;
+
+ if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
+ (s8)link->beacon_stats.avg_signal,
+ link_id == iwl_mvm_get_primary_link(vif)))
+ /* In case we decided to exit eSR - stay with the primary */
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
+ iwl_mvm_get_primary_link(vif));
+}
+
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -296,12 +363,14 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
smps_mode, link_id);
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
false);
- /* FIXME: should this be per link? */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false,
+ 0);
}
return;
}
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -383,21 +452,19 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
/*
* don't reduce the Tx power if one of these is true:
* we are in LOOSE
- * single share antenna product
* BT is inactive
* we are not associated
*/
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant || !vif->cfg.assoc ||
- le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
+ le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF ||
+ !vif->cfg.assoc) {
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false);
- /* FIXME: should this be per link? */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0);
return;
}
/* try to get the avg rssi from fw */
- ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+ ave_rssi = link_info->bf_data.ave_beacon_signal;
/* if the RSSI isn't valid, fake it is very low */
if (!ave_rssi)
@@ -413,7 +480,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
}
/* Begin to monitor the RSSI: it may influence the reduced Tx power */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi);
}
/* must be called under rcu_read_lock */
@@ -460,6 +527,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ rcu_read_unlock();
+ return;
+ }
+
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
@@ -570,7 +642,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* Check if rssi is good enough for reduced Tx power, but not in loose
* scheme.
*/
- if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+ if (rssi_event == RSSI_EVENT_LOW ||
iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
ret = iwl_mvm_bt_coex_reduced_txp(mvm,
mvmvif->deflink.ap_sta_id,
@@ -639,10 +711,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
{
- /* there is no other antenna, shared antenna is always available */
- if (mvm->cfg->bt_shared_single_ant)
- return true;
-
if (ant & mvm->cfg->non_shared_ant)
return true;
@@ -652,10 +720,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
{
- /* there is no other antenna, shared antenna is always available */
- if (mvm->cfg->bt_shared_single_ant)
- return true;
-
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
diff --git a/mvm/constants.h b/mvm/constants.h
index 243eccc68cb0..c4c1e67b9ac7 100644
--- a/mvm/constants.h
+++ b/mvm/constants.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2013-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@@ -11,6 +11,11 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
+#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
+#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
+#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -18,7 +23,7 @@
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
@@ -51,7 +56,6 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_HW_CSUM_DISABLE 0
-#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
@@ -60,6 +64,7 @@
#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */
#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
#define IWL_MVM_NON_TRANSMITTING_AP 0
+#define IWL_MVM_CONN_LISTEN_INTERVAL 10
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
@@ -94,6 +99,7 @@
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false
+#define IWL_MVM_FTM_TEST_INCORRECT_SAC false
#define IWL_MVM_FTM_R2I_MAX_REP 7
#define IWL_MVM_FTM_I2R_MAX_REP 7
#define IWL_MVM_FTM_R2I_MAX_STS 1
@@ -108,7 +114,6 @@
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
-#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false
#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40
/* 20016 pSec is 6 meter RTT, meaning 3 meter range */
@@ -118,5 +123,18 @@
#define IWL_MVM_DISABLE_AP_FILS false
#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
+#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
+#define IWL_MVM_AUTO_EML_ENABLE true
+#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7
+#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
+#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
+#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
+#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
+#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
+#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
+#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
+#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
+
+#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
#endif /* __MVM_CONSTANTS_H */
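
The HIGH/LOW pairs above presumably feed the new EMLSR link-selection code elsewhere in this update; the following standalone sketch only shows how such a pair is typically used as a hysteresis band (the bandwidth-to-threshold mapping copies the values from this hunk, everything else is an assumption, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Copies of the per-bandwidth RSSI thresholds above, in dBm. */
struct rssi_thresh { int high, low; };

static struct rssi_thresh thresh_for_bw(int bw_mhz)
{
	switch (bw_mhz) {
	case 20: return (struct rssi_thresh){ -67, -71 };
	case 40: return (struct rssi_thresh){ -64, -67 };
	case 80: return (struct rssi_thresh){ -61, -74 };
	default: return (struct rssi_thresh){ -58, -61 }; /* 160 MHz */
	}
}

/* Hysteresis: a link already considered good only becomes bad below LOW,
 * while a link not yet considered good must first climb above HIGH. */
static bool link_rssi_good(int bw_mhz, int rssi, bool currently_good)
{
	struct rssi_thresh t = thresh_for_bw(bw_mhz);

	return rssi > (currently_good ? t.low : t.high);
}

int main(void)
{
	/* -69 dBm on a 20 MHz link: keeps a good link, rejects a bad one. */
	printf("%d %d\n", link_rssi_good(20, -69, true),
	       link_rssi_good(20, -69, false));   /* 1 0 */
	return 0;
}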
diff --git a/mvm/d3.c b/mvm/d3.c
index f6488b4bbe68..99a541d442bb 100644
--- a/mvm/d3.c
+++ b/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -450,9 +450,9 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
}
static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -461,16 +461,14 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct wowlan_key_rsc_v5_data data = {};
int i;
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
if (!data.rsc)
return -ENOMEM;
- memset(data.rsc, 0xff, sizeof(*data.rsc));
-
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
data.rsc->mcast_key_id_map[i] =
IWL_MCAST_KEY_MAP_INVALID;
- data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id);
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_wowlan_get_rsc_v5_data,
@@ -494,7 +492,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
if (ver == 4) {
size = sizeof(*data.rsc_tsc);
data.rsc_tsc->sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else {
/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
size = sizeof(data.rsc_tsc->params);
@@ -597,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
void *_data)
{
struct wowlan_key_gtk_type_iter *data = _data;
+ __le32 *cipher = NULL;
+
+ if (key->keyidx == 4 || key->keyidx == 5)
+ cipher = &data->kek_kck_cmd->igtk_cipher;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ cipher = &data->kek_kck_cmd->bigtk_cipher;
switch (key->cipher) {
default:
@@ -608,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
return;
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
return;
case WLAN_CIPHER_SUITE_AES_CMAC:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
return;
case WLAN_CIPHER_SUITE_CCMP:
if (!sta)
@@ -668,10 +675,9 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct cfg80211_wowlan *wowlan)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
@@ -693,7 +699,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
pattern_cmd->n_patterns = wowlan->n_patterns;
if (ver >= 3)
- pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id;
+ pattern_cmd->sta_id = mvm_link->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -723,14 +729,15 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *ctx;
u8 chains_static, chains_dynamic;
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef, ap_def;
int ret, i;
struct iwl_binding_cmd_v1 binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
struct iwl_time_quota_data *quota;
u32 status;
- if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
+ ieee80211_vif_is_mld(vif)))
return -EINVAL;
/* add back the PHY */
@@ -744,12 +751,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
}
chandef = ctx->def;
+ ap_def = ctx->ap;
chains_static = ctx->rx_chains_static;
chains_dynamic = ctx->rx_chains_dynamic;
rcu_read_unlock();
ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef,
- chains_static, chains_dynamic);
+ &ap_def, chains_static, chains_dynamic);
if (ret)
return ret;
@@ -818,7 +826,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
- if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+ if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm, false))
IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
return 0;
@@ -927,6 +935,9 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -987,7 +998,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
}
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1016,7 +1028,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
return -EIO;
}
- ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+ ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1030,7 +1042,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (ver == 2) {
size = sizeof(tkip_data.tkip);
tkip_data.tkip.sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
} else {
@@ -1079,7 +1091,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
- kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id);
if (cmd_ver == 4) {
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
@@ -1112,6 +1124,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct ieee80211_sta *ap_sta)
{
int ret;
@@ -1130,14 +1143,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- /*
- * This needs to be unlocked due to lock ordering
- * constraints. Since we're in the suspend path
- * that isn't really a problem though.
- */
- mutex_unlock(&mvm->mutex);
- ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
- mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1149,13 +1155,14 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
- ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
+ ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
return ret;
- return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0,
+ mvm_link->ap_sta_id);
}
static int
@@ -1230,6 +1237,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_mvm_vif_link_info *mvm_link;
struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1258,21 +1266,29 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
return -EINVAL;
}
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ return 1;
+
+ ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+ if (ret)
+ return ret;
+
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
synchronize_net();
- vif = iwl_mvm_get_bss_vif(mvm);
- if (IS_ERR_OR_NULL(vif)) {
- ret = 1;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)];
+ if (WARN_ON_ONCE(!mvm_link)) {
+ ret = -EINVAL;
goto out_noreset;
}
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) {
+ if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -1286,24 +1302,31 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
- wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
+ wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
+ mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta)) {
ret = -EINVAL;
goto out_noreset;
}
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
goto out_noreset;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
- vif, mvmvif, ap_sta);
+ vif, mvmvif, mvm_link, ap_sta);
if (ret)
goto out;
@@ -1438,6 +1461,10 @@ struct iwl_wowlan_status_data {
} ptk;
struct iwl_multicast_key_data igtk;
+ struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+
+ int num_mlo_keys;
+ struct iwl_wowlan_mlo_gtk mlo_keys[WOWLAN_MAX_MLO_KEYS];
u8 *wake_packet;
};
@@ -1468,7 +1495,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
wakeup.disconnect = true;
if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
@@ -1492,6 +1520,9 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
@@ -1781,8 +1812,8 @@ static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_mvm *mvm;
struct iwl_wowlan_status_data *status;
- u32 gtk_cipher, igtk_cipher;
- bool unhandled_cipher, igtk_support;
+ u32 gtk_cipher, igtk_cipher, bigtk_cipher;
+ bool unhandled_cipher, igtk_support, bigtk_support;
int num_keys;
};
@@ -1793,6 +1824,10 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
void *_data)
{
struct iwl_mvm_d3_gtk_iter_data *data = _data;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
if (data->unhandled_cipher)
return;
@@ -1817,6 +1852,9 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
if (data->igtk_support &&
(key->keyidx == 4 || key->keyidx == 5)) {
data->igtk_cipher = key->cipher;
+ } else if (data->bigtk_support &&
+ (key->keyidx == 6 || key->keyidx == 7)) {
+ data->bigtk_cipher = key->cipher;
} else {
data->unhandled_cipher = true;
return;
@@ -1842,9 +1880,30 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void
+iwl_mvm_d3_update_igtk_bigtk(struct iwl_wowlan_status_data *status,
+ struct ieee80211_key_conf *key,
+ struct iwl_multicast_key_data *key_data)
+{
+ if (status->num_of_gtk_rekeys && key_data->len) {
+ /* remove rekeyed key */
+ ieee80211_remove_key(key);
+ } else {
+ struct ieee80211_key_seq seq;
+
+ iwl_mvm_d3_set_igtk_bigtk_ipn(key_data,
+ &seq,
+ key->cipher);
+ ieee80211_set_key_rx_seq(key, 0, &seq);
}
}
@@ -1857,6 +1916,10 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
struct iwl_mvm_d3_gtk_iter_data *data = _data;
struct iwl_wowlan_status_data *status = data->status;
s8 keyidx;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
if (data->unhandled_cipher)
return;
@@ -1900,26 +1963,186 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_AES_CMAC:
if (key->keyidx == 4 || key->keyidx == 5) {
- /* remove rekeyed key */
- if (status->num_of_gtk_rekeys) {
- ieee80211_remove_key(key);
- } else {
- struct ieee80211_key_seq seq;
+ iwl_mvm_d3_update_igtk_bigtk(status, key,
+ &status->igtk);
+ }
+ if (key->keyidx == 6 || key->keyidx == 7) {
+ u8 idx = key->keyidx == status->bigtk[1].id;
- iwl_mvm_d3_set_igtk_bigtk_ipn(&status->igtk,
- &seq,
- key->cipher);
- ieee80211_set_key_rx_seq(key, 0, &seq);
- }
+ iwl_mvm_d3_update_igtk_bigtk(status, key,
+ &status->bigtk[idx]);
}
}
}
+struct iwl_mvm_d3_mlo_old_keys {
+ u32 cipher[IEEE80211_MLD_MAX_NUM_LINKS][WOWLAN_MLO_GTK_KEY_NUM_TYPES];
+ struct ieee80211_key_conf *key[IEEE80211_MLD_MAX_NUM_LINKS][8];
+};
+
+static void iwl_mvm_mlo_key_ciphers(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm_d3_mlo_old_keys *old_keys = data;
+ enum iwl_wowlan_mlo_gtk_type key_type;
+
+ if (key->link_id < 0)
+ return;
+
+ if (WARN_ON(key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ key->keyidx >= 8))
+ return;
+
+ if (WARN_ON(old_keys->key[key->link_id][key->keyidx]))
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_GTK;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_IGTK;
+ break;
+ } else if (key->keyidx == 6 || key->keyidx == 7) {
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_BIGTK;
+ break;
+ }
+ return;
+ default:
+ /* ignore WEP/TKIP or unknown ciphers */
+ return;
+ }
+
+ old_keys->cipher[key->link_id][key_type] = key->cipher;
+ old_keys->key[key->link_id][key->keyidx] = key;
+}
+
+static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm *mvm)
+{
+ int i;
+ struct iwl_mvm_d3_mlo_old_keys *old_keys;
+ bool ret = true;
+
+ IWL_DEBUG_WOWLAN(mvm, "Num of MLO Keys: %d\n", status->num_mlo_keys);
+ if (!status->num_mlo_keys)
+ return true;
+
+ old_keys = kzalloc(sizeof(*old_keys), GFP_KERNEL);
+ if (!old_keys)
+ return false;
+
+ /* find the cipher for each mlo key */
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mlo_key_ciphers, old_keys);
+
+ for (i = 0; i < status->num_mlo_keys; i++) {
+ struct iwl_wowlan_mlo_gtk *mlo_key = &status->mlo_keys[i];
+ struct ieee80211_key_conf *key, *old_key;
+ struct ieee80211_key_seq seq;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {};
+ u16 flags = le16_to_cpu(mlo_key->flags);
+ int j, link_id, key_id, key_type;
+
+ link_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
+ key_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
+ key_type = u16_get_bits(flags,
+ WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);
+
+ if (!(vif->valid_links & BIT(link_id)))
+ continue;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ key_id >= 8 ||
+ key_type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES))
+ continue;
+
+ conf.conf.cipher = old_keys->cipher[link_id][key_type];
+ /* WARN_ON? */
+ if (!conf.conf.cipher)
+ continue;
+
+ conf.conf.keylen = 0;
+ switch (conf.conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ break;
+ }
+
+ if (WARN_ON(!conf.conf.keylen ||
+ conf.conf.keylen > sizeof(conf.key)))
+ continue;
+
+ memcpy(conf.conf.key, mlo_key->key, conf.conf.keylen);
+ conf.conf.keyidx = key_id;
+
+ old_key = old_keys->key[link_id][key_id];
+ if (old_key) {
+ IWL_DEBUG_WOWLAN(mvm,
+ "Remove MLO key id %d, link id %d\n",
+ key_id, link_id);
+ ieee80211_remove_key(old_key);
+ }
+
+ IWL_DEBUG_WOWLAN(mvm, "Add MLO key id %d, link id %d\n",
+ key_id, link_id);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ if (WARN_ON(IS_ERR(key))) {
+ ret = false;
+ goto out;
+ }
+
+ /*
+ * mac80211 expects the pn in big-endian
+ * also note that seq is a union of all cipher types
+ * (ccmp, gcmp, cmac, gmac), and they all have the same
+ * pn field (of length 6) so just copy it to ccmp.pn.
+ */
+ for (j = 5; j >= 0; j--)
+ seq.ccmp.pn[5 - j] = mlo_key->pn[j];
+
+ /* group keys are non-QoS and use TID 0 */
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ }
+
+out:
+ kfree(old_keys);
+ return ret;
+}
+
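
A standalone illustration of the packet-number byte swap done by the loop above, assuming (as the comment implies) that the firmware reports the 6-byte PN in the opposite byte order from what mac80211's key_seq expects; not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Same reversal as the "for (j = 5; j >= 0; j--)" loop in
 * iwl_mvm_mlo_gtk_rekey(): firmware order in, mac80211 (big-endian) order out. */
static void pn_fw_to_mac80211(const uint8_t fw_pn[6], uint8_t out_pn[6])
{
	int j;

	for (j = 5; j >= 0; j--)
		out_pn[5 - j] = fw_pn[j];
}

int main(void)
{
	const uint8_t fw_pn[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	uint8_t pn[6];

	pn_fw_to_mac80211(fw_pn, pn);
	for (int i = 0; i < 6; i++)
		printf("%02x ", pn[i]);   /* prints 06 05 04 03 02 01 */
	printf("\n");
	return 0;
}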
static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
{
- int i;
+ int i, j;
struct ieee80211_key_conf *key;
struct {
struct ieee80211_key_conf conf;
@@ -1927,6 +2150,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
} conf = {
.conf.cipher = gtk_cipher,
};
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
@@ -1960,10 +2184,18 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
memcpy(conf.conf.key, status->gtk[i].key,
sizeof(status->gtk[i].key));
- key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key))
return false;
- iwl_mvm_set_key_rx_seq_idx(key, status, i);
+
+ for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) {
+ if (!status->gtk_seq[j].valid ||
+ status->gtk_seq[j].key_id != key->keyidx)
+ continue;
+ iwl_mvm_set_key_rx_seq_idx(key, status, j);
+ break;
+ }
+ WARN_ON(j == ARRAY_SIZE(status->gtk_seq));
}
return true;
@@ -1983,6 +2215,7 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
.conf.keyidx = key_data->id,
};
struct ieee80211_key_seq seq;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
if (!key_data->len)
return true;
@@ -2008,10 +2241,20 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
- key_config = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key_config))
return false;
ieee80211_set_key_rx_seq(key_config, 0, &seq);
+
+ if (key_config->keyidx == 4 || key_config->keyidx == 5) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *mvm_link;
+
+ link_id = link_id < 0 ? 0 : link_id;
+ mvm_link = mvmvif->link[link_id];
+ mvm_link->igtk = key_config;
+ }
+
return true;
}
@@ -2042,6 +2285,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
.mvm = mvm,
.status = status,
};
+ int i;
u32 disconnection_reasons =
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
@@ -2049,15 +2293,17 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
- if (status->wakeup_reasons & disconnection_reasons)
- return false;
-
if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
WOWLAN_INFO_NOTIFICATION,
0))
gtkdata.igtk_support = true;
+ if (iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
+ WOWLAN_INFO_NOTIFICATION,
+ 0) >= 3)
+ gtkdata.bigtk_support = true;
+
/* find last GTK that we used initially, if any */
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_d3_find_last_keys, &gtkdata);
@@ -2088,18 +2334,32 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
&status->igtk))
return false;
+ for (i = 0; i < ARRAY_SIZE(status->bigtk); i++) {
+ if (!iwl_mvm_d3_igtk_bigtk_rekey_add(status, vif,
+ gtkdata.bigtk_cipher,
+ &status->bigtk[i]))
+ return false;
+ }
+
+ if (!iwl_mvm_mlo_gtk_rekey(status, vif, mvm))
+ return false;
+
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
}
out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES, 0) < 10) {
+ WOWLAN_GET_STATUSES,
+ IWL_FW_CMD_VER_UNKNOWN) < 10) {
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
}
+ if (status->wakeup_reasons & disconnection_reasons)
+ return false;
+
return true;
}
@@ -2157,7 +2417,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
+ int i;
+
BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
if (!data->key_len)
return;
@@ -2169,15 +2432,50 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ WOWLAN_IGTK_MIN_INDEX;
memcpy(status->igtk.key, data->key, sizeof(data->key));
- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
+
+ /* mac80211 expects big endian for memcmp() to work, convert */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
+}
+
+static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
+ const struct iwl_wowlan_igtk_status *data)
+{
+ int data_idx, status_idx = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(status->bigtk) < WOWLAN_BIGTK_KEYS_NUM);
+
+ for (data_idx = 0; data_idx < WOWLAN_BIGTK_KEYS_NUM; data_idx++) {
+ if (!data[data_idx].key_len)
+ continue;
+
+ status->bigtk[status_idx].len = data[data_idx].key_len;
+ status->bigtk[status_idx].flags = data[data_idx].key_flags;
+ status->bigtk[status_idx].id =
+ u32_get_bits(data[data_idx].key_flags,
+ IWL_WOWLAN_IGTK_BIGTK_IDX_MASK)
+ + WOWLAN_BIGTK_MIN_INDEX;
+
+ BUILD_BUG_ON(sizeof(status->bigtk[status_idx].key) <
+ sizeof(data[data_idx].key));
+ BUILD_BUG_ON(sizeof(status->bigtk[status_idx].ipn) <
+ sizeof(data[data_idx].ipn));
+
+ memcpy(status->bigtk[status_idx].key, data[data_idx].key,
+ sizeof(data[data_idx].key));
+ memcpy(status->bigtk[status_idx].ipn, data[data_idx].ipn,
+ sizeof(data[data_idx].ipn));
+ status_idx++;
+ }
}
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
struct iwl_wowlan_info_notif *data,
struct iwl_wowlan_status_data *status,
- u32 len)
+ u32 len, bool has_mlo_keys)
{
u32 i;
+ u32 expected_len = sizeof(*data);
if (!data) {
IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
@@ -2185,16 +2483,69 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
return;
}
- if (len < sizeof(*data)) {
+ if (has_mlo_keys)
+ expected_len += (data->num_mlo_link_keys *
+ sizeof(status->mlo_keys[0]));
+
+ if (len < expected_len) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
return;
}
+ if (mvm->fast_resume)
+ return;
+
iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
iwl_mvm_convert_gtk_v3(status, data->gtk);
iwl_mvm_convert_igtk(status, &data->igtk[0]);
+ iwl_mvm_convert_bigtk(status, data->bigtk);
+ status->replay_ctr = le64_to_cpu(data->replay_ctr);
+ status->pattern_number = le16_to_cpu(data->pattern_number);
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++)
+ status->qos_seq_ctr[i] =
+ le16_to_cpu(data->qos_seq_ctr[i]);
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons);
+ status->num_of_gtk_rekeys =
+ le32_to_cpu(data->num_of_gtk_rekeys);
+ status->received_beacons = le32_to_cpu(data->received_beacons);
+ status->tid_tear_down = data->tid_tear_down;
+ if (has_mlo_keys && data->num_mlo_link_keys) {
+ status->num_mlo_keys = data->num_mlo_link_keys;
+ if (IWL_FW_CHECK(mvm,
+ status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
+ "Too many mlo keys: %d, max %d\n",
+ status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
+ status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
+ memcpy(status->mlo_keys, data->mlo_gtks,
+ status->num_mlo_keys * sizeof(status->mlo_keys[0]));
+ }
+}
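
The length check above guards a notification whose tail is a variable-size key array; below is a hedged userspace sketch of the same validate-then-clamp pattern (the struct layout and the MAX_MLO_KEYS value are simplified stand-ins, not the firmware ABI):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_MLO_KEYS 18   /* stand-in for WOWLAN_MAX_MLO_KEYS */

struct mlo_gtk { uint8_t key[32]; uint8_t pn[6]; uint16_t flags; };

/* Fixed header followed by a variable-length key array, loosely mirroring
 * the layout checked in iwl_mvm_parse_wowlan_info_notif(). */
struct info_notif {
	uint32_t wakeup_reasons;
	uint8_t  num_mlo_link_keys;
	struct mlo_gtk mlo_gtks[];
};

static int parse_notif(const struct info_notif *data, size_t len,
		       struct mlo_gtk *out, size_t *out_n)
{
	size_t n;

	if (len < sizeof(*data))
		return -1;                       /* shorter than the fixed part */

	n = data->num_mlo_link_keys;
	if (len < sizeof(*data) + n * sizeof(data->mlo_gtks[0]))
		return -1;                       /* truncated key array */

	if (n > MAX_MLO_KEYS)
		n = MAX_MLO_KEYS;                /* clamp instead of overflowing */

	memcpy(out, data->mlo_gtks, n * sizeof(out[0]));
	*out_n = n;
	return 0;
}

int main(void)
{
	size_t len = sizeof(struct info_notif) + 2 * sizeof(struct mlo_gtk);
	struct info_notif *notif = calloc(1, len);
	struct mlo_gtk keys[MAX_MLO_KEYS];
	size_t n = 0;

	if (!notif)
		return 1;
	notif->num_mlo_link_keys = 2;
	printf("parse: %d, keys copied: %zu\n",
	       parse_notif(notif, len, keys, &n), n);   /* parse: 0, keys copied: 2 */
	free(notif);
	return 0;
}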
+
+static void
+iwl_mvm_parse_wowlan_info_notif_v2(struct iwl_mvm *mvm,
+ struct iwl_wowlan_info_notif_v2 *data,
+ struct iwl_wowlan_status_data *status,
+ u32 len)
+{
+ u32 i;
+
+ if (!data) {
+ IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
+ status = NULL;
+ return;
+ }
+
+ if (len < sizeof(*data)) {
+ IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
+ status = NULL;
+ return;
+ }
+
+ iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
+ iwl_mvm_convert_gtk_v3(status, data->gtk);
+ iwl_mvm_convert_igtk(status, &data->igtk[0]);
status->replay_ctr = le64_to_cpu(data->replay_ctr);
status->pattern_number = le16_to_cpu(data->pattern_number);
for (i = 0; i < IWL_MAX_TID_COUNT; i++)
@@ -2388,8 +2739,14 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status_data *status)
{
int i;
- bool keep;
+ bool keep = false;
struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
+
+ if (WARN_ON(!mvm_link))
+ goto out_unlock;
if (!status)
goto out_unlock;
@@ -2397,8 +2754,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
status->wakeup_reasons);
- /* still at hard-coded place 0 for D3 image */
- mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id);
if (!mvm_ap_sta)
goto out_unlock;
@@ -2416,18 +2772,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
mvm_ap_sta->tid_data[i].seq_number >> 4);
}
- /* now we have all the data we need, unlock to avoid mac80211 issues */
- mutex_unlock(&mvm->mutex);
-
iwl_mvm_report_wakeup_reasons(mvm, vif, status);
keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
-
- return keep;
-
out_unlock:
mutex_unlock(&mvm->mutex);
- return false;
+ return keep;
}
#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
@@ -2702,7 +3052,7 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_mvm_rt_status(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
@@ -2743,6 +3093,9 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA :
mvmvif->deflink.ap_sta_id;
+ /* bug - FW with MLO has status notification */
+ WARN_ON(ieee80211_vif_is_mld(vif));
+
d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
@@ -2851,7 +3204,7 @@ static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
if (results->matched_profiles) {
memcpy(results->matches, notif->matches, matches_len);
- d3_data->nd_results_valid = TRUE;
+ d3_data->nd_results_valid = true;
}
/* no scan should be active at this point */
@@ -2866,7 +3219,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_d3_data *d3_data = data;
- u32 len;
+ u32 len = iwl_rx_packet_payload_len(pkt);
int ret;
int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw,
PROT_OFFLOAD_GROUP,
@@ -2876,7 +3229,6 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): {
- struct iwl_wowlan_info_notif *notif;
if (d3_data->notif_received & IWL_D3_NOTIF_WOWLAN_INFO) {
/* We might get two notifications due to dual bss */
@@ -2886,26 +3238,40 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
}
if (wowlan_info_ver < 2) {
- struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
+ struct iwl_wowlan_info_notif_v1 *notif_v1 =
+ (void *)pkt->data;
+ struct iwl_wowlan_info_notif_v2 *notif_v2;
- notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
- if (!notif)
+ notif_v2 = kmemdup(notif_v1, sizeof(*notif_v2), GFP_ATOMIC);
+
+ if (!notif_v2)
return false;
- notif->tid_tear_down = notif_v1->tid_tear_down;
- notif->station_id = notif_v1->station_id;
- memset_after(notif, 0, station_id);
+ notif_v2->tid_tear_down = notif_v1->tid_tear_down;
+ notif_v2->station_id = notif_v1->station_id;
+ memset_after(notif_v2, 0, station_id);
+ iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2,
+ d3_data->status,
+ len);
+ kfree(notif_v2);
+
+ } else if (wowlan_info_ver == 2) {
+ struct iwl_wowlan_info_notif_v2 *notif_v2 =
+ (void *)pkt->data;
+
+ iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2,
+ d3_data->status,
+ len);
} else {
- notif = (void *)pkt->data;
+ struct iwl_wowlan_info_notif *notif =
+ (void *)pkt->data;
+
+ iwl_mvm_parse_wowlan_info_notif(mvm, notif,
+ d3_data->status, len,
+ wowlan_info_ver > 3);
}
d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
- len = iwl_rx_packet_payload_len(pkt);
- iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
- len);
-
- if (wowlan_info_ver < 2)
- kfree(notif);
if (d3_data->status &&
d3_data->status->wakeup_reasons & IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT)
@@ -3003,7 +3369,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
return ret;
}
-#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3)
static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
struct iwl_d3_data *d3_data)
@@ -3014,12 +3380,22 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
+ static const u16 d3_fast_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
struct iwl_notification_wait wait_d3_notif;
int ret;
- iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
- d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
- iwl_mvm_wait_d3_notif, d3_data);
+ if (mvm->fast_resume)
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_fast_resume_notif,
+ ARRAY_SIZE(d3_fast_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+ else
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif,
+ ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
if (ret) {
@@ -3063,6 +3439,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
mutex_lock(&mvm->mutex);
+ /* Apparently, the device went away and device_powered_off() was called,
+ * don't even try to read the rt_status, the device is currently
+ * inaccessible.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ IWL_INFO(mvm,
+ "Can't resume, device_powered_off() was called during wowlan\n");
+ goto err;
+ }
+
mvm->last_reset_or_resume_time_jiffies = jiffies;
/* get the BSS vif pointer again */
@@ -3100,6 +3486,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+
/* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
@@ -3202,6 +3590,68 @@ void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(mvm->trans->dev, enabled);
}
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n");
+
+ mvm->fast_resume = true;
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
+ WARN_ON(iwl_mvm_power_update_device(mvm));
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3,
+ sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
+ if (ret)
+ IWL_ERR(mvm,
+ "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+
+ WARN_ON(iwl_mvm_power_update_mac(mvm));
+
+ ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ if (ret)
+ IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
+}
+
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_data d3_data = {
+ .notif_expected =
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n");
+
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
+ if (iwl_mvm_check_rt_status(mvm, NULL)) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ false, 0);
+ return -ENODEV;
+ }
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ mvm->fast_resume = false;
+
+ if (ret)
+ IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
+
+ return ret;
+}
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
@@ -3237,6 +3687,7 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ unsigned long end = jiffies + 60 * HZ;
u32 pme_asserted;
while (true) {
@@ -3250,6 +3701,12 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
if (msleep_interruptible(100))
break;
+
+ if (time_is_before_jiffies(end)) {
+ IWL_ERR(mvm,
+ "ending pseudo-D3 with timeout after ~60 seconds\n");
+ return -ETIMEDOUT;
+ }
}
return 0;
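
The ~60 second bail-out added above follows the usual jiffies-deadline pattern; here is a rough userspace analogue (time()-based, illustrative only, with the poll interval matching the msleep_interruptible(100) in the loop):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Poll a condition every 100 ms, give up once the deadline passes,
 * analogous to "end = jiffies + 60 * HZ" plus time_is_before_jiffies(). */
static bool poll_with_deadline(bool (*cond)(void), int timeout_s)
{
	time_t end = time(NULL) + timeout_s;
	struct timespec ts = { .tv_nsec = 100 * 1000 * 1000 };  /* 100 ms */

	while (!cond()) {
		if (time(NULL) > end)
			return false;            /* timed out, like -ETIMEDOUT */
		nanosleep(&ts, NULL);
	}
	return true;
}

static bool never_ready(void) { return false; }

int main(void)
{
	/* Use a 1 second deadline so the demo terminates quickly. */
	printf("ready: %d\n", poll_with_deadline(never_ready, 1));  /* ready: 0 */
	return 0;
}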
diff --git a/mvm/debugfs-vif.c b/mvm/debugfs-vif.c
index cb4ecad6103f..25f07e00db42 100644
--- a/mvm/debugfs-vif.c
+++ b/mvm/debugfs-vif.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -381,9 +381,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -407,7 +407,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
};
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_data.bf_enabled)
+ if (mvmvif->bf_enabled)
cmd.bf_enable_beacon_filter = cpu_to_le32(1);
else
cmd.bf_enable_beacon_filter = 0;
@@ -578,34 +578,47 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
+ struct ieee80211_bss_conf *link_conf;
u16 value;
- int ret;
+ int link_id, ret = -EINVAL;
ret = kstrtou16(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
- /* make sure the channel context is assigned */
- if (!chanctx_conf) {
+ mvm->dbgfs_rx_phyinfo = value;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def min_def, ap_def;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 chains_static, chains_dynamic;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ continue;
+ }
+ /* A command can't be sent with RCU lock held, so copy
+ * everything here and use it after unlocking
+ */
+ min_def = chanctx_conf->min_def;
+ ap_def = chanctx_conf->ap;
+ chains_static = chanctx_conf->rx_chains_static;
+ chains_dynamic = chanctx_conf->rx_chains_dynamic;
rcu_read_unlock();
- mutex_unlock(&mvm->mutex);
- return -EINVAL;
- }
- phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
- rcu_read_unlock();
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+ if (!phy_ctxt)
+ continue;
- mvm->dbgfs_rx_phyinfo = value;
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
+ chains_static, chains_dynamic);
+ }
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -679,6 +692,132 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+static ssize_t iwl_dbgfs_max_tx_op_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ mvmvif->max_tx_op = value;
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[10];
+ int len;
+
+ mutex_lock(&mvm->mutex);
+ len = scnprintf(buf, sizeof(buf), "%hu\n", mvmvif->max_tx_op);
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 action;
+ int ret;
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ if (kstrtou32(buf, 0, &action))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!action) {
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ } else if (action == 1) {
+ ret = iwl_mvm_int_mlo_scan(mvm, vif);
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ unsigned long esr_mask;
+ char *buf;
+ int bufsz, pos, i;
+ ssize_t rv;
+
+ mutex_lock(&mvm->mutex);
+ esr_mask = mvmvif->esr_disable_reason;
+ mutex_unlock(&mvm->mutex);
+
+ bufsz = hweight32(esr_mask) * 32 + 40;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
+ esr_mask);
+ for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
+ pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
+ iwl_get_esr_state_string(BIT(i)));
+
+ rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return rv;
+}
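
The read handler above sizes its buffer by the number of set bits and emits one line per blocking reason; the sketch below is a userspace analogue of that for_each_set_bit() walk (the reason names are hypothetical placeholders, not the driver's real IWL_MVM_ESR_* strings):

#include <stdio.h>

/* Hypothetical names; the driver gets them from iwl_get_esr_state_string(). */
static const char *reason_name(unsigned int bit)
{
	static const char *names[] = { "BLOCKED_COEX", "BLOCKED_WOWLAN", "EXIT_LOW_RSSI" };

	return bit < 3 ? names[bit] : "UNKNOWN";
}

/* One output line per set bit in the mask, like the for_each_set_bit() loop. */
static int dump_reasons(unsigned long mask, char *buf, size_t bufsz)
{
	int pos = snprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n", mask);

	for (unsigned int bit = 0; bit < 8 * sizeof(mask); bit++) {
		if (!(mask & (1UL << bit)))
			continue;
		if (pos < 0 || (size_t)pos >= bufsz)
			break;                    /* buffer full, stop appending */
		pos += snprintf(buf + pos, bufsz - pos, " - %s\n",
				reason_name(bit));
	}
	return pos;
}

int main(void)
{
	char buf[256];

	dump_reasons(0x5, buf, sizeof(buf));      /* bits 0 and 2 set */
	fputs(buf, stdout);
	return 0;
}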
+
+static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 reason;
+ u8 block;
+ int ret;
+
+ ret = sscanf(buf, "%u %hhu", &reason, &block);
+ if (ret < 0)
+ return ret;
+
+ if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (block)
+ iwl_mvm_block_esr(mvm, vif, reason,
+ iwl_mvm_get_primary_link(vif));
+ else
+ iwl_mvm_unblock_esr(mvm, vif, reason);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -698,20 +837,15 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
-
-void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct dentry *dbgfs_dir = vif->debugfs_dir;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
-
- /*
- * Check if debugfs directory already exist before creating it.
- * This may happen when, for example, resetting hw or suspend-resume
- */
- if (!dbgfs_dir || mvmvif->dbgfs_dir)
- return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
if (IS_ERR_OR_NULL(mvmvif->dbgfs_dir)) {
@@ -733,10 +867,28 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
+ debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
+ &mvmvif->ftm_unprotected);
+ MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
+}
+
+void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct dentry *dbgfs_dir = vif->debugfs_dir;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
+
+ /* this will happen in monitor mode */
+ if (!dbgfs_dir)
+ return;
/*
* Create symlink for convenience pointing to interface specific
@@ -745,21 +897,63 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* find
* netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
*/
- snprintf(buf, 100, "../../../%pd3/%pd",
- dbgfs_dir,
- mvmvif->dbgfs_dir);
+ snprintf(name, sizeof(name), "%pd", dbgfs_dir);
+ snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
+ mvmvif->dbgfs_slink =
+ debugfs_create_symlink(name, mvm->debugfs_dir, buf);
}
-void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
debugfs_remove(mvmvif->dbgfs_slink);
mvmvif->dbgfs_slink = NULL;
+}
+
+#define MVM_DEBUGFS_WRITE_LINK_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, \
+ struct ieee80211_bss_conf)
+#define MVM_DEBUGFS_READ_WRITE_LINK_FILE_OPS(name, bufsz) \
+ _MVM_DEBUGFS_READ_WRITE_FILE_OPS(link_##name, bufsz, \
+ struct ieee80211_bss_conf)
+#define MVM_DEBUGFS_ADD_LINK_FILE(name, parent, mode) \
+ debugfs_create_file(#name, mode, parent, link_conf, \
+ &iwl_dbgfs_link_##name##_ops)
+
+static void iwl_mvm_debugfs_add_link_files(struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *mvm_dir)
+{
+ /* Add per-link files here */
+}
+
+void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct dentry *mvm_dir;
+
+ if (WARN_ON(!link_info) || !dir)
+ return;
+
+ if (dir == vif->debugfs_dir) {
+ WARN_ON(!mvmvif->dbgfs_dir);
+ mvm_dir = mvmvif->dbgfs_dir;
+ } else {
+ mvm_dir = debugfs_create_dir("iwlmvm", dir);
+ if (IS_ERR_OR_NULL(mvm_dir)) {
+ IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+ dir);
+ return;
+ }
+ }
- debugfs_remove_recursive(mvmvif->dbgfs_dir);
- mvmvif->dbgfs_dir = NULL;
+ iwl_mvm_debugfs_add_link_files(vif, link_conf, mvm_dir);
}
diff --git a/mvm/debugfs.c b/mvm/debugfs.c
index cf27f106d4d5..91ca830a7b60 100644
--- a/mvm/debugfs.c
+++ b/mvm/debugfs.c
@@ -50,8 +50,18 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
int ret;
+ bool force;
- if (!iwl_mvm_is_ctdp_supported(mvm))
+ if (!kstrtobool(buf, &force))
+ IWL_DEBUG_INFO(mvm,
+ "force stop is %d [0=disabled, 1=enabled]\n",
+ force);
+
+ /* we allow skipping the cap support check and force-stopping ctdp
+ * statistics collection, with the guarantee that it is
+ * safe to use.
+ */
+ if (!force && !iwl_mvm_is_ctdp_supported(mvm))
return -EOPNOTSUPP;
if (!iwl_mvm_firmware_running(mvm) ||
@@ -65,6 +75,36 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
+static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ int ret;
+ bool force;
+
+ if (!kstrtobool(buf, &force))
+ IWL_DEBUG_INFO(mvm,
+ "force start is %d [0=disabled, 1=enabled]\n",
+ force);
+
+ /* we allow skipping the cap support check and force-enabling ctdp
+ * for statistics collection, with the guarantee that it is
+ * safe to use.
+ */
+ if (!force && !iwl_mvm_is_ctdp_supported(mvm))
+ return -EOPNOTSUPP;
+
+ if (!iwl_mvm_firmware_running(mvm) ||
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 0);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
@@ -111,37 +151,6 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
-static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_sta *mvmsta;
- int sta_id, drain, ret;
-
- if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
- return -EIO;
-
- if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
- return -EINVAL;
- if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations)
- return -EINVAL;
- if (drain < 0 || drain > 1)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-
- if (!mvmsta)
- ret = -ENOENT;
- else
- ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
-
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -351,9 +360,7 @@ static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file,
char buf[12];
u32 value;
- err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (err)
return err;
@@ -530,193 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
-static
-int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
- int pos, int bufsz)
-{
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
- BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
- BT_MBOX_PRINT(0, LE_PROF1, false);
- BT_MBOX_PRINT(0, LE_PROF2, false);
- BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
- BT_MBOX_PRINT(0, CHL_SEQ_N, false);
- BT_MBOX_PRINT(0, INBAND_S, false);
- BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
- BT_MBOX_PRINT(0, LE_SCAN, false);
- BT_MBOX_PRINT(0, LE_ADV, false);
- BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
- BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
- BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
- BT_MBOX_PRINT(1, IP_SR, false);
- BT_MBOX_PRINT(1, LE_MSTR, false);
- BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
- BT_MBOX_PRINT(1, MSG_TYPE, false);
- BT_MBOX_PRINT(1, SSN, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
- BT_MBOX_PRINT(2, SNIFF_ACT, false);
- BT_MBOX_PRINT(2, PAG, false);
- BT_MBOX_PRINT(2, INQUIRY, false);
- BT_MBOX_PRINT(2, CONN, false);
- BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
- BT_MBOX_PRINT(2, DISC, false);
- BT_MBOX_PRINT(2, SCO_TX_ACT, false);
- BT_MBOX_PRINT(2, SCO_RX_ACT, false);
- BT_MBOX_PRINT(2, ESCO_RE_TX, false);
- BT_MBOX_PRINT(2, SCO_DURATION, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
- BT_MBOX_PRINT(3, SCO_STATE, false);
- BT_MBOX_PRINT(3, SNIFF_STATE, false);
- BT_MBOX_PRINT(3, A2DP_STATE, false);
- BT_MBOX_PRINT(3, A2DP_SRC, false);
- BT_MBOX_PRINT(3, ACL_STATE, false);
- BT_MBOX_PRINT(3, MSTR_STATE, false);
- BT_MBOX_PRINT(3, OBX_STATE, false);
- BT_MBOX_PRINT(3, OPEN_CON_2, false);
- BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
- BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
- BT_MBOX_PRINT(3, INBAND_P, false);
- BT_MBOX_PRINT(3, MSG_TYPE_2, false);
- BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
- return pos;
-}
-
-static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
- char *buf;
- int ret, pos = 0, bufsz = sizeof(char) * 1024;
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
-
- pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf + pos,
- bufsz - pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- notif->rrc_status & 0xF);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_status & 0xF);
-
- pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
- IWL_MVM_BT_COEX_SYNC2SCO);
- pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n",
- IWL_MVM_BT_COEX_MPLUT);
-
- mutex_unlock(&mvm->mutex);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
-
- return ret;
-}
-#undef BT_MBOX_PRINT
-
-static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
- char buf[256];
- int bufsz = sizeof(buf);
- int pos = 0;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t
-iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- u32 bt_tx_prio;
-
- if (sscanf(buf, "%u", &bt_tx_prio) != 1)
- return -EINVAL;
- if (bt_tx_prio > 4)
- return -EINVAL;
-
- mvm->bt_tx_prio = bt_tx_prio;
-
- return count;
-}
-
-static ssize_t
-iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- static const char * const modes_str[BT_FORCE_ANT_MAX] = {
- [BT_FORCE_ANT_DIS] = "dis",
- [BT_FORCE_ANT_AUTO] = "auto",
- [BT_FORCE_ANT_BT] = "bt",
- [BT_FORCE_ANT_WIFI] = "wifi",
- };
- int ret, bt_force_ant_mode;
-
- ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
- if (ret < 0)
- return ret;
-
- bt_force_ant_mode = ret;
- ret = 0;
- mutex_lock(&mvm->mutex);
- if (mvm->bt_force_ant_mode == bt_force_ant_mode)
- goto out;
-
- mvm->bt_force_ant_mode = bt_force_ant_mode;
- IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
- modes_str[mvm->bt_force_ant_mode]);
-
- if (iwl_mvm_firmware_running(mvm))
- ret = iwl_mvm_send_bt_init_conf(mvm);
- else
- ret = 0;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
char *buff, *pos, *endpos;
static const size_t bufsz = 1024;
- char _fw_name_pre[FW_NAME_PRE_BUFSIZE];
int ret;
buff = kmalloc(bufsz, GFP_KERNEL);
@@ -726,8 +552,8 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos = buff;
endpos = pos + bufsz;
- pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n",
- iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre));
+ pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
+ mvm->fwrt.fw->fw_version);
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
@@ -837,14 +663,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
le16_to_cpu(rsp->curr_mcc));
pos += scnprintf(pos, endpos - pos, "Block list entries:");
- for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++)
+ for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++)
pos += scnprintf(pos, endpos - pos, " 0x%x",
le16_to_cpu(rsp->block_list[i]));
pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n",
- iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO");
+ iwl_is_tas_approved() ? "YES" : "NO");
pos += scnprintf(pos, endpos - pos,
"\tDo TAS Support Dual Radio?: %s\n",
rsp->in_dual_radio ? "TRUE" : "FALSE");
@@ -965,6 +791,13 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
char *buf;
int ret;
size_t bufsz;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return -EOPNOTSUPP;
if (iwl_mvm_has_new_rx_stats_api(mvm))
bufsz = ((sizeof(struct mvm_statistics_rx) /
@@ -1144,6 +977,101 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
}
#undef PRINT_STAT_LE32
+static ssize_t iwl_dbgfs_fw_system_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff, *pos, *endpos;
+ int ret;
+ size_t bufsz;
+ int i;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+ struct iwl_mvm *mvm = file->private_data;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ /* in case of a wrong cmd version, allocate buffer only for error msg */
+ bufsz = (cmd_ver == 1) ? 4096 : 64;
+
+ buff = kzalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ pos = buff;
+ endpos = pos + bufsz;
+
+ if (cmd_ver != 1) {
+ pos += scnprintf(pos, endpos - pos,
+ "System stats not supported:%d\n", cmd_ver);
+ goto send_out;
+ }
+
+ mutex_lock(&mvm->mutex);
+ if (iwl_mvm_firmware_running(mvm))
+ iwl_mvm_request_statistics(mvm, false);
+
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false);
+ if (!vif)
+ continue;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+ }
+
+ if (i == NUM_MAC_INDEX_DRIVER || !vif) {
+ pos += scnprintf(pos, endpos - pos, "vif is NULL\n");
+ goto release_send_out;
+ }
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (!mvmvif) {
+ pos += scnprintf(pos, endpos - pos, "mvmvif is NULL\n");
+ goto release_send_out;
+ }
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[i];
+
+ pos += scnprintf(pos, endpos - pos,
+ "link_id %d", i);
+ pos += scnprintf(pos, endpos - pos,
+ " num_beacons %d",
+ link_info->beacon_stats.num_beacons);
+ pos += scnprintf(pos, endpos - pos,
+ " accu_num_beacons %d",
+ link_info->beacon_stats.accu_num_beacons);
+ pos += scnprintf(pos, endpos - pos,
+ " avg_signal %d\n",
+ link_info->beacon_stats.avg_signal);
+ }
+
+ pos += scnprintf(pos, endpos - pos,
+ "radio_stats.rx_time %lld\n",
+ mvm->radio_stats.rx_time);
+ pos += scnprintf(pos, endpos - pos,
+ "radio_stats.tx_time %lld\n",
+ mvm->radio_stats.tx_time);
+ pos += scnprintf(pos, endpos - pos,
+ "accu_radio_stats.rx_time %lld\n",
+ mvm->accu_radio_stats.rx_time);
+ pos += scnprintf(pos, endpos - pos,
+ "accu_radio_stats.tx_time %lld\n",
+ mvm->accu_radio_stats.tx_time);
+
+release_send_out:
+ mutex_unlock(&mvm->mutex);
+
+send_out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ kfree(buff);
+
+ return ret;
+}
+
static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
char __user *user_buf, size_t count,
loff_t *ppos,
@@ -1256,6 +1184,8 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
+ IWL_ERR(mvm, "Triggering an NMI from debugfs\n");
+
if (count == 6 && !strcmp(buf, "nolog\n"))
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
@@ -1379,7 +1309,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
/* supporting only MQ RX */
if (!mvm->trans->trans_cfg->mq_rx_supported)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
rxb._page = alloc_pages(GFP_ATOMIC, 0);
if (!rxb._page)
@@ -1477,6 +1407,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
&beacon_cmd.tim_size,
beacon->data, beacon->len);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ BEACON_TEMPLATE_CMD, 0) >= 14) {
+ u32 offset = iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+ }
+
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
@@ -1572,6 +1511,20 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return count;
}
+static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&mvm->mutex);
+ iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
char *buf, size_t count,
loff_t *ppos)
@@ -1673,7 +1626,7 @@ static ssize_t _iwl_dbgfs_link_sta_##name##_write(struct file *file, \
char buf[buflen] = {}; \
size_t buf_size = min(count, sizeof(buf) - 1); \
\
- if (copy_from_user(buf, user_buf, sizeof(buf))) \
+ if (copy_from_user(buf, user_buf, buf_size)) \
return -EFAULT; \
\
return _iwl_dbgfs_link_sta_wrap_write(iwl_dbgfs_##name##_write, \
@@ -1998,30 +1951,28 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data);
-MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(fw_system_stats);
MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_clear, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
(IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
@@ -2204,28 +2155,26 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
spin_lock_init(&mvm->drv_stats_lock);
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE(fw_dbg_clear, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
@@ -2279,6 +2228,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm,
&iwl_dbgfs_mem_ops);
+ debugfs_create_bool("rx_ts_ptp", 0600, mvm->debugfs_dir,
+ &mvm->rx_ts_ptp);
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
diff --git a/mvm/debugfs.h b/mvm/debugfs.h
new file mode 100644
index 000000000000..cc2c45b45ddc
--- /dev/null
+++ b/mvm/debugfs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
+ */
+#define MVM_DEBUGFS_READ_FILE_OPS(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+}
+
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+{ \
+ argtype *arg = file->private_data; \
+ char buf[buflen] = {}; \
+ size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+ if (copy_from_user(buf, user_buf, buf_size)) \
+ return -EFAULT; \
+ \
+ return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos); \
+} \
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .read = iwl_dbgfs_##name##_read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+ .write = _iwl_dbgfs_##name##_write, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
diff --git a/mvm/ftm-initiator.c b/mvm/ftm-initiator.c
index 233ae81884a0..afd90a52d4ec 100644
--- a/mvm/ftm-initiator.c
+++ b/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -40,6 +40,12 @@ struct iwl_mvm_ftm_pasn_entry {
u32 flags;
};
+struct iwl_mvm_ftm_iter_data {
+ u8 *cipher;
+ u8 *bssid;
+ u8 *tk;
+};
+
int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
u8 *hltk, u32 hltk_len)
@@ -53,6 +59,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
@@ -429,47 +437,55 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
return 0;
}
-#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
+#define FTM_SET_FLAG(flag) (*flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
static void
-iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v6 *target)
+iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ __le32 *flags)
{
- memcpy(target->bssid, peer->addr, ETH_ALEN);
- target->burst_period =
- cpu_to_le16(peer->ftm.burst_period);
- target->samples_per_burst = peer->ftm.ftms_per_burst;
- target->num_of_bursts = peer->ftm.num_bursts_exp;
- target->ftmr_max_retries = peer->ftm.ftmr_retries;
- target->initiator_ap_flags = cpu_to_le32(0);
+ *flags = cpu_to_le32(0);
if (peer->ftm.asap)
- FTM_PUT_FLAG(ASAP);
+ FTM_SET_FLAG(ASAP);
if (peer->ftm.request_lci)
- FTM_PUT_FLAG(LCI_REQUEST);
+ FTM_SET_FLAG(LCI_REQUEST);
if (peer->ftm.request_civicloc)
- FTM_PUT_FLAG(CIVIC_REQUEST);
+ FTM_SET_FLAG(CIVIC_REQUEST);
if (IWL_MVM_FTM_INITIATOR_DYNACK)
- FTM_PUT_FLAG(DYN_ACK);
+ FTM_SET_FLAG(DYN_ACK);
if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
- FTM_PUT_FLAG(ALGO_LR);
+ FTM_SET_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
- FTM_PUT_FLAG(ALGO_FFT);
+ FTM_SET_FLAG(ALGO_FFT);
if (peer->ftm.trigger_based)
- FTM_PUT_FLAG(TB);
+ FTM_SET_FLAG(TB);
else if (peer->ftm.non_trigger_based)
- FTM_PUT_FLAG(NON_TB);
+ FTM_SET_FLAG(NON_TB);
if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
peer->ftm.lmr_feedback)
- FTM_PUT_FLAG(LMR_FEEDBACK);
+ FTM_SET_FLAG(LMR_FEEDBACK);
+}
+
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->ftmr_max_retries = peer->ftm.ftmr_retries;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
}
static int
@@ -512,21 +528,10 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
return 0;
}
-static int
-iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v6 *target)
+static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *sta_id, __le32 *flags)
{
- int ret;
-
- ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
- &target->format_bw,
- &target->ctrl_ch_position);
- if (ret)
- return ret;
-
- iwl_mvm_ftm_put_target_common(mvm, peer, target);
-
if (vif->cfg.assoc) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -538,8 +543,8 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
continue;
- target->sta_id = mvmvif->link[link_id]->ap_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
+ *sta_id = mvmvif->link[link_id]->ap_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return PTR_ERR_OR_ZERO(sta);
@@ -547,14 +552,42 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (sta->mfp && (peer->ftm.trigger_based ||
peer->ftm.non_trigger_based))
- FTM_PUT_FLAG(PMF);
+ FTM_SET_FLAG(PMF);
break;
}
rcu_read_unlock();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->ftm_unprotected) {
+ *sta_id = IWL_MVM_INVALID_STA;
+ *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
+ }
+#endif
} else {
- target->sta_id = IWL_MVM_INVALID_STA;
+ *sta_id = IWL_MVM_INVALID_STA;
}
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+
/*
* TODO: Beacon interval is currently unknown, so use the common value
* of 100 TUs.
@@ -692,57 +725,62 @@ static void iter(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
void *data)
{
- struct iwl_tof_range_req_ap_entry_v6 *target = data;
+ struct iwl_mvm_ftm_iter_data *target = data;
if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
return;
WARN_ON(!sta->mfp);
- if (WARN_ON(key->keylen > sizeof(target->tk)))
- return;
-
- memcpy(target->tk, key->key, key->keylen);
- target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
- WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
+ target->tk = key->key;
+ *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
+ WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}
static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_tof_range_req_ap_entry_v7 *target)
+ u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
+ u8 *rx_pn, u8 *tx_pn, __le32 *flags)
{
struct iwl_mvm_ftm_pasn_entry *entry;
- u32 flags = le32_to_cpu(target->initiator_ap_flags);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ftm_unprotected)
+ return;
+#endif
- if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
+ if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
IWL_INITIATOR_AP_FLAGS_TB)))
return;
lockdep_assert_held(&mvm->mutex);
list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
- if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
+ if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
continue;
- target->cipher = entry->cipher;
+ *cipher = entry->cipher;
if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
- memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
+ memcpy(hltk, entry->hltk, sizeof(entry->hltk));
else
- memset(target->hltk, 0, sizeof(target->hltk));
+ memset(hltk, 0, sizeof(entry->hltk));
if (vif->cfg.assoc &&
- !memcmp(vif->bss_conf.bssid, target->bssid,
- sizeof(target->bssid)))
- ieee80211_iter_keys(mvm->hw, vif, iter, target);
- else
- memcpy(target->tk, entry->tk, sizeof(target->tk));
+ !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
+ struct iwl_mvm_ftm_iter_data target;
+
+ target.bssid = bssid;
+ ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+ } else {
+ memcpy(tk, entry->tk, sizeof(entry->tk));
+ }
- memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
- memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
+ memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
+ memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));
- target->initiator_ap_flags |=
- cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
+ FTM_SET_FLAG(SECURED);
return;
}
}
@@ -756,7 +794,11 @@ iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (err)
return err;
- iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
return err;
}
@@ -821,9 +863,10 @@ iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If secure LTF is turned off, replace the flag with PMF only
*/
flags = le32_to_cpu(target->initiator_ap_flags);
- if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
- !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
- flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
target->initiator_ap_flags = cpu_to_le32(flags);
}
@@ -902,6 +945,105 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
+static int
+iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v10 *target)
+{
+ u32 i2r_max_sts, flags;
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
+
+ i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MVM_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+ target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ target->band =
+ iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
+
+ /*
+ * TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+
+ /*
+ * If secure LTF is turned off, replace the flag with PMF only
+ */
+ flags = le32_to_cpu(target->initiator_ap_flags);
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
+ flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+ target->initiator_ap_flags = cpu_to_le32(flags);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v14 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
@@ -920,6 +1062,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+ case 14:
+ err = iwl_mvm_ftm_start_v14(mvm, vif, req);
+ break;
case 13:
err = iwl_mvm_ftm_start_v13(mvm, vif, req);
break;
diff --git a/mvm/ftm-responder.c b/mvm/ftm-responder.c
index b49781d1a07a..e4caa362f597 100644
--- a/mvm/ftm-responder.c
+++ b/mvm/ftm-responder.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <net/cfg80211.h>
#include <linux/etherdevice.h>
@@ -12,6 +12,9 @@ struct iwl_mvm_pasn_sta {
struct list_head list;
struct iwl_mvm_int_sta int_sta;
u8 addr[ETH_ALEN];
+
+	/* must be last as it is followed by a buffer holding the key */
+ struct ieee80211_key_conf keyconf;
};
struct iwl_mvm_pasn_hltk_data {
@@ -39,7 +42,7 @@ static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -77,7 +80,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
}
fallthrough;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -85,7 +88,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
static void
iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
- struct iwl_tof_responder_config_cmd_v9 *cmd)
+ struct iwl_tof_responder_config_cmd *cmd)
{
/* Up to 2 R2I STS are allowed on the responder */
u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
@@ -114,7 +117,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 * field interpretation is different), so the same struct can be used
* for all cases.
*/
- struct iwl_tof_responder_config_cmd_v9 cmd = {
+ struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
@@ -128,8 +131,13 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (cmd_ver == 10) {
+ cmd.band =
+ iwl_mvm_phy_band_from_nl80211(chandef->chan->band);
+ }
+
/* Use a default of bss_color=1 for now */
- if (cmd_ver == 9) {
+ if (cmd_ver >= 9) {
cmd.cmd_valid_fields |=
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR |
IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR);
@@ -145,7 +153,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
}
if (cmd_ver >= 8)
- iwl_mvm_ftm_responder_set_ndp(mvm, &cmd);
+ iwl_mvm_ftm_responder_set_ndp(mvm, (void *)&cmd);
if (cmd_ver >= 7)
err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
@@ -291,7 +299,7 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
default:
IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n",
cmd_ver);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
}
return ret;
@@ -302,7 +310,16 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
struct iwl_mvm_pasn_sta *sta)
{
list_del(&sta->list);
- iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
+
+ if (sta->keyconf.keylen)
+ iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id),
+ &sta->keyconf);
+
+ if (iwl_mvm_has_mld_api(mvm->fw))
+ iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
+ else
+ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
+
iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
kfree(sta);
}
@@ -328,7 +345,7 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
if (cmd_ver < 3) {
IWL_ERR(mvm, "Adding PASN station not supported by FW\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if ((!hltk || !hltk_len) && (!tk || !tk_len)) {
@@ -337,6 +354,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (hltk && hltk_len) {
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
+ IWL_ERR(mvm, "No support for secure LTF measurement\n");
+ return -EINVAL;
+ }
+
hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
@@ -347,12 +370,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
if (!sta)
return -ENOBUFS;
ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len);
+ cipher, tk, tk_len, &sta->keyconf);
if (ret) {
kfree(sta);
return ret;
@@ -420,7 +443,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_unlock();
phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap,
ctx.rx_chains_static,
ctx.rx_chains_dynamic);
if (ret)
diff --git a/mvm/fw.c b/mvm/fw.c
index 1f5db65a088d..08c4898c8f1a 100644
--- a/mvm/fw.c
+++ b/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -15,6 +15,8 @@
#include "iwl-prph.h"
#include "fw/acpi.h"
#include "fw/pnvm.h"
+#include "fw/uefi.h"
+#include "fw/regulatory.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -23,12 +25,9 @@
#include "iwl-nvm-parse.h"
#include "time-sync.h"
-#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
+#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define IWL_TAS_US_MCC 0x5553
-#define IWL_TAS_CANADA_MCC 0x4341
-
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -92,20 +91,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
- __le32 *dump_data = mfu_dump_notif->data;
- int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
- int i;
if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));
-
- for (i = 0; i < n_words; i++)
- IWL_DEBUG_INFO(mvm,
- "MFUART assert dump, dword %u: 0x%08x\n",
- le16_to_cpu(mfu_dump_notif->index_num) *
- n_words + i,
- le32_to_cpu(dump_data[i]));
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -416,7 +405,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
- if (ret == -ETIMEDOUT && !mvm->pldr_sync)
+ if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
@@ -468,12 +457,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
#endif
/*
+ * For pre-MLD API (MLD API doesn't use the timestamps):
* All the BSSes in the BSS table include the GP2 in the system
* at the beacon Rx time, this is of course no longer relevant
* since we are resetting the firmware.
* Purge all the BSS table.
*/
- cfg80211_bss_flush(mvm->hw->wiphy);
+ if (!mvm->mld_api_is_used)
+ cfg80211_bss_flush(mvm->hw->wiphy);
return 0;
}
@@ -486,7 +477,47 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
#endif /* CONFIG_ACPI */
}
-#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
+static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
+{
+ u8 cmd_ver;
+ int ret;
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ MCC_ALLOWED_AP_TYPE_CMD),
+ .flags = 0,
+ .data[0] = &mvm->fwrt.uats_table,
+ .len[0] = sizeof(mvm->fwrt.uats_table),
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
+ IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
+ return;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver != 1) {
+ IWL_DEBUG_RADIO(mvm,
+ "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n",
+ cmd_ver);
+ return;
+ }
+
+ ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
+ if (ret < 0) {
+ IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
+ return;
+ }
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret < 0)
+ IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+ ret);
+ else
+ IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n");
+}
+
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
u8 cmd_ver;
@@ -520,13 +551,6 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return ret;
}
-#else
-
-static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
@@ -583,6 +607,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
static const u16 init_complete[] = {
INIT_COMPLETE_NOTIF,
};
+ u32 sb_cfg;
int ret;
if (mvm->trans->cfg->tx_with_siso_diversity)
@@ -592,6 +617,14 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
mvm->rfkill_safe_init_done = false;
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
+ sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
+ /* if needed, we'll reset this on our way out later */
+ mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
+ if (mvm->fw_product_reset && iwl_mei_pldr_req())
+ return -EBUSY;
+ }
+
iwl_init_notification_wait(&mvm->notif_wait,
&init_wait,
init_complete,
@@ -605,11 +638,23 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+
+ /* if we needed reset then fail here, but notify and remove */
+ if (mvm->fw_product_reset) {
+ iwl_mei_alive_notif(false);
+ iwl_trans_pcie_remove(mvm->trans, true);
+ }
+
goto error;
}
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -634,14 +679,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
goto error;
}
- if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
- ret = iwl_nvm_init(mvm);
- if (ret) {
- IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
- goto error;
- }
- }
-
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
NVM_ACCESS_COMPLETE),
CMD_SEND_IN_RFKILL,
@@ -666,8 +703,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
return ret;
/* Read the NVM only at driver load time, no need to do this twice */
- if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
- mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
+ if (!mvm->nvm_data) {
+ mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
+ mvm->set_tx_ant, mvm->set_rx_ant);
if (IS_ERR(mvm->nvm_data)) {
ret = PTR_ERR(mvm->nvm_data);
mvm->nvm_data = NULL;
@@ -790,7 +828,7 @@ remove_notif:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
mvm->rfkill_safe_init_done = false;
- if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
+ if (!mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@@ -802,7 +840,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)((u8 *)mvm->nvm_data->channels + 1);
+ (void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
@@ -822,7 +860,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
@@ -833,13 +870,15 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 7) {
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
+ if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v7.per_chain[0][0];
cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
} else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
@@ -863,9 +902,9 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
- IWL_NUM_CHAIN_TABLES,
- n_subbands, prof_a, prof_b);
+ ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -921,7 +960,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
+ if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
ret = -EIO;
iwl_free_resp(&cmd);
@@ -935,7 +974,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 len;
u32 n_bands;
u32 n_profiles;
- u32 sk = 0;
+ __le32 sk = cpu_to_le32(0);
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
@@ -952,27 +991,35 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
/* the ops field is at the same spot for all versions, so set in v1 */
cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
if (cmd_ver == 5) {
len = sizeof(cmd.v5);
n_bands = ARRAY_SIZE(cmd.v5.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v5.table_revision = sk;
} else if (cmd_ver == 4) {
len = sizeof(cmd.v4);
n_bands = ARRAY_SIZE(cmd.v4.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v4.table_revision = sk;
} else if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v3.table_revision = sk;
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v2.table_revision = sk;
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
}
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
@@ -984,8 +1031,8 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
/* the table is at the same position for all versions, so set use v1 */
- ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
- n_bands, n_profiles);
+ ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
@@ -994,27 +1041,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- /* Only set to South Korea if the table revision is 1 */
- if (mvm->fwrt.geo_rev == 1)
- sk = 1;
-
- /*
- * Set the table_revision to South Korea (1) or not (0). The
- * element name is misleading, as it doesn't contain the table
- * revision number, but whether the South Korea variation
- * should be used.
- * This must be done after calling iwl_sar_geo_init().
- */
- if (cmd_ver == 5)
- cmd.v5.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 4)
- cmd.v4.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 3)
- cmd.v3.table_revision = cpu_to_le32(sk);
- else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER))
- cmd.v2.table_revision = cpu_to_le32(sk);
-
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
@@ -1023,7 +1049,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
union iwl_ppag_table_cmd cmd;
int ret, cmd_size;
- ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
/* Not supporting PPAG table is a valid scenario */
if (ret < 0)
return 0;
@@ -1042,74 +1068,19 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
+ if (!(iwl_is_ppag_approved(&mvm->fwrt)))
return 0;
return iwl_mvm_ppag_send_cmd(mvm);
}
-static const struct dmi_system_id dmi_tas_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "LENOVO",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "Acer",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "MSI",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
- },
- },
- { .ident = "Honor",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
- },
- },
- /* keep last */
- {}
-};
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return dmi_check_system(dmi_tas_approved_list);
-}
-
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
int i;
u32 size = le32_to_cpu(*le_size);
/* Verify that there is room for another country */
- if (size >= IWL_TAS_BLOCK_LIST_MAX)
+ if (size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
for (i = 0; i < size; i++) {
@@ -1126,21 +1097,21 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- union iwl_tas_config_cmd cmd = {};
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
- APCI_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
return;
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
+ ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1151,16 +1122,16 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
if (ret == 0)
return;
- if (!iwl_mvm_is_vendor_in_approved_list()) {
+ if (!iwl_is_tas_approved()) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_TAS_US_MCC)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_TAS_CANADA_MCC))) {
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
@@ -1168,125 +1139,74 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
} else {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is in the approved list.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ }
+
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
+
+	/* Set v3 or v4 specific parts. Will be truncated for fw_ver < 3 */
+ if (fw_ver == 4) {
+ cmd.v4.override_tas_iec = data.override_tas_iec;
+ cmd.v4.enable_tas_iec = data.enable_tas_iec;
+ cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ } else {
+ cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
+ cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
}
- /* v4 is the same size as v3, so no need to differentiate here */
- cmd_size = fw_ver < 3 ?
- sizeof(struct iwl_tas_config_cmd_v2) :
- sizeof(struct iwl_tas_config_cmd_v3);
+ cmd_size = sizeof(struct iwl_tas_config_cmd_common);
+ if (fw_ver >= 3)
+ /* v4 is the same size as v3 */
+ cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
- u8 value;
- int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
- &iwl_rfi_guid, &value);
+ u32 value = 0;
+ /* default behaviour is disabled */
+ bool bios_enable_rfi = false;
+ int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
if (ret < 0) {
IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
+ return bios_enable_rfi;
+ }
- } else if (value >= DSM_VALUE_RFI_MAX) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
- value);
-
- } else if (value == DSM_VALUE_RFI_ENABLE) {
+ value &= DSM_VALUE_RFI_DISABLE;
+ /* RFI BIOS CONFIG value can be 0 or 3 only.
+	 * i.e. 0 means DDR and DLVR enabled, 3 means DDR and DLVR disabled.
+	 * 1 and 2 are invalid BIOS configurations, so it is not possible to
+	 * disable DDR/DLVR separately.
+ */
+ if (!value) {
IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- return DSM_VALUE_RFI_ENABLE;
+ bios_enable_rfi = true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(mvm,
+ "DSM RFI got invalid value, value=%d\n", value);
}
- IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
-
- /* default behaviour is disabled */
- return DSM_VALUE_RFI_DISABLE;
+ return bios_enable_rfi;
}
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
+ struct iwl_lari_config_change_cmd cmd;
+ size_t cmd_size;
int ret;
- u32 value;
- struct iwl_lari_config_change_cmd_v6 cmd = {};
-
- cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
-
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
- &iwl_guid, &value);
- if (!ret)
- cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_UNII4_CHAN,
- &iwl_guid, &value);
- if (!ret)
- cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ACTIVATE_CHANNEL,
- &iwl_guid, &value);
- if (!ret)
- cmd.chan_state_active_bitmap = cpu_to_le32(value);
-
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
- if (!ret)
- cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &iwl_guid, &value);
- if (!ret)
- cmd.force_disable_channels_bitmap = cpu_to_le32(value);
-
- if (cmd.config_bitmap ||
- cmd.oem_uhb_allow_bitmap ||
- cmd.oem_11ax_allow_bitmap ||
- cmd.oem_unii4_allow_bitmap ||
- cmd.chan_state_active_bitmap ||
- cmd.force_disable_channels_bitmap) {
- size_t cmd_size;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE),
- 1);
- switch (cmd_ver) {
- case 6:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
- break;
- case 5:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
- break;
- case 4:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
- break;
- case 3:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
- break;
- case 2:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
- break;
- default:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
- break;
- }
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd.config_bitmap),
- le32_to_cpu(cmd.oem_11ax_allow_bitmap));
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
- le32_to_cpu(cmd.oem_unii4_allow_bitmap),
- le32_to_cpu(cmd.chan_state_active_bitmap),
- cmd_ver);
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
- le32_to_cpu(cmd.oem_uhb_allow_bitmap),
- le32_to_cpu(cmd.force_disable_channels_bitmap));
+ ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
+ if (!ret) {
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
@@ -1298,12 +1218,14 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
}
}
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
{
int ret;
+ iwl_acpi_get_guid_lock_status(&mvm->fwrt);
+
/* read PPAG table */
- ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
+ ret = iwl_bios_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1311,7 +1233,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
/* read SAR tables */
- ret = iwl_sar_get_wrds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1320,7 +1242,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
* If not available, don't fail and don't bother with EWRD and
* WGDS */
- if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
@@ -1331,7 +1253,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
} else {
- ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
+ ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use
* WRDS, so don't fail */
if (ret < 0)
@@ -1341,7 +1263,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
/* read geo SAR table */
if (iwl_sar_geo_support(&mvm->fwrt)) {
- ret = iwl_sar_get_wgds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wgds_table(&mvm->fwrt);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1351,59 +1273,18 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
-}
-#else /* CONFIG_ACPI */
-inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
- int prof_a, int prof_b)
-{
- return 1;
-}
-
-inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
-{
- return -ENOENT;
+ if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
+ IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
}
-static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- return 0;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
}
-int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
-static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
-{
-}
-
-static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
-{
-}
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return false;
-}
-
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- return DSM_VALUE_RFI_DISABLE;
-}
-
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
-{
-}
-
-#endif /* CONFIG_ACPI */
-
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
@@ -1448,10 +1329,15 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
- if (resp)
+ if (resp) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
resp);
+
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_disconnect_iterator,
+ mvm);
+ }
}
}
@@ -1471,9 +1357,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
-
- if (iwlmvm_mod_params.init_dbg)
- return 0;
return ret;
}
@@ -1499,10 +1382,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
int iwl_mvm_up(struct iwl_mvm *mvm)
{
int ret, i;
- struct ieee80211_channel *chan;
- struct cfg80211_chan_def chandef;
struct ieee80211_supported_band *sband = NULL;
- u32 sb_cfg;
lockdep_assert_held(&mvm->mutex);
@@ -1510,23 +1390,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
return ret;
- sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
- mvm->pldr_sync = !(sb_cfg & SB_CFG_RESIDES_IN_OTP_MASK);
- if (mvm->pldr_sync && iwl_mei_pldr_req())
- return -EBUSY;
-
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- if (ret != -ERFKILL && !mvm->pldr_sync)
+ if (ret != -ERFKILL && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
/* FW loaded successfully */
- mvm->pldr_sync = false;
+ mvm->fw_product_reset = false;
+ iwl_fw_disable_dbg_asserts(&mvm->fwrt);
iwl_get_shared_mem_conf(&mvm->fwrt);
ret = iwl_mvm_sf_update(mvm, NULL, false);
@@ -1591,8 +1467,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++)
RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
- memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map));
-
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
@@ -1630,21 +1504,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- chan = &sband->channels[0];
-
- cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
- for (i = 0; i < NUM_PHY_CTX; i++) {
- /*
- * The channel used here isn't relevant as it's
- * going to be overwritten in the other flows.
- * For now use the first channel we have.
- */
- ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
- &chandef, 1, 1);
- if (ret)
- goto error;
- }
-
if (iwl_mvm_is_tt_in_fw(mvm)) {
/* in order to give the responsibility of ct-kill and
* TX backoff to FW we need to send empty temperature reporting
@@ -1708,9 +1567,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!mvm->ptp_data.ptp_clock)
iwl_mvm_ptp_init(mvm);
- if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
- IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
-
ret = iwl_mvm_ppag_init(mvm);
if (ret)
goto error;
@@ -1727,9 +1583,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
+ iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
+ if (iwl_mvm_eval_dsm_rfi(mvm))
iwl_rfi_send_config_cmd(mvm, NULL);
}
@@ -1738,8 +1595,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- if (!iwlmvm_mod_params.init_dbg || !ret)
- iwl_mvm_stop_device(mvm);
+ iwl_mvm_stop_device(mvm);
return ret;
}
diff --git a/mvm/link.c b/mvm/link.c
index ace82e2c5bd9..a9929aa49913 100644
--- a/mvm/link.c
+++ b/mvm/link.c
@@ -1,33 +1,65 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
#include "time-event.h"
-static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvm_vif)
-{
- u32 link_id;
+#define HANDLE_ESR_REASONS(HOW) \
+ HOW(BLOCKED_PREVENTION) \
+ HOW(BLOCKED_WOWLAN) \
+ HOW(BLOCKED_TPT) \
+ HOW(BLOCKED_FW) \
+ HOW(BLOCKED_NON_BSS) \
+ HOW(BLOCKED_ROC) \
+ HOW(EXIT_MISSED_BEACON) \
+ HOW(EXIT_LOW_RSSI) \
+ HOW(EXIT_COEX) \
+ HOW(EXIT_BANDWIDTH) \
+ HOW(EXIT_CSA) \
+ HOW(EXIT_LINK_USAGE)
- lockdep_assert_held(&mvm->mutex);
+static const char *const iwl_mvm_esr_states_names[] = {
+#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
+ HANDLE_ESR_REASONS(NAME_ENTRY)
+};
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
+{
+ int offs = ilog2(state);
- link_id = ffz(mvm->fw_link_ids_map);
+ if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
+ !iwl_mvm_esr_states_names[offs])
+ return "UNKNOWN";
- /* this case can happen if there're deactivated but not removed links */
- if (link_id > IWL_MVM_FW_MAX_LINK_ID)
- return IWL_MVM_FW_LINK_ID_INVALID;
+ return iwl_mvm_esr_states_names[offs];
+}
- mvm->fw_link_ids_map |= BIT(link_id);
- return link_id;
+static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mvm,
+ "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_ESR_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
}
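
The HANDLE_ESR_REASONS list above is an X-macro: one reason list is expanded twice, once to build the name table (indexed by ilog2() of each flag) and once to build the printf format string and its arguments. A minimal standalone sketch of the same technique, using a plain enum index instead of bit flags and invented reason names:

#include <stdio.h>

/* Illustrative only: a tiny X-macro list, not the driver's real reason set. */
#define HANDLE_REASONS(HOW) \
	HOW(BLOCKED_TPT)    \
	HOW(EXIT_LOW_RSSI)  \
	HOW(EXIT_CSA)

enum reason {
#define ENUM_ENTRY(x) REASON_##x,
	HANDLE_REASONS(ENUM_ENTRY)
#undef ENUM_ENTRY
};

static const char *const reason_names[] = {
#define NAME_ENTRY(x) [REASON_##x] = #x,
	HANDLE_REASONS(NAME_ENTRY)
#undef NAME_ENTRY
};

int main(void)
{
	/* one list expands into both the enum and the name table */
	printf("%s\n", reason_names[REASON_EXIT_LOW_RSSI]);
	return 0;
}
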
-static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id)
+static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvm_vif)
{
+ u32 i;
+
lockdep_assert_held(&mvm->mutex);
- if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID))
- mvm->fw_link_ids_map &= ~BIT(link_id);
+ for (i = 0; i < ARRAY_SIZE(mvm->link_id_to_link_conf); i++)
+ if (!rcu_access_pointer(mvm->link_id_to_link_conf[i]))
+ return i;
+
+ return IWL_MVM_FW_LINK_ID_INVALID;
}
static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
@@ -46,28 +78,45 @@ static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf)
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int link_id = link_conf->link_id;
- struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
- struct iwl_link_config_cmd cmd = {};
- struct iwl_mvm_phy_ctxt *phyctxt;
-
- if (WARN_ON_ONCE(!link_info))
- return -EINVAL;
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {
link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,
mvmvif);
- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
+ if (link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf))
return -EINVAL;
rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],
link_conf);
}
+ return 0;
+}
+
+int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ struct iwl_link_config_cmd cmd = {};
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
+ int ret;
+
+ if (WARN_ON_ONCE(!link_info))
+ return -EINVAL;
+
+ ret = iwl_mvm_set_link_mapping(mvm, vif, link_conf);
+ if (ret)
+ return ret;
+
/* Update SF - Disable if needed. if this fails, SF might still be on
* while many macs are bound, which is forbidden - so fail the binding.
*/
@@ -77,23 +126,79 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
cmd.mac_id = cpu_to_le32(mvmvif->id);
cmd.spec_link_id = link_conf->link_id;
- /* P2P-Device already has a valid PHY context during add */
- phyctxt = link_info->phy_ctxt;
- if (phyctxt)
- cmd.phy_id = cpu_to_le32(phyctxt->id);
- else
- cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
+ WARN_ON_ONCE(link_info->phy_ctxt);
+ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
memcpy(cmd.local_link_addr, link_conf->addr, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
+struct iwl_mvm_esr_iter_data {
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ bool lift_block;
+};
+
+static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_esr_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_id];
+ if (vif == data->vif && link_id == data->link_id)
+ continue;
+ if (link_info->active)
+ data->lift_block = false;
+ }
+}
+
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active)
+{
+ /* An active link of a non-station vif blocks EMLSR. Upon activation
+ * block EMLSR on the bss vif. Upon deactivation, check if this link
+ * was the last non-station link active, and if so unblock the bss vif
+ */
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_esr_iter_data data = {
+ .vif = vif,
+ .link_id = link_id,
+ .lift_block = true,
+ };
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return 0;
+
+ if (active)
+ return iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_NON_BSS);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_esr_vif_iterator, &data);
+ if (data.lift_block) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
+ mutex_unlock(&mvm->mutex);
+ }
+
+ return 0;
+}
+
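
iwl_mvm_esr_non_bss_link() implements a "last one out lifts the block" rule: activating any non-station link blocks EMLSR on the BSS interface immediately, and the block is only lifted once the iterator finds no other active non-station link. A minimal standalone sketch of that rule (the helper name, link states and indices are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative helper (not driver code): may the EMLSR block be lifted
 * once link 'down_idx' has gone down? */
static bool may_unblock(const bool *active, int n, int down_idx)
{
	for (int i = 0; i < n; i++)
		if (i != down_idx && active[i])
			return false;	/* another non-BSS link is still up */
	return true;
}

int main(void)
{
	bool active[] = { true, false, true };	/* example link states */

	/* link 2 goes down, but link 0 is still active, so keep blocking */
	printf("%s\n", may_unblock(active, 3, 2) ? "unblock" : "keep blocked");
	return 0;
}
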
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active)
@@ -105,6 +210,8 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
u32 ht_flag, flags = 0, flags_mask = 0;
int ret;
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info ||
link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
@@ -194,11 +301,23 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
flags_mask |= LINK_FLG_MU_EDCA_CW;
}
- if (link_conf->eht_puncturing && !iwlwifi_mod_params.disable_11be)
- cmd.puncture_mask = cpu_to_le16(link_conf->eht_puncturing);
- else
- /* This flag can be set only if the MAC has eht support */
- changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+ if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
+ struct ieee80211_chanctx_conf *ctx;
+ struct cfg80211_chan_def *def = NULL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ if (ctx)
+ def = iwl_mvm_chanctx_def(mvm, ctx);
+
+ if (iwlwifi_mod_params.disable_11be ||
+ !link_conf->eht_support || !def ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6)
+ changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
+ else
+ cmd.puncture_mask = cpu_to_le16(def->punctured);
+ rcu_read_unlock();
+ }
cmd.bss_color = link_conf->he_bss_color.color;
@@ -226,7 +345,8 @@ send_cmd:
cmd.flags = cpu_to_le32(flags);
cmd.flags_mask = cpu_to_le32(flags_mask);
cmd.spec_link_id = link_conf->link_id;
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY);
if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE))
@@ -235,6 +355,24 @@ send_cmd:
return ret;
}
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ /* mac80211 thought we have the link, but it was never configured */
+ if (WARN_ON(!link_info ||
+ link_info->fw_link_id >=
+ ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ return -EINVAL;
+
+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
+ NULL);
+ return 0;
+}
+
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -244,16 +382,14 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
int ret;
- if (WARN_ON(!link_info ||
- link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
- return -EINVAL;
+ ret = iwl_mvm_unset_link_mapping(mvm, vif, link_conf);
+ if (ret)
+ return 0;
- RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
- NULL);
cmd.link_id = cpu_to_le32(link_info->fw_link_id);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
cmd.spec_link_id = link_conf->link_id;
+ cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_REMOVE);
@@ -283,3 +419,732 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
+
+struct iwl_mvm_rssi_to_grade {
+ s8 rssi[2];
+ u16 grade;
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+ { \
+ .rssi = {_lb, _hb_uhb}, \
+ .grade = _grade \
+ }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
+ RSSI_TO_GRADE_LINE(-85, -89, 177),
+ RSSI_TO_GRADE_LINE(-83, -86, 344),
+ RSSI_TO_GRADE_LINE(-82, -85, 516),
+ RSSI_TO_GRADE_LINE(-80, -83, 688),
+ RSSI_TO_GRADE_LINE(-77, -79, 1032),
+ RSSI_TO_GRADE_LINE(-73, -76, 1376),
+ RSSI_TO_GRADE_LINE(-70, -74, 1548),
+ RSSI_TO_GRADE_LINE(-69, -72, 1750),
+ RSSI_TO_GRADE_LINE(-65, -68, 2064),
+ RSSI_TO_GRADE_LINE(-61, -66, 2294),
+ RSSI_TO_GRADE_LINE(-58, -61, 2580),
+ RSSI_TO_GRADE_LINE(-55, -58, 2868),
+ RSSI_TO_GRADE_LINE(-46, -55, 3098),
+ RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+#define DEFAULT_CHAN_LOAD_LB 30
+#define DEFAULT_CHAN_LOAD_HB 15
+#define DEFAULT_CHAN_LOAD_UHB 0
+
+/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/* Convert a percentage from [0,100] to [0,255] */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
+
+static unsigned int
+iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_chan_width chan_width =
+ link_conf->chanreq.oper.width;
+ int mhz = nl80211_chan_width_to_mhz(chan_width);
+ unsigned int n_subchannels, n_punctured, puncturing_penalty;
+
+ if (WARN_ONCE(mhz < 20 || mhz > 320,
+ "Invalid channel width : (%d)\n", mhz))
+ return SCALE_FACTOR;
+
+ /* No puncturing, no penalty */
+ if (mhz < 80)
+ return SCALE_FACTOR;
+
+ /* total number of subchannels */
+ n_subchannels = mhz / 20;
+ /* how many of these are punctured */
+ n_punctured = hweight16(link_conf->chanreq.oper.punctured);
+
+ puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
+ return SCALE_FACTOR - puncturing_penalty;
+}
+
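
The puncturing factor is plain fixed-point arithmetic: with 1/256 scaling, the factor is 256 minus the punctured fraction of the 20 MHz subchannels, and widths below 80 MHz cannot be punctured at all. A standalone re-statement of that arithmetic (the helper name and the example bitmap are illustrative only):

#include <stdio.h>

#define SCALE_FACTOR 256

/* Illustrative helper (not the driver's): width in MHz plus a bitmap of
 * punctured 20 MHz subchannels. */
static unsigned int puncturing_factor(int mhz, unsigned int punctured_map)
{
	unsigned int n_subchannels = mhz / 20;
	unsigned int n_punctured = __builtin_popcount(punctured_map);

	if (mhz < 80)		/* widths below 80 MHz cannot be punctured */
		return SCALE_FACTOR;

	return SCALE_FACTOR - n_punctured * SCALE_FACTOR / n_subchannels;
}

int main(void)
{
	/* 160 MHz with the two lowest subchannels punctured:
	 * 256 - 2 * 256 / 8 = 192, i.e. the grade is scaled by 192/256 = 0.75 */
	printf("%u\n", puncturing_factor(160, 0x3));
	return 0;
}
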
+static unsigned int
+iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct iwl_mvm_vif_link_info *mvm_link =
+ iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
+ const struct element *bss_load_elem;
+ const struct ieee80211_bss_load_elem *bss_load;
+ enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+ const struct cfg80211_bss_ies *ies;
+ unsigned int chan_load;
+ u32 chan_load_by_us;
+
+ rcu_read_lock();
+ if (ieee80211_vif_link_active(vif, link_conf->link_id))
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
+ else
+ ies = rcu_dereference(link_conf->bss->ies);
+
+ if (ies)
+ bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+ ies->data, ies->len);
+ else
+ bss_load_elem = NULL;
+
+ /* If there isn't BSS Load element, take the defaults */
+ if (!bss_load_elem ||
+ bss_load_elem->datalen != sizeof(*bss_load)) {
+ rcu_read_unlock();
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_LB;
+ break;
+ case NL80211_BAND_5GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_HB;
+ break;
+ case NL80211_BAND_6GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_UHB;
+ break;
+ default:
+ chan_load = 0;
+ break;
+ }
+ /* The defaults are given in percentage */
+ return NORMALIZE_PERCENT_TO_255(chan_load);
+ }
+
+ bss_load = (const void *)bss_load_elem->data;
+ /* Channel util is in range 0-255 */
+ chan_load = bss_load->channel_util;
+ rcu_read_unlock();
+
+ if (!mvm_link || !mvm_link->active)
+ return chan_load;
+
+ if (WARN_ONCE(!mvm_link->phy_ctxt,
+ "Active link (%u) without phy ctxt assigned!\n",
+ link_conf->link_id))
+ return chan_load;
+
+ /* channel load by us is given in percentage */
+ chan_load_by_us =
+ NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
+
+ /* Use only values that firmware sends that can possibly be valid */
+ if (chan_load_by_us <= chan_load)
+ chan_load -= chan_load_by_us;
+
+ return chan_load;
+}
+
+static unsigned int
+iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
+{
+ return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
+}
+
+/* This function calculates the grade of a link. Returns 0 in error case */
+VISIBLE_IF_IWLWIFI_KUNIT
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band;
+ int i, rssi_idx;
+ s32 link_rssi;
+ unsigned int grade = MAX_GRADE;
+
+ if (WARN_ON_ONCE(!link_conf))
+ return 0;
+
+ band = link_conf->chanreq.oper.chan->band;
+ if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+ band != NL80211_BAND_5GHZ &&
+ band != NL80211_BAND_6GHZ,
+ "Invalid band (%u)\n", band))
+ return 0;
+
+ link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+ /*
+ * For 6 GHz the RSSI of the beacons is lower than
+ * the RSSI of the data.
+ */
+ if (band == NL80211_BAND_6GHZ)
+ link_rssi += 4;
+
+ rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+ /* No valid RSSI - take the lowest grade */
+ if (!link_rssi)
+ link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+ /* Get grade based on RSSI */
+ for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+ const struct iwl_mvm_rssi_to_grade *line =
+ &rssi_to_grade_map[i];
+
+ if (link_rssi > line->rssi[rssi_idx])
+ continue;
+ grade = line->grade;
+ break;
+ }
+
+ /* apply the channel load and puncturing factors */
+ grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
+ grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
+ return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
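
Putting the pieces together, the link grade is the table lookup scaled by the free-airtime factor and then by the puncturing factor, all in 1/256 fixed point. A standalone worked example using a truncated copy of the 5/6 GHz column above and the 5 GHz default load of 15% (the table rows and defaults are copied from this patch; the program itself is only an illustration):

#include <stdio.h>

#define SCALE_FACTOR 256

/* A truncated copy of the 5/6 GHz column of the table above; grades are
 * estimated throughput in units of 0.1 Mbps. */
static const struct { int rssi; unsigned int grade; } map[] = {
	{ -89,  177 }, { -86,  344 }, { -85,  516 }, { -83,  688 },
	{ -79, 1032 }, { -76, 1376 }, { -74, 1548 }, { -72, 1750 },
};

int main(void)
{
	int rssi = -75;					/* example beacon RSSI, 5 GHz */
	unsigned int grade = map[7].grade;		/* fallback: best row copied here */
	unsigned int load = 15 * SCALE_FACTOR / 100;	/* 5 GHz default load, 15% -> 38 */

	/* the first row whose RSSI is not below ours wins */
	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (rssi <= map[i].rssi) {
			grade = map[i].grade;
			break;
		}
	}

	/* -75 dBm -> 1548, scaled by the free-airtime factor (256 - 38)/256,
	 * giving 1318, i.e. roughly 131.8 Mbps (no puncturing penalty here) */
	printf("grade = %u\n", grade * (SCALE_FACTOR - load) / SCALE_FACTOR);
	return 0;
}
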
+
+static
+u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
+ struct iwl_mvm_link_sel_data *data,
+ unsigned long usable_links,
+ u8 *best_link_idx)
+{
+ u8 n_data = 0;
+ u16 max_grade = 0;
+ unsigned long link_id;
+
+ /* TODO: don't select links that weren't discovered in the last scan */
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].chandef = &link_conf->chanreq.oper;
+ data[n_data].signal = link_conf->bss->signal / 100;
+ data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
+
+ if (data[n_data].grade > max_grade) {
+ max_grade = data[n_data].grade;
+ *best_link_idx = n_data;
+ }
+ n_data++;
+ }
+
+ return n_data;
+}
+
+struct iwl_mvm_bw_to_rssi_threshs {
+ s8 low;
+ s8 high;
+};
+
+#define BW_TO_RSSI_THRESHOLDS(_bw) \
+ [IWL_PHY_CHANNEL_MODE ## _bw] = { \
+ .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
+ .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
+ }
+
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
+ BW_TO_RSSI_THRESHOLDS(20),
+ BW_TO_RSSI_THRESHOLDS(40),
+ BW_TO_RSSI_THRESHOLDS(80),
+ BW_TO_RSSI_THRESHOLDS(160)
+ /* 320 MHz has the same thresholds as 20 MHz */
+ };
+ const struct iwl_mvm_bw_to_rssi_threshs *threshs;
+ u8 chan_width = iwl_mvm_get_channel_width(chandef);
+
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+ /* 6 GHz will always use 20 MHz thresholds, regardless of the BW */
+ if (chan_width == IWL_PHY_CHANNEL_MODE320)
+ chan_width = IWL_PHY_CHANNEL_MODE20;
+
+ threshs = &bw_to_rssi_threshs_map[chan_width];
+
+ return low ? threshs->low : threshs->high;
+}
+
+static u32
+iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *link,
+ bool primary)
+{
+ struct wiphy *wiphy = mvm->hw->wiphy;
+ struct ieee80211_bss_conf *conf;
+ enum iwl_mvm_esr_state ret = 0;
+ s8 thresh;
+
+ conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+ if (WARN_ON_ONCE(!conf))
+ return false;
+
+	/* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
+ (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
+ primary)))
+ ret |= IWL_MVM_ESR_EXIT_COEX;
+
+ thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
+ false);
+
+ if (link->signal < thresh)
+ ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
+
+ if (conf->csa_active)
+ ret |= IWL_MVM_ESR_EXIT_CSA;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Link %d is not allowed for esr\n",
+ link->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ }
+ return ret;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_mvm_esr_state ret = 0;
+
+ /* Per-link considerations */
+ if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
+ iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
+ return false;
+
+ if (a->chandef->width != b->chandef->width ||
+ !(a->chandef->chan->band == NL80211_BAND_6GHZ &&
+ b->chandef->chan->band == NL80211_BAND_5GHZ))
+ ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Links %d and %d are not a valid pair for EMLSR\n",
+ a->link_id, b->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ return false;
+ }
+
+ return true;
+
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
+
+/*
+ * Returns the combined eSR grade of two given links.
+ * Returns 0 if eSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mvm_get_chan_load(primary_conf);
+
+ return a->grade +
+ ((b->grade * primary_load) / SCALE_FACTOR);
+}
+
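
The combined EMLSR grade is therefore the primary grade plus the secondary grade scaled by how busy the primary channel is, on the reasoning that the secondary link only contributes while the primary is occupied. A small numeric example with made-up grades and load:

#include <stdio.h>

#define SCALE_FACTOR 256

int main(void)
{
	/* made-up numbers: primary grade 2064, secondary grade 1548,
	 * primary channel 25% busy (64/256); the secondary link only adds
	 * throughput while the primary is busy, hence the scaling */
	unsigned int a = 2064, b = 1548, primary_load = 64;

	printf("esr grade = %u\n", a + b * primary_load / SCALE_FACTOR);
	return 0;
}
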
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mvm_link_sel_data *best_link;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ u8 best, primary_link, best_in_pair, n_data;
+ u16 max_esr_grade = 0, new_active_links;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+
+ if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
+ return;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* The logic below is a simple version that doesn't suit more than 2
+ * links
+ */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
+ &best);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ best_link = &data[best];
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+
+ /* eSR is not supported/blocked, or only one usable link */
+ if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
+ mvmvif->esr_disable_reason || n_data == 1)
+ goto set_active;
+
+ for (u8 a = 0; a < n_data; a++)
+ for (u8 b = a + 1; b < n_data; b++) {
+ u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
+ &data[b],
+ &best_in_pair);
+
+ if (esr_grade <= max_esr_grade)
+ continue;
+
+ max_esr_grade = esr_grade;
+ primary_link = best_in_pair;
+ new_active_links = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+
+ /* No valid pair was found, go with the best link */
+ if (hweight16(new_active_links) <= 1)
+ goto set_active;
+
+ /* For equal grade - prefer EMLSR */
+ if (best_link->grade > max_esr_grade) {
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+ }
+set_active:
+ IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
+ new_active_links, primary_link);
+ ieee80211_set_active_links_async(vif, new_active_links);
+ mvmvif->link_selection_res = new_active_links;
+ mvmvif->link_selection_primary = primary_link;
+}
+
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* relevant data is written with both locks held, so read with either */
+ lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) ||
+ lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx));
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (mvmvif->esr_active &&
+ !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links)))
+ return mvmvif->primary_link;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
+
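
For the two-link case, "the other link" is obtained by clearing our own bit from active_links and taking the lowest remaining set bit. The same bit trick in standalone form (__builtin_ctz stands in for the kernel's __ffs here; the link numbers are an example):

#include <stdio.h>

int main(void)
{
	unsigned int active_links = 0x5;	/* example: links 0 and 2 active */
	unsigned int link_id = 2;

	/* clear our own bit, then take the lowest remaining set bit,
	 * mirroring __ffs(vif->active_links & ~BIT(link_id)) */
	printf("other link = %d\n",
	       __builtin_ctz(active_links & ~(1u << link_id)));
	return 0;
}
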
+/* Reasons that can cause esr prevention */
+#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
+#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
+#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
+#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
+
+static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ enum iwl_mvm_esr_state reason)
+{
+ bool timeout_expired = time_after(jiffies,
+ mvmvif->last_esr_exit.ts +
+ IWL_MVM_PREVENT_ESR_TIMEOUT);
+ unsigned long delay;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Only handle reasons that can cause prevention */
+ if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
+ return false;
+
+ /*
+ * Reset the counter if more than 400 seconds have passed between one
+ * exit and the other, or if we exited due to a different reason.
+ * Will also reset the counter after the long prevention is done.
+ */
+ if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
+ mvmvif->exit_same_reason_count = 1;
+ return false;
+ }
+
+ mvmvif->exit_same_reason_count++;
+ if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
+ mvmvif->exit_same_reason_count > 3))
+ return false;
+
+ mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
+
+ /*
+ * For the second exit, use a short prevention, and for the third one,
+ * use a long prevention.
+ */
+ delay = mvmvif->exit_same_reason_count == 2 ?
+ IWL_MVM_ESR_PREVENT_SHORT :
+ IWL_MVM_ESR_PREVENT_LONG;
+
+ IWL_DEBUG_INFO(mvm,
+ "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+ delay / HZ, mvmvif->exit_same_reason_count,
+ iwl_get_esr_state_string(reason), reason);
+
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk, delay);
+ return true;
+}
+
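
The prevention policy above amounts to a small backoff schedule: exits for the same reason within 400 seconds of each other escalate from no prevention, to a 300 second block, to a 600 second block, after which the counter resets. A standalone sketch of that schedule (the function name is invented; the times come from the defines above):

#include <stdio.h>

/* Illustrative policy function (not driver code): how long EMLSR stays
 * blocked after the Nth exit for the same reason within the 400 s window. */
static unsigned int prevention_secs(unsigned int same_reason_exits)
{
	if (same_reason_exits < 2)
		return 0;				/* first exit: no prevention */
	return same_reason_exits == 2 ? 300 : 600;	/* short, then long */
}

int main(void)
{
	for (unsigned int n = 1; n <= 3; n++)
		printf("exit #%u -> block for %u s\n", n, prevention_secs(n));
	return 0;
}
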
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
+
+/* API to exit eSR mode */
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 new_active_links;
+ bool prevented;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* Nothing to do */
+ if (!mvmvif->esr_active)
+ return;
+
+ if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
+ return;
+
+ if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+ link_to_keep = __ffs(vif->active_links);
+
+ new_active_links = BIT(link_to_keep);
+ IWL_DEBUG_INFO(mvm,
+ "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+ iwl_get_esr_state_string(reason), reason,
+ vif->active_links, new_active_links);
+
+ ieee80211_set_active_links_async(vif, new_active_links);
+
+ /* Prevent EMLSR if needed */
+ prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
+
+ /* Remember why and when we exited EMLSR */
+ mvmvif->last_esr_exit.ts = jiffies;
+ mvmvif->last_esr_exit.reason = reason;
+
+ /*
+ * If EMLSR is prevented now - don't try to get back to EMLSR.
+ * If we exited due to a blocking event, we will try to get back to
+ * EMLSR when the corresponding unblocking event will happen.
+ */
+ if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
+ return;
+
+ /* If EMLSR is not blocked - try enabling it again in 30 seconds */
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk,
+ round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
+}
+
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ if (mvmvif->esr_disable_reason & reason)
+ return;
+
+ IWL_DEBUG_INFO(mvm,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+
+ mvmvif->esr_disable_reason |= reason;
+
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
+}
+
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ int primary_link = iwl_mvm_get_primary_link(vif);
+ int ret;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* This should be called only with blocking reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return 0;
+
+ /* leave ESR immediately, not only async with iwl_mvm_block_esr() */
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ /* only additionally block for consistency and to avoid concurrency */
+ iwl_mvm_block_esr(mvm, vif, reason, primary_link);
+ mutex_unlock(&mvm->mutex);
+
+ return 0;
+}
+
+static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
+ IWL_MVM_TRIGGER_LINK_SEL_TIME);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
+ mvmvif->esr_active)
+ return;
+
+ IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
+
+ /* If we exited due to an EXIT reason, and the exit was in less than
+ * 30 seconds, then a MLO scan was scheduled already.
+ */
+ if (!need_new_sel &&
+ !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
+ IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
+ return;
+ }
+
+ /*
+ * If EMLSR was blocked for more than 30 seconds, or the last link
+ * selection decided to not enter EMLSR, trigger a new scan.
+ */
+ if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
+ IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
+ /*
+ * If EMLSR was blocked for less than 30 seconds, and the last link
+ * selection decided to use EMLSR, activate EMLSR using the previous
+ * link selection result.
+ */
+ } else {
+ IWL_DEBUG_INFO(mvm,
+ "Use the latest link selection result: 0x%x\n",
+ mvmvif->link_selection_res);
+ ieee80211_set_active_links_async(vif,
+ mvmvif->link_selection_res);
+ }
+}
+
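
Once the last block is lifted, the driver either waits for an already scheduled MLO scan, triggers a new one, or simply re-applies the previous link selection, depending on how long ago EMLSR was exited and whether that selection actually chose two links. A standalone sketch of that decision (the 30 second figure matches the comments above; the helper name and inputs are illustrative):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative decision helper (not driver code), times in seconds. */
static const char *on_esr_unblocked(unsigned int secs_since_exit,
				    bool exit_was_blocking,
				    int links_in_last_selection)
{
	if (secs_since_exit < 30 && !exit_was_blocking)
		return "wait for the MLO scan that is already scheduled";
	if (secs_since_exit >= 30 || links_in_last_selection < 2)
		return "trigger a new MLO scan";
	return "re-activate the previous EMLSR link pair";
}

int main(void)
{
	/* blocking reason lifted 10 s after exit, last selection chose 2 links */
	printf("%s\n", on_esr_unblocked(10, true, 2));
	return 0;
}
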
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ /* No Change */
+ if (!(mvmvif->esr_disable_reason & reason))
+ return;
+
+ mvmvif->esr_disable_reason &= ~reason;
+
+ IWL_DEBUG_INFO(mvm,
+ "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ if (!mvmvif->esr_disable_reason)
+ iwl_mvm_esr_unblocked(mvm, vif);
+}
diff --git a/mvm/mac-ctxt.c b/mvm/mac-ctxt.c
index 7369a45f7f2b..dfcc96f18b4f 100644
--- a/mvm/mac-ctxt.c
+++ b/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_TRIG_TX_FIFO_BK,
};
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
@@ -285,6 +296,11 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
mvmvif->time_event_data.id = TE_MAX;
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+ mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
/* No need to allocate data queues to P2P Device MAC and NAN.*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
@@ -300,10 +316,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
}
- mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
- mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
- mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA;
-
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
@@ -455,7 +467,7 @@ void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
/* Protect when channel wider than 20MHz */
- if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20)
+ if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
*protection_flags |= cpu_to_le32(ht_flag);
break;
default:
@@ -494,7 +506,7 @@ void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (link_conf->qos)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
}
@@ -862,7 +874,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
struct ieee80211_mgmt *mgmt = (void *)beacon;
const u8 *ie;
@@ -910,8 +922,8 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (link_conf) {
basic = link_conf->basic_rates;
- if (link_conf->chandef.chan)
- band = link_conf->chandef.chan->band;
+ if (link_conf->chanreq.oper.chan)
+ band = link_conf->chanreq.oper.chan->band;
}
rcu_read_unlock();
}
@@ -999,12 +1011,13 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
tx->tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
- tx->rate_n_flags =
- cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
- RATE_MCS_ANT_POS);
+ tx->rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+ }
rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
@@ -1083,6 +1096,19 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
sizeof(beacon_cmd));
}
+bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (IWL_MVM_DISABLE_AP_FILS)
+ return false;
+
+ if (cfg80211_channel_is_psc(ctx->def.chan))
+ return true;
+
+ return (ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ ctx->def.width >= NL80211_CHAN_WIDTH_80);
+}
+
static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
@@ -1102,8 +1128,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
ctx = rcu_dereference(link_conf->chanctx_conf);
channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq);
WARN_ON(channel == 0);
- if (cfg80211_channel_is_psc(ctx->def.chan) &&
- !IWL_MVM_DISABLE_AP_FILS) {
+ if (iwl_mvm_enable_fils(mvm, ctx)) {
flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD,
0) > 10 ?
IWL_MAC_BEACON_FILS :
@@ -1140,6 +1165,13 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon->len));
+ if (vif->type == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14)
+ beacon_cmd.btwt_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
+
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
@@ -1454,8 +1486,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
- int c = ieee80211_beacon_update_cntdwn(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif, 0);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif,
&csa_vif->bss_conf);
@@ -1474,7 +1506,7 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
/* we don't have CSA NoA scheduled yet, switch now */
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
RCU_INIT_POINTER(mvm->csa_vif, NULL);
}
}
@@ -1568,23 +1600,23 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(mb->link_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
u32 mac_type;
+ int link_id = -1;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
- rcu_read_lock();
-
/* before version four the ID in the notification refers to mac ID */
if (notif_ver < 4) {
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
} else {
struct ieee80211_bss_conf *bss_conf =
- iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);
+ iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, false);
if (!bss_conf)
- goto out;
+ return;
vif = bss_conf->vif;
+ link_id = bss_conf->link_id;
}
IWL_DEBUG_INFO(mvm,
@@ -1597,7 +1629,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
le32_to_cpu(mb->num_expected_beacons));
if (!vif)
- goto out;
+ return;
mac_type = iwl_mvm_get_mac_type(vif);
@@ -1614,10 +1646,26 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
- iwl_mvm_connection_loss(mvm, vif, "missed beacons");
- else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
- ieee80211_beacon_loss(vif);
+ if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) {
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ } else {
+ IWL_WARN(mvm,
+ "missed beacons exceeds threshold, but receiving data. Stay connected, Expect bugs.\n");
+ IWL_WARN(mvm,
+ "missed_beacons:%d, missed_beacons_since_rx:%d\n",
+ rx_missed_bcon, rx_missed_bcon_since_rx);
+ }
+ } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH &&
+ link_id >= 0 && hweight16(vif->active_links) > 1) {
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON,
+ iwl_mvm_get_other_link(vif, link_id));
+ } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ ieee80211_beacon_loss(vif);
+ else
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+ }
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
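
The missed-beacon handling above is a tiered decision: past the long threshold the connection is declared lost unless data frames are still arriving; past the smaller EMLSR threshold with two active links the driver merely drops back to a single link; otherwise a plain beacon-loss event is reported. A standalone sketch of that ladder (the threshold names and values here are placeholders; the real ones are the IWL_MVM_MISSED_BEACONS_* constants in mvm/constants.h):

#include <stdio.h>

/* Placeholder thresholds for illustration only. */
#define THOLD_LONG	16
#define THOLD_ESR	 8
#define THOLD_SINCE_RX	 8

static const char *missed_beacon_action(int missed, int missed_since_rx,
					 int active_links)
{
	if (missed >= THOLD_LONG)
		return missed_since_rx >= THOLD_SINCE_RX ?
			"connection loss" :
			"stay connected, data is still arriving";
	if (missed >= THOLD_ESR && active_links > 1)
		return "exit EMLSR, keep the other link";
	if (missed_since_rx > THOLD_SINCE_RX)
		return "report beacon loss";
	return "no action";
}

int main(void)
{
	/* 10 missed beacons, data seen recently, two active links */
	printf("%s\n", missed_beacon_action(10, 2, 2));
	return 0;
}
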
@@ -1625,7 +1673,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
- goto out;
+ return;
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
@@ -1637,9 +1685,6 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
-
-out:
- rcu_read_unlock();
}
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
@@ -1761,6 +1806,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
u32 id_n_color, csa_id;
/* save mac_id or link_id to use later to cancel csa if needed */
u32 id;
+ u32 mac_link_id = 0;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
CHANNEL_SWITCH_START_NOTIF, 0);
bool csa_active;
@@ -1790,6 +1836,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
goto out_unlock;
id = link_id;
+ mac_link_id = bss_conf->link_id;
vif = bss_conf->vif;
csa_active = bss_conf->csa_active;
}
@@ -1818,7 +1865,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
csa_vif->bss_conf.beacon_int));
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
@@ -1839,7 +1886,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
- ieee80211_chswitch_done(vif, true);
+ ieee80211_chswitch_done(vif, true, mac_link_id);
break;
default:
/* should never happen */
diff --git a/mvm/mac80211.c b/mvm/mac80211.c
index ce7905faa08f..625ccf566e1c 100644
--- a/mvm/mac80211.c
+++ b/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -22,7 +22,7 @@
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "fw/error-dump.h"
@@ -30,21 +30,28 @@
#include "iwl-nvm-parse.h"
#include "time-sync.h"
+#define IWL_MVM_LIMITS(ap) \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_STATION), \
+ }, \
+ { \
+ .max = 1, \
+ .types = ap | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO), \
+ }, \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
+ }
+
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
+ IWL_MVM_LIMITS(0)
+};
+
+static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = {
+ IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
};
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
@@ -54,6 +61,12 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
.limits = iwl_mvm_limits,
.n_limits = ARRAY_SIZE(iwl_mvm_limits),
},
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits_ap,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap),
+ },
};
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
@@ -152,6 +165,16 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
mvm->lar_regdom_set = true;
mvm->mcc_src = src_id;
+ /* Some kind of regulatory mess means we need to currently disallow
+ * puncturing in the US and Canada. Do that here, at least until we
+ * figure out the new chanctx APIs for puncturing.
+ */
+ if (resp->mcc == cpu_to_le16(IWL_MCC_US) ||
+ resp->mcc == cpu_to_le16(IWL_MCC_CANADA))
+ ieee80211_hw_set(mvm->hw, DISALLOW_PUNCTURING);
+ else
+ __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mvm->hw->flags);
+
iwl_mei_set_country_code(__le16_to_cpu(resp->mcc));
out:
@@ -186,7 +209,7 @@ struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
MCC_SOURCE_OLD_FW, changed);
}
-int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync)
{
enum iwl_mcc_source used_src;
struct ieee80211_regdomain *regd;
@@ -213,8 +236,10 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
if (IS_ERR_OR_NULL(regd))
return -EIO;
- /* update cfg80211 if the regdomain was changed */
- if (changed)
+ /* update cfg80211 if the regdomain was changed or the caller explicitly
+ * asked to update regdomain
+ */
+ if (changed || force_regd_sync)
ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
else
ret = 0;
@@ -251,6 +276,9 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -260,6 +288,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
{
.iftype = NL80211_IFTYPE_STATION,
@@ -268,6 +297,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
};
@@ -279,6 +309,30 @@ int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
+int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* This has been tested on those devices only */
+ if (mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
+ mvm->trans->trans_cfg->device_family != IWL_DEVICE_FAMILY_22000)
+ return -EOPNOTSUPP;
+
+ if (!mvm->nvm_data)
+ return -EBUSY;
+
+ /* mac80211 ensures the device is not started,
+ * so the firmware cannot be running
+ */
+
+ mvm->set_tx_ant = tx_ant;
+ mvm->set_rx_ant = rx_ant;
+
+ iwl_reinit_cab(mvm->trans, mvm->nvm_data, tx_ant, rx_ant, mvm->fw);
+
+ return 0;
+}
+
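
The tx_ant/rx_ant arguments are bitmasks with one bit per antenna chain (bit 0 for the first chain, bit 1 for the second, and so on), which is why the saved values can be passed straight through to iwl_reinit_cab(). A trivial illustration of the mask convention (the values are just an example):

#include <stdio.h>

int main(void)
{
	/* example masks: two TX chains enabled, one RX chain enabled */
	unsigned int tx_ant = 0x3, rx_ant = 0x1;

	printf("tx chains: %d, rx chains: %d\n",
	       __builtin_popcount(tx_ant), __builtin_popcount(rx_ant));
	return 0;
}
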
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
@@ -315,9 +369,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, STA_MMPDU_TXQ);
/* Set this early since we need to have it for the check below */
- if (mvm->mld_api_is_used &&
- mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
+ !iwlwifi_mod_params.disable_11ax &&
+ !iwlwifi_mod_params.disable_11be) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ /* we handle this already earlier, but need it for MLO */
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ }
/* With MLD FW API, it tracks timing by itself,
* no need for any timing from the host
@@ -325,12 +383,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (!mvm->mld_api_is_used)
ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
- /* We should probably have this, but mac80211
- * currently doesn't support it for MLO.
- */
- if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
- ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
-
/*
* On older devices, enabling TX A-MSDU occasionally leads to
* something getting messed up, the command read from the FIFO
@@ -351,7 +403,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
}
- if (iwl_mvm_has_new_rx_api(mvm))
+ /* We want to use the mac80211's reorder buffer for 9000 */
+ if (iwl_mvm_has_new_rx_api(mvm) &&
+ mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_9000)
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -451,6 +505,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
hw->wiphy->hw_timestamp_max_peers = 1;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -488,6 +547,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_DFS_CONCURRENT);
+
hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;
@@ -497,7 +560,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ARRAY_SIZE(iwl_mvm_iface_combinations);
hw->wiphy->max_remain_on_channel_duration = 10000;
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+ hw->max_listen_interval = IWL_MVM_CONN_LISTEN_INTERVAL;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -522,13 +585,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
- BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
- IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+ BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+ mvm->max_scans = IWL_MAX_UMAC_SCANS;
else
- mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+ mvm->max_scans = IWL_MAX_LMAC_SCANS;
if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[NL80211_BAND_2GHZ] =
@@ -597,7 +660,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
- IWL_FW_CMD_VER_UNKNOWN) == 3)
+ IWL_FW_CMD_VER_UNKNOWN) >= 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
if (fw_has_api(&mvm->fw->ucode_capa,
@@ -652,12 +715,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
}
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 11) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SECURE_LTF);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
- mvm->trans->ops->d3_suspend &&
- mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -752,7 +827,7 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
if (offchannel &&
- !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+ !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@@ -837,6 +912,8 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
&mvmtxq->state) &&
!test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
&mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA,
+ &mvmtxq->state) &&
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
@@ -1031,7 +1108,14 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
- memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ mvmvif->bf_enabled = false;
+ mvmvif->ba_enabled = false;
+ mvmvif->ap_sta = NULL;
+
+ mvmvif->esr_active = false;
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
for_each_mvm_vif_valid_link(mvmvif, link_id) {
mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA;
@@ -1039,6 +1123,8 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
mvmvif->link[link_id]->phy_ctxt = NULL;
mvmvif->link[link_id]->active = 0;
mvmvif->link[link_id]->igtk = NULL;
+ memset(&mvmvif->link[link_id]->bf_data, 0,
+ sizeof(mvmvif->link[link_id]->bf_data));
}
probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
@@ -1048,6 +1134,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
}
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct ieee80211_vif *vif;
+ int link_id;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvm_sta->vif;
+
+ if (!sta->valid_links)
+ return;
+
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ mvm_link_sta =
+ rcu_dereference_check(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+ /*
+ * We have a link STA but the link is inactive in
+			 * deactivate the link but mac80211 rolled back the
+ * deactivate the link but mac80211 roll back the
+ * deactivation of the link.
+ * Delete the stale data to avoid issues later on.
+ */
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+ link_id, false);
+ }
+ }
+}
+
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_mvm_stop_device(mvm);
@@ -1070,6 +1189,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
*/
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+ /* cleanup stations as links may be gone after restart */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_cleanup_sta_iterator, mvm);
+
mvm->p2p_device_vif = NULL;
iwl_mvm_reset_phy_ctxts(mvm);
@@ -1092,6 +1215,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
+ bool fast_resume = false;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1117,6 +1241,30 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
+#ifdef CONFIG_PM
+ /* fast_resume will be cleared by iwl_mvm_fast_resume */
+ fast_resume = mvm->fast_resume;
+
+ if (fast_resume) {
+ ret = iwl_mvm_fast_resume(mvm);
+ if (ret) {
+ iwl_mvm_stop_device(mvm);
+ /* iwl_mvm_up() will be called further down */
+ } else {
+ /*
+ * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon
+ * mac_down() so that debugfs will stop honoring
+ * requests after we flush all the workers.
+ * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again
+ * now that we are back. This is a bit abusing the
+ * flag since the firmware wasn't really ever stopped,
+ * but this still serves the purpose.
+ */
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ }
+ }
+#endif /* CONFIG_PM */
+
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@@ -1127,7 +1275,10 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
/* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm);
}
- ret = iwl_mvm_up(mvm);
+
+ /* we also want to load the firmware if fast_resume failed */
+ if (!fast_resume || ret)
+ ret = iwl_mvm_up(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
NULL);
@@ -1151,14 +1302,12 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
- max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
@@ -1166,23 +1315,7 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
- for (retry = 0; retry <= max_retry; retry++) {
- ret = __iwl_mvm_mac_start(mvm);
- if (!ret)
- break;
-
- /*
- * In PLDR sync PCI re-enumeration is needed. no point to retry
- * mac start before that.
- */
- if (mvm->pldr_sync) {
- iwl_mei_alive_notif(false);
- iwl_trans_pcie_remove(mvm->trans, true);
- break;
- }
-
- IWL_ERR(mvm, "mac start retry %d\n", retry);
- }
+ ret = __iwl_mvm_mac_start(mvm);
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1196,7 +1329,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
@@ -1212,8 +1345,6 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
* of packets the FW sent out, so we must reconnect.
*/
iwl_mvm_teardown_tdls_peers(mvm);
-
- mutex_unlock(&mvm->mutex);
}
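Many lock/unlock pairs in this update are converted to the scope-based guard(mvm)(mvm) pattern from linux/cleanup.h, which drops the mutex automatically on every return path. A rough sketch of how such a guard is defined and used follows; the actual DEFINE_GUARD() invocation lives in mvm/mvm.h and is assumed here, not shown in this hunk:

	#include <linux/cleanup.h>

	/* Defines guard(mvm)(p): takes p->mutex now, releases it at scope exit. */
	DEFINE_GUARD(mvm, struct iwl_mvm *,
		     mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))

	static int example_locked_op(struct iwl_mvm *mvm)
	{
		guard(mvm)(mvm);	/* mutex held from here on */
		if (!mvm->fw)
			return -ENODEV;	/* early return still unlocks */
		return 0;
	}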
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
@@ -1230,7 +1361,7 @@ void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
}
}
-void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
{
lockdep_assert_held(&mvm->mutex);
@@ -1246,7 +1377,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
if (!iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
- iwl_mvm_stop_device(mvm);
+ if (suspend &&
+ mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_mvm_fast_suspend(mvm);
+ else
+ iwl_mvm_stop_device(mvm);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -1279,10 +1414,17 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
}
}
-void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ /* Stop internal MLO scan, if running */
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ mutex_unlock(&mvm->mutex);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
+ wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
@@ -1308,7 +1450,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
iwl_mvm_mei_set_sw_rfkill_state(mvm);
mutex_lock(&mvm->mutex);
- __iwl_mvm_mac_stop(mvm);
+ __iwl_mvm_mac_stop(mvm, suspend);
mutex_unlock(&mvm->mutex);
/*
@@ -1316,6 +1458,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
* discover that its list is now empty.
*/
cancel_work_sync(&mvm->async_handlers_wk);
+ wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
}
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
@@ -1349,7 +1492,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 7)
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
+ else if (cmd_ver == 7)
len = sizeof(cmd.v7);
else if (cmd_ver == 6)
len = sizeof(cmd.v6);
@@ -1368,8 +1513,23 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
+static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]);
+ }
+}
+
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -1379,10 +1539,12 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_STATION) {
struct iwl_mvm_sta *mvmsta;
+ unsigned int link_id = link_conf->link_id;
+ u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id;
mvmvif->csa_bcn_pending = false;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->deflink.ap_sta_id);
+ mvmvif->csa_blocks_tx = false;
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
if (WARN_ON(!mvmsta)) {
ret = -EIO;
@@ -1397,12 +1559,24 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
goto out_unlock;
iwl_mvm_stop_session_protection(mvm, vif);
}
+ } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(vif->txq);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+
+ local_bh_disable();
+ iwl_mvm_mac_itxq_xmit(hw, vif->txq);
+ ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw);
+ local_bh_enable();
+
+ mvmvif->csa_blocks_tx = false;
}
mvmvif->ps_disabled = false;
@@ -1418,7 +1592,8 @@ out_unlock:
}
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1451,7 +1626,8 @@ void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
mvmvif->csa_failed = true;
mutex_unlock(&mvm->mutex);
- iwl_mvm_post_channel_switch(hw, vif);
+ /* If we're here, we can't support MLD */
+ iwl_mvm_post_channel_switch(hw, vif, &vif->bss_conf);
}
void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
@@ -1463,7 +1639,7 @@ void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
/* Trigger disconnect (should clear the CSA state) */
- ieee80211_chswitch_done(vif, false);
+ ieee80211_chswitch_done(vif, false, 0);
}
static u8
@@ -1510,20 +1686,85 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
+static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvm);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
+}
+
+static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
+ mlo_int_scan_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvmvif->mvm);
+ iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvm);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
+}
+
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
+
+ wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
+ iwl_mvm_prevent_esr_done_wk);
+
+ wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
+ iwl_mvm_mlo_int_scan_wk);
+
+ wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
+ iwl_mvm_unblock_esr_tpt);
+}
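The works initialized above are wiphy works: their callbacks run with the wiphy mutex held, so they must be queued and cancelled with the wiphy-aware helpers (the matching cancels appear in iwl_mvm_prepare_mac_removal() further down). A minimal sketch of the queue/cancel pairing, assuming the cfg80211 wiphy_work API:

	/* queue: the callback will run with the wiphy mutex held */
	wiphy_delayed_work_queue(mvm->hw->wiphy, &mvmvif->mlo_int_scan_wk,
				 round_jiffies_relative(HZ));

	/* teardown: must be cancelled against the same wiphy */
	wiphy_delayed_work_cancel(mvm->hw->wiphy, &mvmvif->mlo_int_scan_wk);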
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ int i;
mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
mvmvif->mvm = mvm;
/* the first link always points to the default one */
+ mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+ mvmvif->deflink.active = 0;
mvmvif->link[0] = &mvmvif->deflink;
+ ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out;
+
/*
* Not much to do here. The stack will not allow interface
* types or combinations that we didn't advertise, so we
@@ -1532,8 +1773,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
/* make sure that beacon statistics don't go backwards with FW reset */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- mvmvif->deflink.beacon_stats.accu_num_beacons +=
- mvmvif->deflink.beacon_stats.num_beacons;
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ mvmvif->link[i]->beacon_stats.accu_num_beacons +=
+ mvmvif->link[i]->beacon_stats.num_beacons;
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
@@ -1561,7 +1803,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
*/
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
- iwl_mvm_vif_dbgfs_register(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
ret = 0;
goto out;
}
@@ -1577,7 +1820,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_remove_mac;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -1588,44 +1831,19 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- /*
- * P2P_DEVICE interface does not have a channel context assigned to it,
- * so a dedicated PHY context is allocated to it and the corresponding
- * MAC context is bound to it at this stage.
- */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-
- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
- if (!mvmvif->deflink.phy_ctxt) {
- ret = -ENOSPC;
- goto out_free_bf;
- }
-
- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
- ret = iwl_mvm_binding_add_vif(mvm, vif);
- if (ret)
- goto out_unref_phy;
-
- ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
- if (ret)
- goto out_unbind;
-
- /* Save a pointer to p2p device vif, so it can later be used to
- * update the p2p device MAC when a GO is started/stopped */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
- }
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
mvm->monitor_p80 =
- iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
+ iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
- iwl_mvm_vif_dbgfs_register(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -1642,16 +1860,6 @@ out:
goto out_unlock;
- out_unbind:
- iwl_mvm_binding_remove_vif(mvm, vif);
- out_unref_phy:
- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
- out_free_bf:
- if (mvm->bf_allowed_vif == mvmvif) {
- mvm->bf_allowed_vif = NULL;
- vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
- IEEE80211_VIF_SUPPORTS_CQM_RSSI);
- }
out_remove_mac:
mvmvif->deflink.phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1664,6 +1872,8 @@ out:
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
/*
* Flush the ROC worker which will flush the OFFCHANNEL queue.
@@ -1672,14 +1882,20 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
*/
flush_work(&mvm->roc_done_wk);
}
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+
+ cancel_delayed_work_sync(&mvmvif->csa_work);
}
-/* This function is doing the common part of removing the interface for
- * both - MLD and non-MLD modes. Returns true if removing the interface
- * is done
- */
-static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1713,7 +1929,7 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
if (vif->bss_conf.ftm_responder)
memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));
- iwl_mvm_vif_dbgfs_clean(mvm, vif);
+ iwl_mvm_vif_dbgfs_rm_link(mvm, vif);
/*
* For AP/GO interface, the tear down of the resources allocated to the
@@ -1727,30 +1943,25 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
- return true;
+ goto out;
}
iwl_mvm_power_update_mac(mvm);
- return false;
-}
-
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (iwl_mvm_mac_remove_interface_common(hw, vif))
- goto out;
+ /* Before the interface removal, mac80211 would cancel the ROC, and the
+ * ROC worker would be scheduled if needed. The worker would be flushed
+	 * in iwl_mvm_prepare_mac_removal(), and thus at this point there is no
+	 * binding etc., so nothing needs to be done here.
+ */
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (mvmvif->deflink.phy_ctxt) {
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+ mvmvif->deflink.phy_ctxt = NULL;
+ }
mvm->p2p_device_vif = NULL;
- iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
- iwl_mvm_binding_remove_vif(mvm, vif);
- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
- mvmvif->deflink.phy_ctxt = NULL;
}
+ iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);
iwl_mvm_mac_ctxt_remove(mvm, vif);
RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
@@ -1883,7 +2094,7 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* replace previous configuration */
kfree(mvm->mcast_filter_cmd);
@@ -1900,7 +2111,6 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
iwl_mvm_recalc_multicast(mvm);
out:
- mutex_unlock(&mvm->mutex);
*total_flags = 0;
}
@@ -1920,9 +2130,8 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
!vif->p2p)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2472,7 +2681,7 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
}
void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u32 duration_override)
+ u32 duration_override, unsigned int link_id)
{
u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
@@ -2492,7 +2701,8 @@ void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, 900,
- min_duration, false);
+ min_duration, false,
+ link_id);
else
iwl_mvm_protect_session(mvm, vif, duration,
min_duration, 500, false);
@@ -2505,6 +2715,7 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ int link_id;
/* The firmware tracks the MU-MIMO group on its own.
* However, on HW restart we should restore this data.
@@ -2520,7 +2731,8 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
iwl_mvm_recalc_multicast(mvm);
/* reset rssi values */
- mvmvif->bf_data.ave_beacon_signal = 0;
+ for_each_mvm_vif_valid_link(mvmvif, link_id)
+ mvmvif->link[link_id]->bf_data.ave_beacon_signal = 0;
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT,
@@ -2547,7 +2759,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
}
if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
@@ -2561,14 +2773,18 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_CQM) {
- IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
- /* reset cqm events tracking */
- mvmvif->bf_data.last_cqm_event = 0;
- if (mvmvif->bf_data.bf_enabled) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ IWL_DEBUG_MAC80211(mvm, "CQM info_changed\n");
+ if (link_info)
+ link_info->bf_data.last_cqm_event = 0;
+
+ if (mvmvif->bf_enabled) {
/* FIXME: need to update per link when FW API will
* support it
*/
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
IWL_ERR(mvm,
"failed to update CQM thresholds\n");
@@ -2577,6 +2793,13 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_update_link_smps(vif, link_conf);
+
+ if (changes & BSS_CHANGED_TPE) {
+ IWL_DEBUG_CALIB(mvm, "Changing TPE\n");
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+ }
}
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
@@ -2586,6 +2809,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ int i;
/*
* Re-calculate the tsf id, as the leader-follower relations depend
@@ -2594,9 +2818,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be))
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@@ -2605,10 +2827,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
vif->cfg.assoc &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
/*
@@ -2630,10 +2849,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
- memset(&mvmvif->deflink.beacon_stats, 0,
- sizeof(mvmvif->deflink.beacon_stats));
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
/* add quota for this interface */
ret = iwl_mvm_update_quotas(mvm, true, NULL);
@@ -2680,7 +2902,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* time could be small without us having heard
* a beacon yet.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ iwl_mvm_protect_assoc(mvm, vif, 0, 0);
}
iwl_mvm_sf_update(mvm, vif, false);
@@ -2963,7 +3185,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_ap_ibss_common(mvm, vif);
@@ -2993,8 +3215,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_stop_ap(struct ieee80211_hw *hw,
@@ -3047,37 +3267,20 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u64 changes)
{
- static const struct iwl_mvm_bss_info_changed_ops callbacks = {
- .bss_info_changed_sta = iwl_mvm_bss_info_changed_station,
- .bss_info_changed_ap_ibss = iwl_mvm_bss_info_changed_ap_ibss,
- };
-
- iwl_mvm_bss_info_changed_common(hw, vif, bss_conf, &callbacks,
- changes);
-}
-
-void
-iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- const struct iwl_mvm_bss_info_changed_ops *callbacks,
- u64 changes)
-{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- callbacks->bss_info_changed_sta(mvm, vif, bss_conf, changes);
+ iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
- callbacks->bss_info_changed_ap_ibss(mvm, vif, bss_conf,
- changes);
+ iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
break;
case NL80211_IFTYPE_MONITOR:
if (changes & BSS_CHANGED_MU_GROUPS)
@@ -3093,25 +3296,19 @@ iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw,
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
-
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
if (hw_req->req.n_channels == 0 ||
hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
}
void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -3119,7 +3316,7 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* Due to a race condition, it's possible that mac80211 asks
* us to stop a hw_scan when it's already stopped. This can
@@ -3130,8 +3327,6 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
*/
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
-
- mutex_unlock(&mvm->mutex);
}
void
@@ -3300,7 +3495,7 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
* Since there's mvm->mutex here, no need to have RCU lock for
* mvm_sta->link access.
*/
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) {
struct iwl_mvm_link_sta *link_sta;
u32 sta_id;
@@ -3317,7 +3512,6 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
}
}
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -3420,16 +3614,16 @@ iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (WARN_ON_ONCE(!link_conf->chandef.chan ||
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan ||
!mvmvif->link[link_id]))
return;
- if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) {
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
mvmvif->link[link_id]->he_ru_2mhz_block = false;
return;
}
- cfg80211_bss_iter(hw->wiphy, &link_conf->chandef,
+ cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper,
iwl_mvm_check_he_obss_narrow_bw_ru_iter,
&iter_data);
@@ -3489,10 +3683,10 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
/* FIXME: MEI needs to be updated for MLO */
- if (!vif->bss_conf.chandef.chan)
+ if (!vif->bss_conf.chanreq.oper.chan)
return;
- conn_info.channel = vif->bss_conf.chandef.chan->hw_value;
+ conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_TKIP:
@@ -3593,8 +3787,6 @@ static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm,
}
}
-#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
-
static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -3689,6 +3881,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
NL80211_TDLS_SETUP);
}
+ if (ret)
+ return ret;
+
for_each_sta_active_link(vif, sta, link_sta, i)
link_sta->agg.max_rc_amsdu_len = 1;
@@ -3697,6 +3892,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mvmvif->ap_sta = sta;
+ /*
+	 * Initialize the rates here already - this really only tells
+	 * the firmware what the supported legacy rates may be, since
+	 * they're initialized already from what the AP advertised in
+	 * the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
return 0;
}
@@ -3723,10 +3931,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
* the default bss_conf
*/
if (!mvm->mld_api_is_used &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_vif_set_he_support(hw, vif, sta, true);
@@ -3778,16 +3984,47 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
NL80211_TDLS_ENABLE_LINK);
} else {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
mvmvif->authorized = 1;
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ mvmvif->link_selection_res = vif->active_links;
+ mvmvif->link_selection_primary =
+ vif->active_links ? __ffs(vif->active_links) : 0;
+ }
+
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
+
+ memset(&mvmvif->last_esr_exit, 0,
+ sizeof(mvmvif->last_esr_exit));
+
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
+
+ /* Block until FW notif will arrive */
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
+
+ /* when client is authorized (AP station marked as such),
+ * try to enable the best link(s).
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_select_links(mvm, vif);
}
mvm_sta->authorized = true;
+ /* MFP is set by default before the station is authorized.
+ * Clear it here in case it's not used.
+ */
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
return 0;
@@ -3817,9 +4054,22 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
* time.
*/
mvmvif->authorized = 0;
+ mvmvif->link_selection_res = 0;
/* disable beacon filtering */
- iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ iwl_mvm_disable_beacon_filter(mvm, vif);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+
+ /* No need for the periodic statistics anymore */
+ if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active)
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
}
return 0;
@@ -3877,9 +4127,14 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- /* this would be a mac80211 bug ... but don't crash */
+ /* this would be a mac80211 bug ... but don't crash, unless we had a
+ * firmware crash while we were activating a link, in which case it is
+	 * legit to have phy_ctxt = NULL. We don't bother suppressing the WARN
+	 * in the recovery flow since we spit tons of error messages anyway.
+ */
for_each_sta_active_link(vif, sta, link_sta, link_id) {
- if (WARN_ON_ONCE(!mvmvif->link[link_id]->phy_ctxt)) {
+ if (WARN_ON_ONCE(!mvmvif->link[link_id] ||
+ !mvmvif->link[link_id]->phy_ctxt)) {
mutex_unlock(&mvm->mutex);
return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status) ? 0 : -EINVAL;
@@ -4001,12 +4256,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
* The exception is P2P_DEVICE interface which needs immediate update.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
return 0;
}
@@ -4015,11 +4266,14 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_prep_tx_info *info)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
- iwl_mvm_protect_assoc(mvm, vif, info->duration);
- mutex_unlock(&mvm->mutex);
+ if (info->was_assoc && !mvmvif->session_prot_connection_loss)
+ return;
+
+ guard(mvm)(mvm);
+ iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id);
}
void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
@@ -4032,9 +4286,8 @@ void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
if (info->success)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_session_protection(mvm, vif);
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
@@ -4044,20 +4297,12 @@ int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
+ guard(mvm)(mvm);
- mutex_lock(&mvm->mutex);
-
- if (!vif->cfg.idle) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
+ if (!vif->cfg.idle)
+ return -EBUSY;
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
}
int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
@@ -4155,12 +4400,21 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
* GTK on AP interface is a TX-only key, return 0;
* on IBSS they're per-station and because we're lazy
* we don't support them for RX, so do the same.
- * CMAC/GMAC in AP/IBSS modes must be done in software.
+ * CMAC/GMAC in AP/IBSS modes must be done in software
+ * on older NICs.
*
* Except, of course, beacon protection - it must be
- * offloaded since we just set a beacon template.
+ * offloaded since we just set a beacon template, and
+ * then we must also offload the IGTK (not just BIGTK)
+ * for firmware reasons.
+ *
+ * So just check for beacon protection - if we don't
+ * have it we cannot get here with keyidx >= 6, and
+ * if we do have it we need to send the key to FW in
+ * all cases (CMAC/GMAC).
*/
- if (keyidx < 6 &&
+ if (!wiphy_ext_feature_isset(hw->wiphy,
+ NL80211_EXT_FEATURE_BEACON_PROTECTION) &&
(key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) {
@@ -4326,13 +4580,9 @@ int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
}
void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -4384,11 +4634,6 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
-#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
-#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
-#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
-#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -4399,8 +4644,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
static const u16 time_event_response[] = { HOT_SPOT_CMD };
struct iwl_notification_wait wait_time_event;
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
u32 req_dur, delay;
struct iwl_hs20_roc_req aux_roc_req = {
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
@@ -4421,29 +4664,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the time and duration */
tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));
- delay = AUX_ROC_MIN_DELAY;
- req_dur = MSEC_TO_TU(duration);
-
- /*
- * If we are associated we want the delay time to be at least one
- * dtim interval so that the FW can wait until after the DTIM and
- * then start the time event, this will potentially allow us to
- * remain off-channel for the max duration.
- * Since we want to use almost a whole dtim interval we would also
- * like the delay to be for 2-3 dtim intervals, in case there are
- * other time events with higher priority.
- */
- if (vif->cfg.assoc) {
- delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
- /* We cannot remain off-channel longer than the DTIM interval */
- if (dtim_interval <= req_dur) {
- req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
- if (req_dur <= AUX_ROC_MIN_DURATION)
- req_dur = dtim_interval -
- AUX_ROC_MIN_SAFETY_BUFFER;
- }
- }
-
+ iwl_mvm_roc_duration_and_delay(vif, duration, &req_dur, &delay);
tail->duration = cpu_to_le32(req_dur);
tail->apply_time_max_delay = cpu_to_le32(delay);
@@ -4451,8 +4672,8 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
"ROC: Requesting to remain on channel %u for %ums\n",
channel->hw_value, req_dur);
IWL_DEBUG_TE(mvm,
- "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
- duration, delay, dtim_interval);
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
/* Set the node address */
memcpy(tail->node_addr, vif->addr, ETH_ALEN);
@@ -4530,30 +4751,20 @@ static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
return ret;
}
-static int iwl_mvm_roc_switch_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
+static int iwl_mvm_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ret = 0;
+ int ret;
lockdep_assert_held(&mvm->mutex);
- /* Unbind the P2P_DEVICE from the current PHY context,
- * and if the PHY context is not used remove it.
- */
- ret = iwl_mvm_binding_remove_vif(mvm, vif);
- if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+ ret = iwl_mvm_binding_add_vif(mvm, vif);
+ if (WARN(ret, "Failed binding P2P_DEVICE\n"))
return ret;
- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-
- /* Bind the P2P_DEVICE to the current PHY Context */
- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
-
- ret = iwl_mvm_binding_add_vif(mvm, vif);
- WARN(ret, "Failed binding P2P_DEVICE\n");
- return ret;
+ /* The station and queue allocation must be done only after the binding
+ * is done, as otherwise the FW might incorrectly configure its state.
+ */
+ return iwl_mvm_add_p2p_bcast_sta(mvm, vif);
}
static int iwl_mvm_roc(struct ieee80211_hw *hw,
@@ -4564,12 +4775,112 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
{
static const struct iwl_mvm_roc_ops ops = {
.add_aux_sta_for_hs20 = iwl_mvm_add_aux_sta_for_hs20,
- .switch_phy_ctxt = iwl_mvm_roc_switch_binding,
+ .link = iwl_mvm_roc_link,
};
return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
}
+static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration)
+{
+ int ret;
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
+ u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
+ ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
+ } else if (fw_ver >= 3) {
+ ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
+ ROC_ACTIVITY_HOTSPOT);
+ } else {
+ ret = -EOPNOTSUPP;
+ IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
+ }
+
+ return ret;
+}
+
+static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ enum iwl_roc_activity activity;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_mld_add_aux_sta(mvm,
+ iwl_mvm_get_lmac_id(mvm, channel->band));
+ if (ret)
+ return ret;
+
+ return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity);
+}
+
+static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *channel)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct cfg80211_chan_def chandef;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvmvif->deflink.phy_ctxt &&
+ channel == mvmvif->deflink.phy_ctxt->channel)
+ return 0;
+
+ /* Try using a PHY context that is already in use */
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[i];
+
+ if (!phy_ctxt->ref || mvmvif->deflink.phy_ctxt == phy_ctxt)
+ continue;
+
+ if (channel == phy_ctxt->channel) {
+ if (mvmvif->deflink.phy_ctxt)
+ iwl_mvm_phy_ctxt_unref(mvm,
+ mvmvif->deflink.phy_ctxt);
+
+ mvmvif->deflink.phy_ctxt = phy_ctxt;
+ iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
+ return 0;
+ }
+ }
+
+ /* We already have a phy_ctxt, but it's not on the right channel */
+ if (mvmvif->deflink.phy_ctxt)
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+
+ mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+ if (!mvmvif->deflink.phy_ctxt)
+ return -ENOSPC;
+
+ cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+
+ return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt,
+ &chandef, NULL, 1, 1);
+}
+
/* Execute the common part for MLD and non-MLD modes */
int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel *channel, int duration,
@@ -4577,12 +4888,9 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct cfg80211_chan_def chandef;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
- bool band_change_removal;
- int ret, i;
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
+ int ret;
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
duration, type);
@@ -4593,7 +4901,14 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
- mutex_lock(&mvm->mutex);
+ if (!IS_ERR_OR_NULL(bss_vif)) {
+ ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_ROC);
+ if (ret)
+ return ret;
+ }
+
+ guard(mvm)(mvm);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -4602,93 +4917,30 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* Use aux roc framework (HS20) */
ret = ops->add_aux_sta_for_hs20(mvm, lmac_id);
if (!ret)
- ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
- vif, duration);
- goto out_unlock;
+ ret = iwl_mvm_roc_station(mvm, channel, vif, duration);
+ return ret;
case NL80211_IFTYPE_P2P_DEVICE:
/* handle below */
break;
default:
- IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
- ret = -EINVAL;
- goto out_unlock;
+ IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
+ return -EINVAL;
}
- for (i = 0; i < NUM_PHY_CTX; i++) {
- phy_ctxt = &mvm->phy_ctxts[i];
- if (phy_ctxt->ref == 0 || mvmvif->deflink.phy_ctxt == phy_ctxt)
- continue;
-
- if (phy_ctxt->ref && channel == phy_ctxt->channel) {
- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
- if (ret)
- goto out_unlock;
-
- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
- goto schedule_time_event;
- }
+ if (iwl_mvm_has_p2p_over_aux(mvm)) {
+ ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type);
+ return ret;
}
- /* Need to update the PHY context only if the ROC channel changed */
- if (channel == mvmvif->deflink.phy_ctxt->channel)
- goto schedule_time_event;
-
- cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
-
- /*
- * Check if the remain-on-channel is on a different band and that
- * requires context removal, see iwl_mvm_phy_ctxt_changed(). If
- * so, we'll need to release and then re-configure here, since we
- * must not remove a PHY context that's part of a binding.
- */
- band_change_removal =
- fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
- mvmvif->deflink.phy_ctxt->channel->band != chandef.chan->band;
-
- if (mvmvif->deflink.phy_ctxt->ref == 1 && !band_change_removal) {
- /*
- * Change the PHY context configuration as it is currently
- * referenced only by the P2P Device MAC (and we can modify it)
- */
- ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->deflink.phy_ctxt,
- &chandef, 1, 1);
- if (ret)
- goto out_unlock;
- } else {
- /*
- * The PHY context is shared with other MACs (or we're trying to
- * switch bands), so remove the P2P Device from the binding,
- * allocate an new PHY context and create a new binding.
- */
- phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
- if (!phy_ctxt) {
- ret = -ENOSPC;
- goto out_unlock;
- }
-
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
- 1, 1);
- if (ret) {
- IWL_ERR(mvm, "Failed to change PHY context\n");
- goto out_unlock;
- }
-
- ret = ops->switch_phy_ctxt(mvm, vif, phy_ctxt);
- if (ret)
- goto out_unlock;
-
- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
- }
+ ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel);
+ if (ret)
+ return ret;
-schedule_time_event:
- /* Schedule the time events */
- ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+ ret = ops->link(mvm, vif);
+ if (ret)
+ return ret;
-out_unlock:
- mutex_unlock(&mvm->mutex);
- IWL_DEBUG_MAC80211(mvm, "leave\n");
- return ret;
+ return iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
}
int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
@@ -4698,9 +4950,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
- mutex_lock(&mvm->mutex);
iwl_mvm_stop_roc(mvm, vif);
- mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
return 0;
@@ -4721,8 +4971,8 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
data->responder = true;
}
-static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm_ftm_responder_iter_data data = {
.responder = false,
@@ -4741,8 +4991,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt;
- bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
- struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -4755,15 +5004,14 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
goto out;
}
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
- ctx->rx_chains_static,
- ctx->rx_chains_dynamic);
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap,
+ ctx->rx_chains_static,
+ ctx->rx_chains_dynamic);
if (ret) {
IWL_ERR(mvm, "Failed to add PHY context\n");
goto out;
}
- iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
*phy_ctxt_id = phy_ctxt->id;
out:
return ret;
@@ -4773,13 +5021,9 @@ int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_add_chanctx(mvm, ctx);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_add_chanctx(mvm, ctx);
}
static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
@@ -4798,9 +5042,8 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
__iwl_mvm_remove_chanctx(mvm, ctx);
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
@@ -4809,8 +5052,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx);
- struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -4821,26 +5063,23 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
phy_ctxt->ref, changed))
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* we are only changing the min_width, may be a noop */
if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
if (phy_ctxt->width == def->width)
- goto out_unlock;
+ return;
/* we are just toggling between 20_NOHT and 20 */
if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
def->width <= NL80211_CHAN_WIDTH_20)
- goto out_unlock;
+ return;
}
iwl_mvm_bt_coex_vif_change(mvm);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
-
-out_unlock:
- mutex_unlock(&mvm->mutex);
}
/*
@@ -4949,7 +5188,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
- u32 duration = 3 * vif->bss_conf.beacon_int;
+ u32 duration = 5 * vif->bss_conf.beacon_int;
/* Protect the session to make sure we hear the first
* beacon on the new channel.
@@ -4960,6 +5199,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
}
iwl_mvm_update_quotas(mvm, false, NULL);
+
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
}
goto out;
@@ -4979,13 +5222,9 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
/*
@@ -5073,9 +5312,8 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
__iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
}
static int
@@ -5085,7 +5323,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
vifs[0].old_ctx, true);
__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
@@ -5108,7 +5346,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
if (iwl_mvm_phy_ctx_count(mvm) > 1)
iwl_mvm_teardown_tdls_peers(mvm);
- goto out;
+ return 0;
out_remove:
__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
@@ -5125,15 +5363,11 @@ out_reassign:
goto out_restart;
}
- goto out;
+ return ret;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
-
-out:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -5144,7 +5378,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
vifs[0].old_ctx, true);
@@ -5156,7 +5390,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
goto out_reassign;
}
- goto out;
+ return 0;
out_reassign:
if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
@@ -5165,15 +5399,11 @@ out_reassign:
goto out_restart;
}
- goto out;
+ return ret;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
-
-out:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -5227,8 +5457,8 @@ int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
return mvm->ibss_manager;
}
-int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
- bool set)
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -5288,8 +5518,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
}
return -EOPNOTSUPP;
@@ -5300,13 +5530,9 @@ int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
void *data, int len)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int err;
-
- mutex_lock(&mvm->mutex);
- err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
- mutex_unlock(&mvm->mutex);
- return err;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
}
#endif
@@ -5372,8 +5598,8 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
if (chsw->block_tx)
iwl_mvm_csa_client_absent(mvm, vif);
- if (mvmvif->bf_data.bf_enabled) {
- int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ if (mvmvif->bf_enabled) {
+ int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
return ret;
@@ -5385,19 +5611,32 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
return 0;
}
+static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ }
+}
+
#define IWL_MAX_CSA_BLOCK_TX 1500
-int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_txq *mvmtxq;
int ret;
- mutex_lock(&mvm->mutex);
+ lockdep_assert_held(&mvm->mutex);
mvmvif->csa_failed = false;
+ mvmvif->csa_blocks_tx = false;
IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
@@ -5412,30 +5651,38 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active,
- "Another CSA is already in progress")) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ "Another CSA is already in progress"))
+ return -EBUSY;
/* we still didn't unblock tx. prevent new CS meanwhile */
if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
- lockdep_is_held(&mvm->mutex))) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ lockdep_is_held(&mvm->mutex)))
+ return -EBUSY;
rcu_assign_pointer(mvm->csa_vif, vif);
if (WARN_ONCE(mvmvif->csa_countdown,
- "Previous CSA countdown didn't complete")) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ "Previous CSA countdown didn't complete"))
+ return -EBUSY;
mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+ if (!chsw->block_tx)
+ break;
+		/* don't need blocking in the driver otherwise - mac80211 will do it */
+ if (!ieee80211_hw_check(mvm->hw, HANDLES_QUIET_CSA))
+ break;
+
+ mvmvif->csa_blocks_tx = true;
+ mvmtxq = iwl_mvm_txq_from_mac80211(vif->txq);
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_csa_block_txqs,
+ NULL);
break;
case NL80211_IFTYPE_STATION:
+ mvmvif->csa_blocks_tx = chsw->block_tx;
+
/*
* In the new flow FW is in charge of timing the switch so there
* is no need for all of this
@@ -5450,12 +5697,11 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
*/
- if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (!vif->cfg.assoc || !vif->bss_conf.dtim_period)
+ return -EBUSY;
- if (chsw->delay > IWL_MAX_CSA_BLOCK_TX)
+ if (chsw->delay > IWL_MAX_CSA_BLOCK_TX &&
+ hweight16(vif->valid_links) <= 1)
schedule_delayed_work(&mvmvif->csa_work, 0);
if (chsw->block_tx) {
@@ -5474,7 +5720,7 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw);
if (ret)
- goto out_unlock;
+ return ret;
} else {
iwl_mvm_schedule_client_csa(mvm, vif, chsw);
}
@@ -5490,17 +5736,24 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
ret = iwl_mvm_power_update_ps(mvm);
if (ret)
- goto out_unlock;
+ return ret;
/* we won't be on this channel any longer */
iwl_mvm_teardown_tdls_peers(mvm);
-out_unlock:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
+static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return iwl_mvm_pre_channel_switch(mvm, vif, chsw);
+}
+
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
@@ -5532,9 +5785,17 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
if (mvmvif->csa_misbehave) {
+ struct ieee80211_bss_conf *link_conf;
+
			/* Second time, give up on this AP */
- iwl_mvm_abort_channel_switch(hw, vif);
- ieee80211_chswitch_done(vif, false);
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[chsw->link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+
+ iwl_mvm_abort_channel_switch(hw, vif, link_conf);
+ ieee80211_chswitch_done(vif, false, 0);
mvmvif->csa_misbehave = false;
return;
}
@@ -5542,16 +5803,14 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
}
mvmvif->csa_count = chsw->count;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (mvmvif->csa_failed)
- goto out_unlock;
+ return;
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
0, sizeof(cmd), &cmd));
-out_unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
@@ -5559,18 +5818,21 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
int i;
if (!iwl_mvm_has_new_tx_api(mvm)) {
+ /* we can't ask the firmware anything if it is dead */
+ if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
+ return;
if (drop) {
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_flush_tx_path(mvm,
iwl_mvm_flushable_queues(mvm) & queues);
- mutex_unlock(&mvm->mutex);
} else {
iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
}
return;
}
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
struct ieee80211_sta *sta;
@@ -5585,14 +5847,13 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
}
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_sta *sta;
bool ap_sta_done = false;
@@ -5604,14 +5865,22 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
}
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
+ if (!drop && hweight16(vif->active_links) <= 1) {
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+ if (link_conf->csa_active && mvmvif->csa_blocks_tx)
+ drop = true;
+ }
/* Make sure we're done with the deferred traffic before flushing */
flush_work(&mvm->add_stream_wk);
mutex_lock(&mvm->mutex);
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
@@ -5630,11 +5899,9 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ap_sta_done = true;
}
- /* make sure only TDLS peers or the AP are flushed */
- WARN_ON_ONCE(sta != mvmvif->ap_sta && !sta->tdls);
-
if (drop) {
- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
+ if (iwl_mvm_flush_sta(mvm, mvmsta->deflink.sta_id,
+ mvmsta->tfd_queue_msk))
IWL_ERR(mvm, "flush request fail\n");
} else {
if (iwl_mvm_has_new_tx_api(mvm))
@@ -5648,66 +5915,129 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* this can take a while, and we may need/want other operations
* to succeed while doing this, so do it without the mutex held
+ * If the firmware is dead, this can't work...
*/
- if (!drop && !iwl_mvm_has_new_tx_api(mvm))
+ if (!drop && !iwl_mvm_has_new_tx_api(mvm) &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status))
iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
}
void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int i;
+ struct iwl_mvm_link_sta *mvm_link_sta;
+ struct ieee80211_link_sta *link_sta;
+ int link_id;
+
+ guard(mvm)(mvm);
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (!mvm_link_sta)
+ continue;
+
+ if (iwl_mvm_flush_sta(mvm, mvm_link_sta->sta_id,
+ mvmsta->tfd_queue_msk))
+ IWL_ERR(mvm, "flush request fail\n");
+ }
+}
+
+static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx,
+ struct survey_info *survey)
+{
+ int chan_idx;
+ enum nl80211_band band;
+ int ret;
mutex_lock(&mvm->mutex);
- for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
- struct iwl_mvm_sta *mvmsta;
- struct ieee80211_sta *tmp;
- tmp = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
- lockdep_is_held(&mvm->mutex));
- if (tmp != sta)
+ if (!mvm->acs_survey) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Find and return the next entry that has a non-zero active time */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ mvm->hw->wiphy->bands[band];
+
+ if (!sband)
continue;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct iwl_mvm_acs_survey_channel *info =
+ &mvm->acs_survey->bands[band][chan_idx];
- if (iwl_mvm_flush_sta(mvm, mvmsta, false))
- IWL_ERR(mvm, "flush request fail\n");
+ if (!info->time)
+ continue;
+
+ /* Found (the next) channel to report */
+ survey->channel = &sband->channels[chan_idx];
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+ survey->time = info->time;
+ survey->time_busy = info->time_busy;
+ survey->time_rx = info->time_rx;
+ survey->time_tx = info->time_tx;
+ survey->noise = info->noise;
+ if (survey->noise < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ /* Clear time so that channel is only reported once */
+ info->time = 0;
+
+ ret = 0;
+ goto out;
+ }
}
+
+ ret = -ENOENT;
+
+out:
mutex_unlock(&mvm->mutex);
+
+ return ret;
}
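iwl_mvm_mac_get_acs_survey() above plugs into the standard survey dump: cfg80211 keeps calling the driver's get_survey() op with idx = 0, 1, 2, ... until it returns -ENOENT, and each successful call fills one struct survey_info. iwl_mvm_mac_get_survey() just below reports the radio-wide statistics at idx 0 and forwards idx - 1 for any larger index; because the ACS helper clears info->time after reporting a channel, each channel with data is reported only once. A rough illustration of the caller side (the real loop lives in cfg80211, not in the driver; this assumes the prototype from mvm/mvm.h is visible):

#include <net/mac80211.h>

/* Illustration only: the idx-based dump contract described above. */
static void dump_all_surveys(struct ieee80211_hw *hw)
{
	struct survey_info survey;
	int idx = 0;

	while (iwl_mvm_mac_get_survey(hw, idx, &survey) == 0)
		idx++;		/* the dump ends when -ENOENT is returned */
}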
int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
memset(survey, 0, sizeof(*survey));
- /* only support global statistics right now */
- if (idx != 0)
- return -ENOENT;
-
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
- mutex_lock(&mvm->mutex);
+ /*
+ * Return the beacon stats at index zero and pass on following indices
+ * to the function returning the full survey, most likely for ACS
+ * (Automatic Channel Selection).
+ */
+ if (idx > 0)
+ return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey);
+
+ guard(mvm)(mvm);
if (iwl_mvm_firmware_running(mvm)) {
- ret = iwl_mvm_request_statistics(mvm, false);
+ int ret = iwl_mvm_request_statistics(mvm, false);
+
if (ret)
- goto out;
+ return ret;
}
- survey->filled = SURVEY_INFO_TIME |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_SCAN;
- survey->time = mvm->accu_radio_stats.on_time_rf +
- mvm->radio_stats.on_time_rf;
- do_div(survey->time, USEC_PER_MSEC);
+ survey->filled = SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
survey->time_rx = mvm->accu_radio_stats.rx_time +
mvm->radio_stats.rx_time;
@@ -5717,14 +6047,21 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
mvm->radio_stats.tx_time;
do_div(survey->time_tx, USEC_PER_MSEC);
+ /* the new fw api doesn't support the following fields */
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return 0;
+
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_SCAN;
+ survey->time = mvm->accu_radio_stats.on_time_rf +
+ mvm->radio_stats.on_time_rf;
+ do_div(survey->time, USEC_PER_MSEC);
+
survey->time_scan = mvm->accu_radio_stats.on_time_scan +
mvm->radio_stats.on_time_scan;
do_div(survey->time_scan, USEC_PER_MSEC);
- ret = 0;
- out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
@@ -5870,6 +6207,7 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int i;
if (mvmsta->deflink.avg_energy) {
sinfo->signal_avg = -(s8)mvmsta->deflink.avg_energy;
@@ -5890,16 +6228,19 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (!vif->cfg.assoc)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id)
- goto unlock;
+ return;
if (iwl_mvm_request_statistics(mvm, false))
- goto unlock;
+ return;
+
+ sinfo->rx_beacon = 0;
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ sinfo->rx_beacon += mvmvif->link[i]->beacon_stats.num_beacons +
+ mvmvif->link[i]->beacon_stats.accu_num_beacons;
- sinfo->rx_beacon = mvmvif->deflink.beacon_stats.num_beacons +
- mvmvif->deflink.beacon_stats.accu_num_beacons;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
if (mvmvif->deflink.beacon_stats.avg_signal) {
/* firmware only reports a value after RXing a few beacons */
@@ -5907,8 +6248,6 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mvmvif->deflink.beacon_stats.avg_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
- unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
@@ -6028,6 +6367,7 @@ void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
enum iwl_mvm_rxq_notif_type type,
bool sync,
@@ -6050,7 +6390,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
.len[0] = sizeof(cmd),
.data[1] = data,
.len[1] = size,
- .flags = sync ? 0 : CMD_ASYNC,
+ .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
};
int ret;
@@ -6075,12 +6415,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
if (sync) {
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
- READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_killed(mvm),
- HZ);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx\n",
- mvm->queue_sync_state);
+ READ_ONCE(mvm->queue_sync_state) == 0,
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ mvm->queue_sync_state,
+ mvm->queue_sync_cookie);
}
out:
@@ -6094,9 +6433,8 @@ void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0);
- mutex_unlock(&mvm->mutex);
}
int
@@ -6132,13 +6470,9 @@ int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_ftm_start(mvm, vif, request);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_ftm_start(mvm, vif, request);
}
void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -6146,9 +6480,8 @@ void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_ftm_abort(mvm, request);
- mutex_unlock(&mvm->mutex);
}
static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
@@ -6183,7 +6516,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 protocols = 0;
- int ret;
/* HW timestamping is only supported for a specific station */
if (!hwts->macaddr)
@@ -6193,11 +6525,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
protocols =
IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
@@ -6205,6 +6534,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.get_antenna = iwl_mvm_op_get_antenna,
+ .set_antenna = iwl_mvm_op_set_antenna,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
.stop = iwl_mvm_mac_stop,
@@ -6253,7 +6583,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
@@ -6287,6 +6617,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .vif_add_debugfs = iwl_mvm_vif_add_debugfs,
.link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs,
#endif
.set_hw_timestamp = iwl_mvm_set_hw_timestamp,
diff --git a/mvm/mld-key.c b/mvm/mld-key.c
index 2c9f2f71b083..8a38fc4b0b0f 100644
--- a/mvm/mld-key.c
+++ b/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -24,10 +24,15 @@ static u32 iwl_mvm_get_sec_sta_mask(struct iwl_mvm *mvm,
return 0;
}
- /* AP group keys are per link and should be on the mcast STA */
+ /* AP group keys are per link and should be on the mcast/bcast STA */
if (vif->type == NL80211_IFTYPE_AP &&
- !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ /* IGTK/BIGTK to bcast STA */
+ if (keyconf->keyidx >= 4)
+ return BIT(link_info->bcast_sta.sta_id);
+ /* GTK for data to mcast STA */
return BIT(link_info->mcast_sta.sta_id);
+ }
/* for client mode use the AP STA also for group keys */
if (!sta && vif->type == NL80211_IFTYPE_STATION)
@@ -57,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
u32 flags = 0;
lockdep_assert_held(&mvm->mutex);
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!pairwise)
flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
switch (keyconf->cipher) {
@@ -91,9 +98,19 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
if (!sta && vif->type == NL80211_IFTYPE_STATION)
sta = mvmvif->ap_sta;
- if (!IS_ERR_OR_NULL(sta) && sta->mfp)
+ /*
+ * If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+ * In case we're installing a groupwise key (which is not an iGTK),
+ * we will not use this key for MGMT frames.
+ */
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
flags |= IWL_SEC_KEY_FLAG_MFP;
+ if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
return flags;
}
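The key-index tests in the two hunks above follow the 802.11 key ID layout: IDs 0-3 carry pairwise keys or the data GTK, IDs 4-5 the IGTK used for management frame protection, and IDs 6-7 the BIGTK used for beacon protection. That is why keyidx >= 4 steers AP group keys to the broadcast station, while keyidx 4 or 5 marks the key as an IGTK for the MFP flag. A small helper, for illustration only, making that classification explicit:

/* Illustrative only - not driver code. */
enum group_key_kind { KEY_PTK_OR_GTK, KEY_IGTK, KEY_BIGTK };

static enum group_key_kind classify_keyidx(int keyidx)
{
	if (keyidx >= 6)	/* key IDs 6-7: beacon protection (BIGTK) */
		return KEY_BIGTK;
	if (keyidx >= 4)	/* key IDs 4-5: mgmt frame protection (IGTK) */
		return KEY_IGTK;
	return KEY_PTK_OR_GTK;	/* key IDs 0-3: pairwise keys / data GTK */
}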
@@ -325,6 +342,21 @@ static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
+ IWL_SEC_KEY_FLAG_MFP;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ 0);
+}
+
int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/mvm/mld-mac.c b/mvm/mld-mac.c
index f313a8d771e4..bb7851042177 100644
--- a/mvm/mld-mac.c
+++ b/mvm/mld-mac.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
- MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
@@ -205,8 +205,11 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
cmd.p2p_dev.is_disc_extended =
iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
- /* Override the filter flags to accept only probe requests */
- cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+ * P2P service discovery using action frames
+ */
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/mvm/mld-mac80211.c b/mvm/mld-mac80211.c
index 8b6c641772ee..3c99396ad369 100644
--- a/mvm/mld-mac80211.c
+++ b/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
@@ -10,8 +10,11 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ int i;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
+
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
mvmvif->mvm = mvm;
@@ -22,13 +25,14 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
/* make sure that beacon statistics don't go backwards with FW reset */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- mvmvif->deflink.beacon_stats.accu_num_beacons +=
- mvmvif->deflink.beacon_stats.num_beacons;
+ for_each_mvm_vif_valid_link(mvmvif, i)
+ mvmvif->link[i]->beacon_stats.accu_num_beacons +=
+ mvmvif->link[i]->beacon_stats.num_beacons;
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
@@ -42,10 +46,10 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -56,58 +60,29 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- /*
- * P2P_DEVICE interface does not have a channel context assigned to it,
- * so a dedicated PHY context is allocated to it and the corresponding
- * MAC context is bound to it at this stage.
- */
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- mvmvif->deflink.phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
- if (!mvmvif->deflink.phy_ctxt) {
- ret = -ENOSPC;
- goto out_free_bf;
- }
-
- iwl_mvm_phy_ctxt_ref(mvm, mvmvif->deflink.phy_ctxt);
- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
- if (ret)
- goto out_unref_phy;
-
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
- LINK_CONTEXT_MODIFY_ACTIVE |
- LINK_CONTEXT_MODIFY_RATES_INFO,
- true);
- if (ret)
- goto out_remove_link;
-
- ret = iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
- if (ret)
- goto out_remove_link;
+ ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (ret)
+ goto out_free_bf;
- /* Save a pointer to p2p device vif, so it can later be used to
- * update the p2p device MAC when a GO is started/stopped
- */
+ /* Save a pointer to p2p device vif, so it can later be used to
+ * update the p2p device MAC when a GO is started/stopped
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
- } else {
- ret = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
- if (ret)
- goto out_free_bf;
- }
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
goto out_free_bf;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
}
- iwl_mvm_vif_dbgfs_register(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -117,12 +92,11 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mvm->csme_vif = vif;
}
- goto out_unlock;
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+ return 0;
- out_remove_link:
- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
- out_unref_phy:
- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
out_free_bf:
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
@@ -130,12 +104,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI);
}
out_remove_mac:
- mvmvif->deflink.phy_ctxt = NULL;
mvmvif->link[0] = NULL;
iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
- out_unlock:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -152,7 +122,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC))
iwl_mvm_tcm_rm_vif(mvm, vif);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (vif == mvm->csme_vif) {
iwl_mei_set_netdev(NULL);
@@ -168,7 +138,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
if (vif->bss_conf.ftm_responder)
memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats));
- iwl_mvm_vif_dbgfs_clean(mvm, vif);
+ iwl_mvm_vif_dbgfs_rm_link(mvm, vif);
/* For AP/GO interface, the tear down of the resources allocated to the
* interface is handled as part of the stop_ap flow.
@@ -185,14 +155,18 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
+ /* Before the interface removal, mac80211 would cancel the ROC, and the
+ * ROC worker would be scheduled if needed. The worker would be flushed
+ * in iwl_mvm_prepare_mac_removal() and thus at this point the link is
+ * not active. So we only need to remove the link.
+ */
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (mvmvif->deflink.phy_ctxt) {
+ iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
+ mvmvif->deflink.phy_ctxt = NULL;
+ }
mvm->p2p_device_vif = NULL;
-
- /* P2P device uses only one link */
- iwl_mvm_mld_rm_bcast_sta(mvm, vif, &vif->bss_conf);
- iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
- mvmvif->deflink.phy_ctxt = NULL;
+ iwl_mvm_remove_link(mvm, vif, &vif->bss_conf);
} else {
iwl_mvm_disable_link(mvm, vif, &vif->bss_conf);
}
@@ -211,27 +185,47 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
mvm->monitor_on = false;
__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
}
-
- mutex_unlock(&mvm->mutex);
}
-static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif)
+static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
{
unsigned int n_active = 0;
int i;
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- struct ieee80211_bss_conf *link_conf;
-
- link_conf = link_conf_dereference_protected(vif, i);
- if (link_conf &&
- rcu_access_pointer(link_conf->chanctx_conf))
+ if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
n_active++;
}
return n_active;
}
+static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct ieee80211_sta *ap_sta = mvmvif->ap_sta;
+ struct iwl_mvm_sta *mvmsta;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+ mvmsta->mpdu_counters[q].window_start = jiffies;
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+
+ IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+}
+
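iwl_mvm_restart_mpdu_count() above zeroes the per-RX-queue, per-link MPDU counters under each queue's spinlock and restarts the counting window; it is called when entering or leaving EMLSR so that the next statistics pass evaluates a full, clean window. A rough sketch of the shape such a counter takes, based only on the fields referenced above (per_link, window_start, lock); the names and array size here are illustrative, the real definitions live elsewhere in the driver:

#include <linux/spinlock.h>
#include <linux/types.h>

struct iwl_mvm_mpdu_counter {
	u32 tx;
	u32 rx;
};

struct iwl_mvm_tpt_counter {		/* one instance per RX queue */
	spinlock_t lock;
	struct iwl_mvm_mpdu_counter per_link[2];	/* one slot per EMLSR link */
	unsigned long window_start;	/* jiffies at the start of the window */
};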
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -240,8 +234,8 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
mvmvif->esr_active = true;
- /* Disable SMPS overrideing by user */
- vif->driver_flags |= IEEE80211_VIF_DISABLE_SMPS_OVERRIDE;
+ /* Indicate to mac80211 that EML is enabled */
+ vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW,
IEEE80211_SMPS_OFF);
@@ -259,6 +253,22 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
link->phy_ctxt->rlc_disabled = true;
}
+ if (vif->active_links == mvmvif->link_selection_res &&
+ !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary))))
+ mvmvif->primary_link = mvmvif->link_selection_primary;
+ else
+ mvmvif->primary_link = __ffs(vif->active_links);
+
+ /* Needed for tracking RSSI */
+ iwl_mvm_request_periodic_system_statistics(mvm, true);
+
+ /*
+ * Restart the MPDU counters and the counting window, so when the
+ * statistics arrive (which is where we look at the counters) we
+ * will be at the end of the window.
+ */
+ iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+
return ret;
}
@@ -271,21 +281,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
int ret;
- /* if the assigned one was not counted yet, count it now */
- if (!rcu_access_pointer(link_conf->chanctx_conf))
- n_active++;
-
- if (n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
+ /* if the assigned one was not counted yet, count it now */
+ if (!mvmvif->link[link_id]->phy_ctxt)
+ n_active++;
+
/* mac parameters such as HE support can change at this stage
* For sta, need first to configure correct state from drv_sta_state
* and only after that update mac config.
@@ -298,17 +305,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
}
}
+ mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
+
if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
mvmvif->link[link_id]->listen_lmac = true;
ret = iwl_mvm_esr_mode_active(mvm, vif);
if (ret) {
IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
- return ret;
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+ goto out;
}
}
- mvmvif->link[link_id]->phy_ctxt = phy_ctxt;
-
if (switching_chanctx) {
/* reactivate if we turned this off during channel switch */
if (vif->type == NL80211_IFTYPE_AP)
@@ -325,13 +333,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
* this needs the phy context assigned (and in FW?), and we cannot
* do it later because it needs to be initialized as soon as we're
* able to TX on the link, i.e. when active.
- *
- * Firmware restart isn't quite correct yet for MLO, but we don't
- * need to do it in that case anyway since it will happen from the
- * normal station state callback.
*/
- if (mvmvif->ap_sta &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (mvmvif->ap_sta) {
struct ieee80211_link_sta *link_sta;
rcu_read_lock();
@@ -344,6 +347,11 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+
/* then activate */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
LINK_CONTEXT_MODIFY_ACTIVE |
@@ -381,13 +389,23 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ int ret;
- return ret;
+ ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
+ true);
+ /*
+ * Don't activate this link if failed to exit EMLSR in
+ * the BSS interface
+ */
+ if (ret)
+ return ret;
+ }
+
+ guard(mvm)(mvm);
+ return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
@@ -399,7 +417,7 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
mvmvif->esr_active = false;
- vif->driver_flags &= ~IEEE80211_VIF_DISABLE_SMPS_OVERRIDE;
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_FW,
IEEE80211_SMPS_AUTOMATIC);
@@ -433,6 +451,11 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
break;
}
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+
+ /* Start a new counting window */
+ iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+
return ret;
}
@@ -445,7 +468,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
/* shouldn't happen, but verify link_id is valid before accessing */
@@ -464,6 +487,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
mvmvif->ap_ibss_active = false;
}
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE, false);
+
if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
@@ -475,9 +501,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_MONITOR)
iwl_mvm_mld_rm_snif_sta(mvm, vif);
- iwl_mvm_link_changed(mvm, vif, link_conf,
- LINK_CONTEXT_MODIFY_ACTIVE, false);
-
if (switching_chanctx)
return;
mvmvif->link[link_id]->phy_ctxt = NULL;
@@ -489,11 +512,99 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
mutex_lock(&mvm->mutex);
__iwl_mvm_mld_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
+ /* in the non-MLD case, remove/re-add the link to clean up FW state */
+ if (!ieee80211_vif_is_mld(vif) && !mvmvif->ap_sta &&
+ !WARN_ON_ONCE(vif->cfg.assoc)) {
+ iwl_mvm_remove_link(mvm, vif, link_conf);
+ iwl_mvm_add_link(mvm, vif, link_conf);
+ }
mutex_unlock(&mvm->mutex);
+
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
+}
+
+static void
+iwl_mvm_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+ const struct ieee80211_bss_conf *bss_info)
+{
+ u8 i;
+
+ /*
+ * NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT,
+ * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE
+ */
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.psd_local[0].power));
+
+ /* if not valid, mac80211 puts default (max value) */
+ for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+ cmd->psd_pwr[i] = min(bss_info->tpe.psd_local[0].power[i],
+ bss_info->tpe.psd_reg_client[0].power[i]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.max_local[0].power));
+
+ for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+ cmd->eirp_pwr[i] = min(bss_info->tpe.max_local[0].power[i],
+ bss_info->tpe.max_reg_client[0].power[i]);
+}
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap)
+{
+ struct iwl_txpower_constraints_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[bss_conf->link_id];
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD);
+ u32 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return;
+
+ if (!link_info->active ||
+ link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
+ return;
+
+ if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+ return;
+
+ cmd.link_id = cpu_to_le16(link_info->fw_link_id);
+ memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+ memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+ if (is_ap) {
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ } else if (bss_conf->power_type == IEEE80211_REG_UNSET_AP) {
+ return;
+ } else {
+ cmd.ap_type = cpu_to_le16(bss_conf->power_type - 1);
+ iwl_mvm_tpe_sta_cmd_data(&cmd, bss_conf);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP,
+ AP_TX_POWER_CONSTRAINTS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+ ret);
}
static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
@@ -504,11 +615,16 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf, true);
+
/* Send the beacon template */
ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
if (ret)
- goto out_unlock;
+ return ret;
/* the link should be already activated when assigning chan context */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -516,11 +632,11 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
~LINK_CONTEXT_MODIFY_ACTIVE,
true);
if (ret)
- goto out_unlock;
+ return ret;
ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf);
if (ret)
- goto out_unlock;
+ return ret;
/* Send the bcast station. At this stage the TBTT and DTIM time
* events are added and applied to the scheduler
@@ -544,7 +660,7 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_ftm_restart_responder(mvm, vif, link_conf);
- goto out_unlock;
+ return 0;
out_failed:
iwl_mvm_power_update_mac(mvm);
@@ -552,8 +668,6 @@ out_failed:
iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf);
out_rm_mcast:
iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
-out_unlock:
- mutex_unlock(&mvm->mutex);
return ret;
}
@@ -576,7 +690,7 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_ap_ibss_common(mvm, vif);
@@ -590,7 +704,6 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
iwl_mvm_power_update_mac(mvm);
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw,
@@ -623,6 +736,25 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
&callbacks);
}
+static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_bss_conf *other_link;
+ int link_id;
+
+ /* Exit EMLSR if links don't have equal bandwidths */
+ for_each_vif_active_link(vif, other_link, link_id) {
+ if (link_id == link_conf->link_id)
+ continue;
+ if (link_conf->chanreq.oper.width ==
+ other_link->chanreq.oper.width)
+ return true;
+ }
+
+ return false;
+}
+
static void
iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -652,8 +784,16 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- /* Update EHT Puncturing info */
- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc && has_eht)
+ if ((changes & BSS_CHANGED_BANDWIDTH) &&
+ ieee80211_vif_link_active(vif, link_conf->link_id) &&
+ mvmvif->esr_active &&
+ !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_BANDWIDTH,
+ iwl_mvm_get_primary_link(vif));
+
+ /* if associated, maybe puncturing changed - we'll check later */
+ if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
if (link_changes) {
@@ -731,73 +871,91 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
mvmvif->associated = vif->cfg.assoc;
- if (!(changes & BSS_CHANGED_ASSOC))
- return;
-
- if (vif->cfg.assoc) {
- /* clear statistics to get clean beacon counter */
- iwl_mvm_request_statistics(mvm, true);
- iwl_mvm_sf_update(mvm, vif, false);
- iwl_mvm_power_vif_assoc(mvm, vif);
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- memset(&mvmvif->link[i]->beacon_stats, 0,
- sizeof(mvmvif->link[i]->beacon_stats));
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
- if (vif->p2p) {
- iwl_mvm_update_smps(mvm, vif,
- IWL_MVM_SMPS_REQ_PROT,
- IEEE80211_SMPS_DYNAMIC, i);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+ /* We are in assoc so only one link is active -
+ * the association link
+ */
+ unsigned int link_id =
+ ffs(vif->active_links) - 1;
+
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0, link_id);
}
- rcu_read_lock();
- link_conf = rcu_dereference(vif->link_conf[i]);
- if (link_conf && !link_conf->dtim_period)
- protect = true;
- rcu_read_unlock();
- }
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- protect) {
- /* If we're not restarting and still haven't
- * heard a beacon (dtim period unknown) then
- * make sure we still have enough minimum time
- * remaining in the time event, since the auth
- * might actually have taken quite a while
- * (especially for SAE) and so the remaining
- * time could be small without us having heard
- * a beacon yet.
+ /* If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
- iwl_mvm_sf_update(mvm, vif, false);
-
- /* FIXME: need to decide about misbehaving AP handling */
- iwl_mvm_power_vif_assoc(mvm, vif);
- } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
- iwl_mvm_mei_host_disassociated(mvm);
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ }
- /* If update fails - SF might be running in associated
- * mode while disassociated - which is forbidden.
- */
- ret = iwl_mvm_sf_update(mvm, vif, false);
- WARN_ONCE(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status),
- "Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ if (changes & BSS_CHANGED_PS) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
}
- iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
+ ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
}
static void
@@ -850,7 +1008,7 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -876,8 +1034,6 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
link_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower);
}
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
@@ -886,15 +1042,13 @@ static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
if (vif->type == NL80211_IFTYPE_STATION)
iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes);
-
- mutex_unlock(&mvm->mutex);
}
static int
@@ -927,9 +1081,8 @@ static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw,
!vif->p2p)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
- mutex_unlock(&mvm->mutex);
}
static int
@@ -951,48 +1104,37 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
* The exception is P2P_DEVICE interface which needs immediate update.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
- LINK_CONTEXT_MODIFY_QOS_PARAMS,
- true);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS,
+ true);
}
return 0;
}
-static int iwl_mvm_link_switch_phy_ctx(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_mvm_phy_ctxt *new_phy_ctxt)
+static int iwl_mvm_mld_roc_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int ret = 0;
+ int ret;
lockdep_assert_held(&mvm->mutex);
- /* Inorder to change the phy_ctx of a link, the link needs to be
- * inactive. Therefore, first deactivate the link, then change its
- * phy_ctx, and then activate it again.
- */
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
- LINK_CONTEXT_MODIFY_ACTIVE, false);
- if (WARN(ret, "Failed to deactivate link\n"))
+ /* The PHY context ID might have changed so need to set it */
+ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
+ if (WARN(ret, "Failed to set PHY context ID\n"))
return ret;
- iwl_mvm_phy_ctxt_unref(mvm, mvmvif->deflink.phy_ctxt);
-
- mvmvif->deflink.phy_ctxt = new_phy_ctxt;
+ ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE |
+ LINK_CONTEXT_MODIFY_RATES_INFO,
+ true);
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, 0, false);
- if (WARN(ret, "Failed to deactivate link\n"))
+ if (WARN(ret, "Failed linking P2P_DEVICE\n"))
return ret;
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
- LINK_CONTEXT_MODIFY_ACTIVE, true);
- WARN(ret, "Failed binding P2P_DEVICE\n");
- return ret;
+ /* The station and queue allocation must be done only after the linking
+ * is done, as otherwise the FW might incorrectly configure its state.
+ */
+ return iwl_mvm_mld_add_bcast_sta(mvm, vif, &vif->bss_conf);
}
static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1001,7 +1143,7 @@ static int iwl_mvm_mld_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
static const struct iwl_mvm_roc_ops ops = {
.add_aux_sta_for_hs20 = iwl_mvm_mld_add_aux_sta,
- .switch_phy_ctxt = iwl_mvm_link_switch_phy_ctx,
+ .link = iwl_mvm_mld_roc_link,
};
return iwl_mvm_roc_common(hw, vif, channel, duration, type, &ops);
@@ -1014,17 +1156,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 removed = old_links & ~new_links;
u16 added = new_links & ~old_links;
int err, i;
- if (hweight16(new_links) > 1 &&
- n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
int r;
@@ -1084,13 +1221,18 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
}
}
- if (err)
- goto out_err;
-
err = 0;
if (new_links == 0) {
mvmvif->link[0] = &mvmvif->deflink;
err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (err == 0)
+ mvmvif->primary_link = 0;
+ } else if (!(new_links & BIT(mvmvif->primary_link))) {
+ /*
+ * Ensure we always have a valid primary_link; the real
+ * decision happens later when the PHY is activated.
+ */
+ mvmvif->primary_link = __ffs(new_links);
}
out_err:
@@ -1110,10 +1252,103 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
u16 old_links, u16 new_links)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
+}
+
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ const struct wiphy_iftype_ext_capab *ext_capa;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
+ hweight16(ieee80211_vif_usable_links(vif)) == 1)
+ return false;
+
+ if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
+ ieee80211_vif_type_p2p(vif));
+ return (ext_capa &&
+ (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP));
+}
+
+static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int n_links = hweight16(desired_links);
+
+ if (n_links <= 1)
+ return true;
+
+ guard(mvm)(mvm);
+
+ /* Check if HW supports the wanted number of links */
+ if (n_links > iwl_mvm_max_active_links(mvm, vif))
+ return false;
+
+ /* If it is an eSR device, check that we can enter eSR */
+ return iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
+ iwl_mvm_vif_has_esr_cap(mvm, vif);
+}
+
+static enum ieee80211_neg_ttlm_res
+iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+ u8 i;
+
+ /* Verify all TIDs are mapped to the same set of links */
+ map = neg_ttlm->downlink[0];
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
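iwl_mvm_mld_can_neg_ttlm() above accepts a negotiated TID-to-link mapping only when every TID maps to the same set of links in both directions, i.e. the mapping degenerates to a single link set rather than per-TID steering. An illustration of what passes and what is rejected, using mac80211's ieee80211_neg_ttlm layout (example only, not driver code):

#include <net/mac80211.h>

static void ttlm_examples(void)
{
	struct ieee80211_neg_ttlm accepted = {}, rejected;
	int tid;

	/* accepted: every TID, uplink and downlink, maps to links 0 and 1 */
	for (tid = 0; tid < IEEE80211_TTLM_NUM_TIDS; tid++) {
		accepted.downlink[tid] = BIT(0) | BIT(1);
		accepted.uplink[tid] = BIT(0) | BIT(1);
	}

	/* rejected: TID 6 is steered to link 1 only on the downlink */
	rejected = accepted;
	rejected.downlink[6] = BIT(1);
}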
+static int
+iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
+ if (mvmvif->esr_active) {
+ u8 primary = iwl_mvm_get_primary_link(vif);
+ int selected;
+
+ /* prefer primary unless quiet CSA on it */
+ if (chsw->link_id == primary && chsw->block_tx)
+ selected = iwl_mvm_get_other_link(vif, primary);
+ else
+ selected = primary;
+
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * If we've not kept the link active that's doing the CSA
+ * then we don't need to do anything else, just return.
+ */
+ if (selected != chsw->link_id)
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ }
+
+ ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
mutex_unlock(&mvm->mutex);
return ret;
@@ -1124,6 +1359,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
.ampdu_action = iwl_mvm_mac_ampdu_action,
.get_antenna = iwl_mvm_op_get_antenna,
+ .set_antenna = iwl_mvm_op_set_antenna,
.start = iwl_mvm_mac_start,
.reconfig_complete = iwl_mvm_mac_reconfig_complete,
.stop = iwl_mvm_mac_stop,
@@ -1170,10 +1406,8 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx_last_beacon = iwl_mvm_tx_last_beacon,
- .set_tim = iwl_mvm_set_tim,
-
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
@@ -1206,10 +1440,14 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.abort_pmsr = iwl_mvm_abort_pmsr,
#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .vif_add_debugfs = iwl_mvm_vif_add_debugfs,
+ .link_add_debugfs = iwl_mvm_link_add_debugfs,
.link_sta_add_debugfs = iwl_mvm_link_sta_add_debugfs,
#endif
.set_hw_timestamp = iwl_mvm_set_hw_timestamp,
.change_vif_links = iwl_mvm_mld_change_vif_links,
.change_sta_links = iwl_mvm_mld_change_sta_links,
+ .can_activate_links = iwl_mvm_mld_can_activate_links,
+ .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
};
diff --git a/mvm/mld-sta.c b/mvm/mld-sta.c
index 524852cf5cd2..d5a204e52076 100644
--- a/mvm/mld-sta.c
+++ b/mvm/mld-sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
#include "time-sync.h"
@@ -9,7 +9,9 @@
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int filter_link_id)
{
+ struct ieee80211_link_sta *link_sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
unsigned int link_id;
u32 result = 0;
@@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return 0;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
/* it's easy when the STA is not an MLD */
if (!sta->valid_links)
return BIT(mvmsta->deflink.sta_id);
/* but if it is an MLD, get the mask of all the FW STAs it has ... */
- for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
- struct iwl_mvm_link_sta *link_sta;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
/* unless we have a specific link in mind */
if (filter_link_id >= 0 && link_id != filter_link_id)
continue;
- link_sta =
+ mvm_link_sta =
rcu_dereference_check(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (!link_sta)
+ if (!mvm_link_sta)
continue;
- result |= BIT(link_sta->sta_id);
+ result |= BIT(mvm_link_sta->sta_id);
}
return result;
@@ -238,7 +241,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_MAX_TID_COUNT, &wdg_timeout);
}
-/* Allocate a new station entry for the broadcast station to the given vif,
+/* Allocate a new station entry for the multicast station to the given vif,
* and send it to the FW.
* Note that each AP/GO mac should have its own multicast station.
*/
@@ -347,7 +350,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
return -EINVAL;
if (flush)
- iwl_mvm_flush_sta(mvm, int_sta, true);
+ iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid);
@@ -467,7 +470,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
@@ -512,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
}
-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta,
- struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
unsigned int link_id;
int ret;
lockdep_assert_held(&mvm->mutex);
- for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
- if (!rcu_access_pointer(sta->link[link_id]) ||
- mvm_sta->link[link_id])
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON(mvm_sta->link[link_id]))
continue;
ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
@@ -616,9 +619,6 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta,
}
}
-/* FIXME: consider waiting for mac80211 to add the STA instead of allocating
- * queues here
- */
static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -697,6 +697,8 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* at this stage sta link pointers are already allocated */
ret = iwl_mvm_mld_update_sta(mvm, vif, sta);
+ if (ret)
+ goto err;
for_each_sta_active_link(vif, sta, link_sta, link_id) {
struct ieee80211_bss_conf *link_conf =
@@ -705,8 +707,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!link_conf || !mvm_link_sta))
+ if (WARN_ON(!link_conf || !mvm_link_sta)) {
+ ret = -EINVAL;
goto err;
+ }
ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
mvm_link_sta);
@@ -719,7 +723,6 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id],
mvm_link_sta);
}
-
return 0;
err:
@@ -845,16 +848,23 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
link_id, stay_in_fw);
}
+ kfree(mvm_sta->mpdu_counters);
+ mvm_sta->mpdu_counters = NULL;
return ret;
}
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
{
- int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+ int ret;
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
+ return 0;
+
+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
+
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
return ret;
@@ -870,6 +880,9 @@ void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
cmd.sta_id = cpu_to_le32(mvmsta->deflink.sta_id);
cmd.disable = cpu_to_le32(disable);
+ if (WARN_ON(iwl_mvm_has_no_host_disable_tx(mvm)))
+ return;
+
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, STA_DISABLE_TX_CMD),
CMD_ASYNC, sizeof(cmd), &cmd);
@@ -977,6 +990,10 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
int baid;
+ /* mac80211 will remove sessions later, but we ignore all that */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) {
@@ -997,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
cmd.modify.tid = cpu_to_le32(data->tid);
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+ sizeof(cmd), &cmd);
data->sta_mask = new_sta_mask;
if (ret)
return ret;
@@ -1104,15 +1122,37 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
link_sta_dereference_protected(sta, link_id);
mvm_vif_link = mvm_vif->link[link_id];
- if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta ||
- mvm_sta->link[link_id])) {
+ if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) {
ret = -EINVAL;
goto err;
}
- ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
- if (WARN_ON(ret))
- goto err;
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ u32 sta_id;
+
+ if (WARN_ON(!mvm_link_sta)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sta_id = mvm_link_sta->sta_id;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+ rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id],
+ link_sta);
+ } else {
+ if (WARN_ON(mvm_sta->link[link_id])) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta,
+ link_id);
+ if (WARN_ON(ret))
+ goto err;
+ }
link_sta->agg.max_rc_amsdu_len = 1;
ieee80211_sta_recalc_aggregates(sta);
diff --git a/mvm/mvm.h b/mvm/mvm.h
index b18c91c5dd5d..22f48b66d79c 100644
--- a/mvm/mvm.h
+++ b/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/cleanup.h>
#include <linux/leds.h>
#include <linux/in6.h>
@@ -23,7 +24,7 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "fw/notif-wait.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "fw/file.h"
#include "iwl-config.h"
#include "sta.h"
@@ -40,8 +41,9 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
-#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
@@ -81,14 +83,9 @@ extern const struct ieee80211_ops iwl_mvm_mld_hw_ops;
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
- * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
- * We will register to mac80211 to have testmode working. The NIC must not
- * be up'ed after the INIT fw asserted. This is useful to be able to use
- * proprietary tools over testmode to debug the INIT fw.
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
- bool init_dbg;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -105,6 +102,7 @@ struct iwl_mvm_phy_ctxt {
/* track for RLC config command */
u32 center_freq1;
bool rlc_disabled;
+ u32 channel_load_by_us;
};
struct iwl_mvm_time_event_data {
@@ -121,15 +119,16 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
+ s8 link_id;
};
/* Power management */
/**
* enum iwl_power_scheme
- * @IWL_POWER_LEVEL_CAM - Continuously Active Mode
- * @IWL_POWER_LEVEL_BPS - Balanced Power Save (default)
- * @IWL_POWER_LEVEL_LP - Low Power
+ * @IWL_POWER_SCHEME_CAM: Continuously Active Mode
+ * @IWL_POWER_SCHEME_BPS: Balanced Power Save (default)
+ * @IWL_POWER_SCHEME_LP: Low Power
*/
enum iwl_power_scheme {
IWL_POWER_SCHEME_CAM = 1,
@@ -137,7 +136,6 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
-#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -218,7 +216,7 @@ enum iwl_bt_force_ant_mode {
};
/**
- * struct iwl_mvm_low_latency_force - low latency force mode set by debugfs
+ * enum iwl_mvm_low_latency_force - low latency force mode set by debugfs
* @LOW_LATENCY_FORCE_UNSET: unset force mode
* @LOW_LATENCY_FORCE_ON: for low latency on
* @LOW_LATENCY_FORCE_OFF: for low latency off
@@ -232,7 +230,7 @@ enum iwl_mvm_low_latency_force {
};
/**
-* struct iwl_mvm_low_latency_cause - low latency set causes
+* enum iwl_mvm_low_latency_cause - low latency set causes
* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
* @LOW_LATENCY_VCMD: low latency mode set from vendor command
@@ -253,18 +251,14 @@ enum iwl_mvm_low_latency_cause {
};
/**
-* struct iwl_mvm_vif_bf_data - beacon filtering related data
-* @bf_enabled: indicates if beacon filtering is enabled
-* @ba_enabled: indicated if beacon abort is enabled
+* struct iwl_mvm_link_bf_data - beacon filtering related data
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
* @bt_coex_min_thold: minimum threshold for BT coex
* @bt_coex_max_thold: maximum threshold for BT coex
* @last_bt_coex_event: rssi of the last BT coex event
*/
-struct iwl_mvm_vif_bf_data {
- bool bf_enabled;
- bool ba_enabled;
+struct iwl_mvm_link_bf_data {
int ave_beacon_signal;
int last_cqm_event;
int bt_coex_min_thold;
@@ -302,7 +296,12 @@ struct iwl_probe_resp_data {
* @queue_params: QoS params for this MAC
* @mgmt_queue: queue number for unbufferable management frames
* @igtk: the current IGTK programmed into the firmware
+ * @active: indicates the link is active in FW (for sanity checking)
+ * @cab_queue: content-after-beacon (multicast) queue
* @listen_lmac: indicates this link is allocated to the listen LMAC
+ * @mcast_sta: multicast station
+ * @phy_ctxt: phy context allocated to this link, if any
+ * @bf_data: beacon filtering data
*/
struct iwl_mvm_vif_link_info {
u8 bssid[ETH_ALEN];
@@ -338,10 +337,71 @@ struct iwl_mvm_vif_link_info {
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
u16 mgmt_queue;
+
+ struct iwl_mvm_link_bf_data bf_data;
+};
+
+/**
+ * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or
+ * blocked.
+ * The low 16 bits are used for blocking reasons, and the upper 16 bits
+ * are used for exit reasons.
+ * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
+ * reasons - use iwl_mvm_exit_esr().
+ *
+ * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
+ *
+ * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
+ * in a loop.
+ * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
+ * @IWL_MVM_ESR_BLOCKED_FW: the FW did not recommend EMLSR, or forced an exit from it
+ * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ * preventing EMLSR
+ * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
+ * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
+ * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
+ * due to low RSSI.
+ * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
+ * due to BT Coex.
+ * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links
+ * preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
+ * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
+ */
+enum iwl_mvm_esr_state {
+ IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
+ IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
+ IWL_MVM_ESR_BLOCKED_TPT = 0x4,
+ IWL_MVM_ESR_BLOCKED_FW = 0x8,
+ IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
+ IWL_MVM_ESR_BLOCKED_ROC = 0x20,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
+ IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
+ IWL_MVM_ESR_EXIT_COEX = 0x40000,
+ IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
+ IWL_MVM_ESR_EXIT_CSA = 0x100000,
+ IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
+};
+
+#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
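Since blocking reasons occupy the low 16 bits and exit reasons the upper 16, a single mask test distinguishes the two classes. A minimal, hypothetical sketch (the helper name is illustrative, not part of the patch):

static inline bool example_esr_reason_is_blocking(enum iwl_mvm_esr_state reason)
{
	/* blocking reasons live in the low 16 bits */
	return !!(reason & IWL_MVM_BLOCK_ESR_REASONS);
}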
+
+/**
+ * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
+ * @reason: The reason for the last exit from EMLSR, one of
+ * &iwl_mvm_prevent_esr_reasons. 0 until the first exit from EMLSR.
+ * @ts: the time stamp of the last time we exited EMLSR.
+ */
+struct iwl_mvm_esr_exit {
+ unsigned long ts;
+ enum iwl_mvm_esr_state reason;
};
/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @mvm: pointer back to the mvm struct
* @id: between 0 and 3
* @color: to solve races upon MAC addition and removal
* @associated: indicates that we're currently associated, used only for
@@ -363,7 +423,65 @@ struct iwl_mvm_vif_link_info {
* @csa_countdown: indicates that CSA countdown may be started
* @csa_failed: CSA failed to schedule time event, report an error later
* @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel
+ * @csa_blocks_tx: CSA is blocking TX
* @features: hw features active for this vif
+ * @max_tx_op: max TXOP in usecs for all ACs, zero for no limit.
+ * @ap_beacon_time: AP beacon time for synchronisation (on older FW)
+ * @bf_enabled: indicates if beacon filtering is enabled
+ * @ba_enabled: indicates if beacon abort is enabled
+ * @bcn_prot: beacon protection data (keys; FIXME: needs to be per link)
+ * @deflink: default link data for use in non-MLO
+ * @link: link data for each link in MLO
+ * @esr_active: indicates eSR mode is active
+ * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
+ * @pm_enabled: indicates powersave is enabled
+ * @link_selection_res: bitmap of active links as decided in the last
+ * link selection. Valid only for an MLO vif after association; 0 if no
+ * link selection has taken place yet.
+ * @link_selection_primary: primary link selected by link selection
+ * @primary_link: primary link in eSR. Valid only for an associated MLD vif,
+ * and in eSR mode. Valid only for a STA.
+ * @last_esr_exit: Details of the last exit from EMLSR.
+ * @exit_same_reason_count: The number of times we exited due to the specified
+ * @last_esr_exit::reason, only counting exits due to
+ * &IWL_MVM_ESR_PREVENT_REASONS.
+ * @prevent_esr_done_wk: work that should be done when esr prevention ends.
+ * @mlo_int_scan_wk: work for the internal MLO scan.
+ * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
+ * @roc_activity: currently running ROC activity for this vif (or
+ * ROC_NUM_ACTIVITIES if no activity is running).
+ * @session_prot_connection_loss: the connection was lost due to session
+ * protection ending without receiving a beacon, so we need to now
+ * protect the deauth separately
+ * @ap_early_keys: The firmware cannot install keys before stations etc.,
+ * but higher layers work differently, so we store the keys here for
+ * later installation.
+ * @ap_sta: pointer to the AP STA data structure
+ * @csa_count: CSA counter (old CSA implementation w/o firmware)
+ * @csa_misbehave: CSA AP misbehaviour flag (old implementation)
+ * @csa_target_freq: CSA target channel frequency (old implementation)
+ * @csa_work: CSA work (old implementation)
+ * @dbgfs_bf: beamforming debugfs data
+ * @dbgfs_dir: debugfs directory for this vif
+ * @dbgfs_pm: power management debugfs data
+ * @dbgfs_quota_min: debugfs value for minimal quota
+ * @dbgfs_slink: debugfs symlink for this interface
+ * @ftm_unprotected: unprotected FTM debugfs override
+ * @hs_time_event_data: hotspot/AUX ROC time event data
+ * @mac_pwr_cmd: debugfs override for MAC power command
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @rekey_data: rekeying data for WoWLAN GTK rekey offload
+ * @seqno: storage for seqno for older firmware D0/D3 transition
+ * @seqno_valid: indicates @seqno is valid
+ * @time_event_data: session protection time event data
+ * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that)
+ * @tx_key_idx: WEP transmit key index for D3
+ * @uapsd_misbehaving_ap_addr: MLD address/BSSID of a U-APSD misbehaving AP,
+ * so that U-APSD is not used on reconnection
+ * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation
+ * in U-APSD
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@@ -377,6 +495,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
+ bool session_prot_connection_loss;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -384,8 +503,10 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
+ u32 esr_disable_reason;
u32 ap_beacon_time;
- struct iwl_mvm_vif_bf_data bf_data;
+ bool bf_enabled;
+ bool ba_enabled;
#ifdef CONFIG_PM
/* WoWLAN GTK rekey data */
@@ -419,6 +540,7 @@ struct iwl_mvm_vif {
struct iwl_dbgfs_bf dbgfs_bf;
struct iwl_mac_power_cmd mac_pwr_cmd;
int dbgfs_quota_min;
+ bool ftm_unprotected;
#endif
/* FW identified misbehaving AP */
@@ -428,6 +550,7 @@ struct iwl_mvm_vif {
bool csa_countdown;
bool csa_failed;
bool csa_bcn_pending;
+ bool csa_blocks_tx;
u16 csa_target_freq;
u16 csa_count;
u16 csa_misbehave;
@@ -437,6 +560,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data time_event_data;
struct iwl_mvm_time_event_data hs_time_event_data;
+ enum iwl_roc_activity roc_activity;
/* TCP Checksum Offload */
netdev_features_t features;
@@ -450,6 +574,17 @@ struct iwl_mvm_vif {
struct ieee80211_key_conf __rcu *keys[2];
} bcn_prot;
+ u16 max_tx_op;
+
+ u16 link_selection_res;
+ u8 link_selection_primary;
+ u8 primary_link;
+ struct iwl_mvm_esr_exit last_esr_exit;
+ u8 exit_same_reason_count;
+ struct wiphy_delayed_work prevent_esr_done_wk;
+ struct wiphy_delayed_work mlo_int_scan_wk;
+ struct wiphy_work unblock_esr_tpt_wk;
+
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
};
@@ -474,10 +609,12 @@ enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),
+ IWL_MVM_SCAN_INT_MLO = BIT(3),
IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+ IWL_MVM_SCAN_STOPPING_INT_MLO = BIT(11),
IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
IWL_MVM_SCAN_STOPPING_REGULAR,
@@ -485,6 +622,8 @@ enum iwl_scan_status {
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
+ IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
+ IWL_MVM_SCAN_STOPPING_INT_MLO,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
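The new INT_MLO bits follow the same running/stopping pattern as the other scan types, so an internal MLO scan can be detected with a mask test; a hypothetical sketch, assuming mvm->scan_status carries these bits as it does for the existing scan types:

static bool example_int_mlo_scan_active(struct iwl_mvm *mvm)
{
	/* covers both the running and the stopping state */
	return !!(mvm->scan_status & IWL_MVM_SCAN_INT_MLO_MASK);
}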
@@ -506,7 +645,7 @@ enum iwl_mvm_sched_scan_pass_all_states {
};
/**
- * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: is dynamic SMPS enabled due to thermal throttling?
* @tx_backoff: The current thermal throttling tx backoff in uSec.
@@ -525,14 +664,12 @@ struct iwl_mvm_tt_mgmt {
#ifdef CONFIG_THERMAL
/**
- *struct iwl_mvm_thermal_device - thermal zone related data
- * @temp_trips: temperature thresholds for report
- * @fw_trips_index: keep indexes to original array - temp_trips
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @trips: temperature thresholds for report
* @tzone: thermal zone device data
*/
struct iwl_mvm_thermal_device {
struct thermal_trip trips[IWL_MAX_DTS_TRIPS];
- u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
struct thermal_zone_device *tzone;
};
@@ -631,57 +768,32 @@ struct iwl_mvm_tcm {
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
* @num_stored: number of mpdus stored in the buffer
- * @buf_size: the reorder buffer size as set by the last addba request
* @queue: queue of this reorder buffer
* @last_amsdu: track last A-MSDU SN for duplication detection
* @last_sub_index: track A-MSDU sub frame index for duplication detection
- * @reorder_timer: timer for frames are in the reorder buffer. For AMSDU
- * it is the time of last received sub-frame
- * @removed: prevent timer re-arming
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
- * @mvm: mvm pointer, needed for frame timer context
- * @consec_oldsn_drops: consecutive drops due to old SN
- * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track
- * when to apply old SN consecutive drop workaround
- * @consec_oldsn_prev_drop: track whether or not an MPDU
- * that was single/part of the previous A-MPDU was
- * dropped due to old SN
*/
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
- struct timer_list reorder_timer;
- bool removed;
bool valid;
spinlock_t lock;
- struct iwl_mvm *mvm;
- unsigned int consec_oldsn_drops;
- u32 consec_oldsn_ampdu_gp2;
- unsigned int consec_oldsn_prev_drop:1;
} ____cacheline_aligned_in_smp;
/**
- * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * struct iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
* @frames: list of skbs stored
- * @reorder_time: time the packet was stored in the reorder buffer
*/
-struct _iwl_mvm_reorder_buf_entry {
- struct sk_buff_head frames;
- unsigned long reorder_time;
-};
-
-/* make this indirection to get the aligned thing */
struct iwl_mvm_reorder_buf_entry {
- struct _iwl_mvm_reorder_buf_entry e;
+ struct sk_buff_head frames;
}
#ifndef __CHECKER__
/* sparse doesn't like this construct: "bad integer constant expression" */
-__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
+__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
#endif
;
@@ -689,15 +801,18 @@ __aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
* struct iwl_mvm_baid_data - BA session data
* @sta_mask: current station mask for the BAID
* @tid: tid of the session
- * @baid baid of the session
+ * @baid: baid of the session
* @timeout: the timeout set in the addba request
+ * @buf_size: the reorder buffer size as set by the last addba request
* @entries_per_queue: # of buffers per queue, this actually gets
* aligned up to avoid cache line sharing between queues
* @last_rx: last rx jiffies, updated only if timeout passed from last update
* @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @rcu_ptr: BA data RCU protected access
+ * @rcu_head: RCU head for freeing this data
* @mvm: mvm pointer, needed for timer context
* @reorder_buf: reorder buffer, allocated per queue
- * @reorder_buf_data: data
+ * @entries: data
*/
struct iwl_mvm_baid_data {
struct rcu_head rcu_head;
@@ -705,13 +820,14 @@ struct iwl_mvm_baid_data {
u8 tid;
u8 baid;
u16 timeout;
+ u16 buf_size;
u16 entries_per_queue;
unsigned long last_rx;
struct timer_list session_timer;
struct iwl_mvm_baid_data __rcu **rcu_ptr;
struct iwl_mvm *mvm;
struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
- struct iwl_mvm_reorder_buf_entry entries[];
+ struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
};
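With buf_size now stored next to entries_per_queue, a per-queue reorder entry can be located by indexing the flexible entries[] array; an illustrative sketch only, not a quote of the driver code:

static struct iwl_mvm_reorder_buf_entry *
example_baid_entry(struct iwl_mvm_baid_data *data, int queue, u16 sn)
{
	/* each RX queue owns entries_per_queue consecutive slots */
	return &data->entries[queue * data->entries_per_queue +
			      sn % data->buf_size];
}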
static inline struct iwl_mvm_baid_data *
@@ -759,9 +875,10 @@ struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
atomic_t tx_request;
-#define IWL_MVM_TXQ_STATE_STOP_FULL 0
-#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1
-#define IWL_MVM_TXQ_STATE_READY 2
+#define IWL_MVM_TXQ_STATE_READY 0
+#define IWL_MVM_TXQ_STATE_STOP_FULL 1
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 2
+#define IWL_MVM_TXQ_STATE_STOP_AP_CSA 3
unsigned long state;
};
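The IWL_MVM_TXQ_STATE_* values are bit numbers in the queue's state word, so the new STOP_AP_CSA state is tested with the usual bitops; a hypothetical helper:

static bool example_txq_stopped_for_csa(struct iwl_mvm_txq *mvmtxq)
{
	return test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
}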
@@ -839,6 +956,35 @@ struct iwl_mei_scan_filter {
struct work_struct scan_work;
};
+/**
+ * struct iwl_mvm_acs_survey_channel - per-channel survey information
+ *
+ * Stripped down version of &struct survey_info.
+ *
+ * @time: time in ms the radio was on the channel
+ * @time_busy: time in ms the channel was sensed busy
+ * @time_tx: time in ms spent transmitting data
+ * @time_rx: time in ms spent receiving data
+ * @noise: channel noise in dBm
+ */
+struct iwl_mvm_acs_survey_channel {
+ u32 time;
+ u32 time_busy;
+ u32 time_tx;
+ u32 time_rx;
+ s8 noise;
+};
+
+struct iwl_mvm_acs_survey {
+ struct iwl_mvm_acs_survey_channel *bands[NUM_NL80211_BANDS];
+
+ /* Overall number of channels */
+ int n_channels;
+
+ /* Storage space for per-channel information follows */
+ struct iwl_mvm_acs_survey_channel channels[] __counted_by(n_channels);
+};
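The ACS survey storage keeps one iwl_mvm_acs_survey_channel per scanned channel in the flexible channels[] array, with bands[] pointing into that storage per band; a hypothetical walk over the collected data:

static void example_dump_acs_survey(const struct iwl_mvm_acs_survey *survey)
{
	int i;

	for (i = 0; i < survey->n_channels; i++)
		pr_debug("chan %d: on %u ms, busy %u ms, noise %d dBm\n",
			 i, survey->channels[i].time,
			 survey->channels[i].time_busy,
			 survey->channels[i].noise);
}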
+
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -855,6 +1001,11 @@ struct iwl_mvm {
spinlock_t async_handlers_lock;
struct work_struct async_handlers_wk;
+ /* For async rx handlers that require the wiphy lock */
+ struct wiphy_work async_handlers_wiphy_wk;
+
+ struct wiphy_work trig_link_selection_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -925,7 +1076,6 @@ struct iwl_mvm {
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX];
- unsigned long fw_link_ids_map;
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -947,13 +1097,14 @@ struct iwl_mvm {
unsigned int max_scans;
/* UMAC scan tracking */
- u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+ u32 scan_uid_status[IWL_MAX_UMAC_SCANS];
/* start time of last scan in TSF of the mac that requested the scan */
u64 scan_start;
/* the vif that requested the current scan */
struct iwl_mvm_vif *scan_vif;
+ u8 scan_link_id;
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
@@ -967,6 +1118,9 @@ struct iwl_mvm {
u8 scan_last_antenna_idx; /* to toggle TX between antennas */
u8 mgmt_last_antenna_idx;
+ u8 set_tx_ant;
+ u8 set_rx_ant;
+
/* last smart fifo state that was successfully sent to firmware */
enum iwl_sf_state sf_state;
@@ -1033,6 +1187,7 @@ struct iwl_mvm {
struct ieee80211_channel **nd_channels;
int n_nd_channels;
bool net_detect;
+ bool fast_resume;
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
@@ -1187,17 +1342,29 @@ struct iwl_mvm {
struct iwl_phy_specific_cfg phy_filters;
#endif
+ /* report rx timestamp in ptp clock time */
+ bool rx_ts_ptp;
+
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
bool mld_api_is_used;
- bool pldr_sync;
+ /*
+ * Indicates that the firmware will do a product reset (and
+ * therefore fail to load) when we start it (due to OTP burn);
+ * if so, don't dump errors etc. since this is expected.
+ */
+ bool fw_product_reset;
struct iwl_time_sync_data time_sync;
struct iwl_mei_scan_filter mei_scan_filter;
+
+ struct iwl_mvm_acs_survey *acs_survey;
+
+ bool statistics_clear;
};
/* Extract MVM priv from op_mode and _hw */
@@ -1207,16 +1374,18 @@ struct iwl_mvm {
#define IWL_MAC80211_GET_MVM(_hw) \
IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))
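DEFINE_GUARD() lets callers take mvm->mutex with scope-based cleanup (see the guard(mvm)(mvm) conversions in ops.c below); a minimal sketch of the resulting pattern, with a hypothetical function name:

static int example_with_mvm_guard(struct iwl_mvm *mvm)
{
	guard(mvm)(mvm);	/* takes mvm->mutex */

	/* mvm->mutex is dropped automatically when this scope ends */
	return iwl_mvm_request_statistics(mvm, false);
}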
+
/**
* enum iwl_mvm_status - MVM status bits
* @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
* @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
- * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when
+ * P2P is not over AUX)
* @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
- * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
@@ -1226,12 +1395,11 @@ struct iwl_mvm {
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
- IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_ROC_P2P_RUNNING,
IWL_MVM_STATUS_HW_RESTART_REQUESTED,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
- IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
@@ -1318,7 +1486,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
static inline struct ieee80211_bss_conf *
iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
{
- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
+ "erroneous FW link ID: %d\n", link_id))
return NULL;
if (rcu)
@@ -1515,6 +1684,12 @@ static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
}
+static inline bool iwl_mvm_has_no_host_disable_tx(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX);
+}
+
static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
@@ -1566,6 +1741,7 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
+ /* Check if HW supports eSR or STR */
if (iwl_mvm_is_esr_supported(trans) ||
(CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
@@ -1576,12 +1752,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- return iwl_mvm_has_new_tx_api(mvm) ?
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
}
struct iwl_rate_info {
@@ -1592,7 +1772,7 @@ struct iwl_rate_info {
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
};
-void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend);
int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
/******************
@@ -1626,6 +1806,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1658,7 +1839,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask);
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
/* Utils to extract sta related data */
@@ -1689,11 +1870,23 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
}
/* Statistics */
+void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+static inline void
+iwl_mvm_handle_rx_system_end_stats_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+}
+
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt);
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm,
+ bool enable);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
@@ -1702,16 +1895,29 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
{
- return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
- mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
- mvm->fw->valid_tx_ant;
+ u8 tx_ant = mvm->fw->valid_tx_ant;
+
+ if (mvm->nvm_data && mvm->nvm_data->valid_tx_ant)
+ tx_ant &= mvm->nvm_data->valid_tx_ant;
+
+ if (mvm->set_tx_ant)
+ tx_ant &= mvm->set_tx_ant;
+
+ return tx_ant;
}
static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
{
- return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
- mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
- mvm->fw->valid_rx_ant;
+ u8 rx_ant = mvm->fw->valid_rx_ant;
+
+ if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant)
+ rx_ant &= mvm->nvm_data->valid_rx_ant;
+
+ if (mvm->set_rx_ant)
+ rx_ant &= mvm->set_rx_ant;
+
+ return rx_ant;
}
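With the set_tx_ant/set_rx_ant overrides, the valid antenna mask becomes the intersection of the firmware, NVM and user-set masks; a hypothetical sketch of that combination:

static u8 example_valid_ant_mask(u8 fw_ant, u8 nvm_ant, u8 set_ant)
{
	u8 ant = fw_ant;

	/* each optional mask only narrows the firmware capability */
	if (nvm_ant)
		ant &= nvm_ant;
	if (set_ant)
		ant &= set_ant;

	return ant;	/* e.g. 0x3 & 0x3 & 0x1 == 0x1: antenna A only */
}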
static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
@@ -1737,6 +1943,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
+
/*
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
@@ -1773,18 +1981,20 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
/* MVM PHY */
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm);
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef);
int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
u8 chains_static, u8 chains_dynamic);
@@ -1875,16 +2085,40 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band);
/* Links */
+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active);
+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
+
+struct iwl_mvm_link_sel_data {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ s32 signal;
+ u16 grade;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b);
+
+s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
+#endif
+
/* AP and IBSS */
bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int *ret);
@@ -1892,41 +2126,10 @@ void iwl_mvm_stop_ap_ibss_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
/* BSS Info */
-/**
- * struct iwl_mvm_bss_info_changed_ops - callbacks for the bss_info_changed()
- *
- * Since the only difference between both MLD and
- * non-MLD versions of bss_info_changed() is these function calls,
- * each version will send its specific function calls to
- * %iwl_mvm_bss_info_changed_common().
- *
- * @bss_info_changed_sta: pointer to the function that handles changes
- * in bss_info in sta mode
- * @bss_info_changed_ap_ibss: pointer to the function that handles changes
- * in bss_info in ap and ibss modes
- */
-struct iwl_mvm_bss_info_changed_ops {
- void (*bss_info_changed_sta)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u64 changes);
- void (*bss_info_changed_ap_ibss)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u64 changes);
-};
-
-void
-iwl_mvm_bss_info_changed_common(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- const struct iwl_mvm_bss_info_changed_ops *callbacks,
- u64 changes);
-void
-iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *link_conf,
- u64 changes);
+void iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ u64 changes);
void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u64 changes);
@@ -1942,13 +2145,12 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
*
* @add_aux_sta_for_hs20: pointer to the function that adds an aux sta
* for Hot Spot 2.0
- * @switch_phy_ctxt: pointer to the function that switches a vif from one
- * phy_ctx to another
+ * @link: For a P2P Device interface, pointer to a function that links the
+ * MAC/Link to the PHY context
*/
struct iwl_mvm_roc_ops {
int (*add_aux_sta_for_hs20)(struct iwl_mvm *mvm, u32 lmac_id);
- int (*switch_phy_ctxt)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_mvm_phy_ctxt *new_phy_ctxt);
+ int (*link)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
};
int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -1959,7 +2161,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
/*Session Protection */
void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- u32 duration_override);
+ u32 duration_override, unsigned int link_id);
/* Quota management */
static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
@@ -1992,9 +2194,13 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies);
size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -2019,18 +2225,19 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm);
-void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
}
static inline void
-iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
static inline void
-iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
@@ -2087,11 +2294,22 @@ extern const struct file_operations iwl_dbgfs_d3_test_ops;
#ifdef CONFIG_PM
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm);
#else
static inline void
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
+
+static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+}
+
+static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ return 0;
+}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd *cmd);
@@ -2099,7 +2317,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
bool offload_ns,
- u32 cmd_flags);
+ u32 cmd_flags,
+ u8 sta_id);
/* BT Coex */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -2132,11 +2351,9 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
{}
#endif
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
@@ -2263,7 +2480,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
bool *changed);
struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
bool *changed);
-int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm, bool force_regd_sync);
void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
/* smart fifo */
@@ -2316,7 +2533,8 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool sta_added);
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ unsigned int link_id);
int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 oper_class,
@@ -2335,7 +2553,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
enum iwl_mvm_rxq_notif_type type,
bool sync,
const void *data, u32 size);
-void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
@@ -2369,12 +2586,16 @@ u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time);
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
struct dentry *dir);
+void iwl_mvm_link_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir);
#endif
/* new MLD related APIs */
@@ -2386,6 +2607,10 @@ int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf);
void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_vif_link_info *link,
@@ -2424,10 +2649,26 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
}
}
+static inline u8 iwl_mvm_nl80211_band_from_phy(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ case PHY_BAND_6:
+ return NL80211_BAND_6GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
/* Channel Switch */
void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk);
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link);
/* Channel Context */
/**
@@ -2509,7 +2750,7 @@ static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
static inline void
iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
- struct cfg80211_chan_def *chandef)
+ const struct cfg80211_chan_def *chandef)
{
enum nl80211_band band = chandef->chan->band;
@@ -2580,6 +2821,13 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
sw_rfkill);
}
+static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
+
+ return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4;
+}
+
static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
struct sk_buff *skb)
{
@@ -2599,7 +2847,6 @@ static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
-bool iwl_mvm_is_vendor_in_approved_list(void);
/* Callbacks for ieee80211_ops */
void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -2611,10 +2858,11 @@ int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params);
int iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
+int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int iwl_mvm_mac_start(struct ieee80211_hw *hw);
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
-void iwl_mvm_mac_stop(struct ieee80211_hw *hw);
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend);
static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
@@ -2682,15 +2930,14 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx, u32 changed);
int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
-int iwl_mvm_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
- bool set);
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
-int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
@@ -2725,4 +2972,58 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_set_hw_timestamp *hwts);
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx);
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx);
+
+static inline struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
+{
+ bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
+ iwl_mvm_enable_fils(mvm, ctx);
+
+ return use_def ? &ctx->def : &ctx->min_def;
+}
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay);
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, enum iwl_roc_activity activity);
+
+/* EMLSR */
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s32 link_rssi,
+ bool primary);
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active);
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap);
#endif /* __IWL_MVM_H__ */
diff --git a/mvm/nvm.c b/mvm/nvm.c
index f67ab8ee18c2..836ca22597bc 100644
--- a/mvm/nvm.c
+++ b/mvm/nvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,8 +9,7 @@
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
-#include "iwl-eeprom-parse.h"
-#include "iwl-eeprom-read.h"
+#include "iwl-nvm-utils.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "fw/acpi.h"
@@ -220,6 +219,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
+ u8 tx_ant = mvm->fw->valid_tx_ant;
+ u8 rx_ant = mvm->fw->valid_rx_ant;
int regulatory_type;
/* Checking for required sections */
@@ -270,9 +271,15 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+ if (mvm->set_tx_ant)
+ tx_ant &= mvm->set_tx_ant;
+
+ if (mvm->set_rx_ant)
+ rx_ant &= mvm->set_rx_ant;
+
return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
regulatory, mac_override, phy_sku,
- mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
+ tx_ant, rx_ant);
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
@@ -565,7 +572,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
* try to replay the last set MCC to FW. If it doesn't exist,
* queue an update to cfg80211 to retrieve the default alpha2 from FW.
*/
- retval = iwl_mvm_init_fw_regd(mvm);
+ retval = iwl_mvm_init_fw_regd(mvm, true);
if (retval != -ENOENT)
return retval;
@@ -582,7 +589,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+ !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/mvm/offloading.c b/mvm/offloading.c
index dfb16ca5b438..1eb21fe861e5 100644
--- a/mvm/offloading.c
+++ b/mvm/offloading.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2021-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -30,7 +30,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
bool offload_ns,
- u32 cmd_flags)
+ u32 cmd_flags,
+ u8 sta_id)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
@@ -205,6 +206,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
if (!disable_offloading)
common->enabled = cpu_to_le32(enabled);
+ if (ver >= 4)
+ cmd.v4.sta_id = cpu_to_le32(sta_id);
+
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}
diff --git a/mvm/ops.c b/mvm/ops.c
index 5336a4afde4d..b9daaffd9c7f 100644
--- a/mvm/ops.c
+++ b/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -18,7 +18,7 @@
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
@@ -41,12 +41,8 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
- /* rest of fields are 0 by default */
};
-module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
-MODULE_PARM_DESC(init_dbg,
- "set to true to debug an ASSERT in INIT fw (default: false");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
@@ -145,6 +141,24 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
+static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+
+ /* FW recommendations are only for entering EMLSR */
+ if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
+ return;
+
+ if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
+ else
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
+ iwl_mvm_get_primary_link(vif));
+}
+
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -161,9 +175,9 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.chandef.chan ||
- vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
- vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->cfg.assoc)
@@ -219,7 +233,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
return;
if (mvm->fw_static_smps_request &&
- link_conf->chandef.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
link_conf->he_support)
mode = IEEE80211_SMPS_STATIC;
@@ -259,7 +273,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
}
/**
- * enum iwl_rx_handler_context context for Rx handler
+ * enum iwl_rx_handler_context: context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
* which can't acquire mvm->mutex.
* @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
@@ -267,15 +281,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
* it will be called from a worker with mvm->mutex held.
* @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
* mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue infra
+ * instead of regular work queue.
*/
enum iwl_rx_handler_context {
RX_HANDLER_SYNC,
RX_HANDLER_ASYNC_LOCKED,
RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
};
/**
- * struct iwl_rx_handlers handler for FW notification
+ * struct iwl_rx_handlers: handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
@@ -316,12 +334,26 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
+ iwl_mvm_handle_rx_system_oper_stats,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_system_statistics_notif_oper),
+ RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
+ iwl_mvm_handle_rx_system_oper_part1_stats,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_system_statistics_part1_notif_oper),
+ RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF,
+ iwl_mvm_handle_rx_system_end_stats_notif,
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_system_statistics_end_notif),
+
RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
struct iwl_ba_window_status_notif),
@@ -347,13 +379,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_scan_match_found,
RX_HANDLER_SYNC),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_complete),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
- RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
struct iwl_error_resp),
@@ -405,6 +439,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_channel_switch_error_notif,
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+ iwl_mvm_rx_esr_mode_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_mvm_esr_mode_notif),
+
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -426,6 +466,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
struct iwl_time_msmt_cfm_notify),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
+ iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_roc_notif),
+ RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_channel_survey_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -522,6 +568,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(D0I3_END_CMD),
HCMD_NAME(LTR_CONFIG),
HCMD_NAME(LDBG_CONFIG_CMD),
+ HCMD_NAME(DEBUG_LOG_MSG),
};
/* Please keep this array *SORTED* by hex value.
@@ -529,11 +576,14 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(SOC_CONFIGURATION_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
HCMD_NAME(RFI_CONFIG_CMD),
HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_CMD),
+ HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
HCMD_NAME(RFI_DEACTIVATE_NOTIF),
};
@@ -541,15 +591,22 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(LOW_LATENCY_CMD),
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD),
HCMD_NAME(MAC_CONFIG_CMD),
HCMD_NAME(LINK_CONFIG_CMD),
HCMD_NAME(STA_CONFIG_CMD),
HCMD_NAME(AUX_STA_CMD),
HCMD_NAME(STA_REMOVE_CMD),
HCMD_NAME(STA_DISABLE_TX_CMD),
+ HCMD_NAME(ROC_CMD),
+ HCMD_NAME(ROC_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+ HCMD_NAME(MISSED_VAP_NOTIF),
HCMD_NAME(SESSION_PROTECTION_NOTIF),
+ HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};
@@ -561,6 +618,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+ HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -572,6 +630,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RLC_CONFIG_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
@@ -579,6 +639,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(SEC_KEY_CMD),
+ HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -589,7 +650,31 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
+ HCMD_NAME(STATISTICS_OPER_NOTIF),
+ HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(LMAC_RD_WR),
+ HCMD_NAME(UMAC_RD_WR),
+ HCMD_NAME(HOST_EVENT_CFG),
+ HCMD_NAME(DBGC_SUSPEND_RESUME),
+ HCMD_NAME(BUFFER_ALLOCATION),
+ HCMD_NAME(GET_TAS_STATUS),
+ HCMD_NAME(FW_DUMP_COMPLETE_CMD),
+ HCMD_NAME(FW_CLEAR_BUFFER),
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(CHANNEL_SURVEY_NOTIF),
HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
};
@@ -640,10 +725,14 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
+ [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names),
+ [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
};
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -653,7 +742,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
if (!backoff)
return 0;
- dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
while (backoff->pwr) {
if (dflt_pwr_limit >= backoff->pwr)
@@ -672,20 +761,18 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
struct ieee80211_vif *tx_blocked_vif;
struct iwl_mvm_vif *mvmvif;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
tx_blocked_vif =
rcu_dereference_protected(mvm->csa_tx_blocked_vif,
lockdep_is_held(&mvm->mutex));
if (!tx_blocked_vif)
- goto unlock;
+ return;
mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
-unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_fwrt_dump_start(void *ctx)
@@ -702,21 +789,12 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
mutex_unlock(&mvm->mutex);
}
-static bool iwl_mvm_fwrt_fw_running(void *ctx)
-{
- return iwl_mvm_firmware_running(ctx);
-}
-
static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
- int ret;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd(mvm, host_cmd);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_send_cmd(mvm, host_cmd);
}
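
The two hunks above replace explicit mutex_lock()/mutex_unlock() pairs with the scope-based guard() helper from linux/cleanup.h, so early returns no longer need an unlock label. Below is a minimal standalone sketch of the pattern, assuming the driver declares an equivalent guard for struct iwl_mvm in its own headers; demo_ctx and demo_do_work are hypothetical names used only for illustration, not part of this diff.

/* Illustrative sketch only -- not part of the diff above. */
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_ctx {
	struct mutex mutex;
	bool ready;
};

/* the driver is assumed to declare a similar guard for struct iwl_mvm */
DEFINE_GUARD(demo, struct demo_ctx *,
	     mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))

static int demo_do_work(struct demo_ctx *ctx)
{
	guard(demo)(ctx);	/* takes ctx->mutex */

	if (!ctx->ready)
		return -EBUSY;	/* mutex released automatically */

	/* ... critical section ... */
	return 0;		/* mutex released here as well */
}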
static bool iwl_mvm_d3_debug_enable(void *ctx)
@@ -727,7 +805,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
- .fw_running = iwl_mvm_fwrt_fw_running,
.send_hcmd = iwl_mvm_fwrt_send_hcmd,
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
@@ -751,7 +828,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
*/
mvm->nvm_data =
iwl_parse_mei_nvm_data(trans, trans->cfg,
- mvm->mei_nvm_data, mvm->fw);
+ mvm->mei_nvm_data,
+ mvm->fw,
+ mvm->set_tx_ant,
+ mvm->set_rx_ant);
return 0;
}
@@ -780,8 +860,7 @@ get_nvm_from_fw:
ret = iwl_mvm_init_mcc(mvm);
}
- if (!iwlmvm_mod_params.init_dbg || !ret)
- iwl_mvm_stop_device(mvm);
+ iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
wiphy_unlock(mvm->hw->wiphy);
@@ -790,6 +869,9 @@ get_nvm_from_fw:
if (ret)
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+ /* no longer need this regardless of failure or not */
+ mvm->fw_product_reset = false;
+
return ret;
}
@@ -1101,6 +1183,29 @@ static const struct iwl_mei_ops mei_ops = {
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
+static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ iwl_mvm_select_links(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, trig_link_selection_wk);
+
+ mutex_lock(&mvm->mutex);
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_find_link_selection_vif,
+ NULL);
+ mutex_unlock(&mvm->mutex);
+}
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -1136,7 +1241,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return NULL;
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- max_agg = IEEE80211_MAX_AMPDU_BUF_EHT;
+ max_agg = 512;
else
max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
@@ -1159,7 +1264,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
- iwl_mvm_get_acpi_tables(mvm);
+ iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
@@ -1230,6 +1335,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
+
+ wiphy_work_init(&mvm->trig_link_selection_wk,
+ iwl_mvm_trig_link_selection);
+
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1259,24 +1370,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- switch (iwlwifi_mod_params.amsdu_size) {
- case IWL_AMSDU_DEF:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- break;
- case IWL_AMSDU_4K:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- break;
- case IWL_AMSDU_8K:
- trans_cfg.rx_buf_size = IWL_AMSDU_8K;
- break;
- case IWL_AMSDU_12K:
- trans_cfg.rx_buf_size = IWL_AMSDU_12K;
- break;
- default:
- pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
- iwlwifi_mod_params.amsdu_size);
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- }
+ trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size();
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
@@ -1298,7 +1392,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
snprintf(mvm->hw->wiphy->fw_version,
sizeof(mvm->hw->wiphy->fw_version),
- "%s", fw->fw_version);
+ "%.31s", fw->fw_version);
trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
@@ -1336,9 +1430,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
goto out_free;
}
- IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
- mvm->trans->name, mvm->trans->hw_rev);
-
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
else
@@ -1406,8 +1497,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
- if (iwlmvm_mod_params.init_dbg)
- return op_mode;
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(trans);
@@ -1424,6 +1513,8 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm)
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ iwl_mvm_pause_tcm(mvm, false);
+
iwl_fw_dbg_stop_sync(&mvm->fwrt);
iwl_trans_stop_device(mvm->trans);
iwl_free_fw_paging(&mvm->fwrt);
@@ -1484,6 +1575,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->temp_nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ kfree(mvm->acs_survey);
cancel_delayed_work_sync(&mvm->tcm.work);
@@ -1516,35 +1608,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
}
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
- /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
/*
- * Sync with Rx path with a lock. Remove all the entries from this list,
- * add them to a local one (lock free), and then handle them.
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
*/
spin_lock_bh(&mvm->async_handlers_lock);
- list_splice_init(&mvm->async_handlers_list, &local_list);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_unlock(&mvm->mutex);
kfree(entry);
}
}
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
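
The rework above keeps all asynchronous RX handlers on one list and lets each worker pull out only the entries whose context bit it owns: the regular worker runs BIT(RX_HANDLER_ASYNC_LOCKED) | BIT(RX_HANDLER_ASYNC_UNLOCKED), while the wiphy worker runs only BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY). A stripped-down sketch of that dispatch follows; the demo_* names are hypothetical and for illustration only.

/* Illustrative sketch only -- not part of the diff above. */
#include <linux/bits.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum demo_ctx_id {
	DEMO_ASYNC_LOCKED,
	DEMO_ASYNC_UNLOCKED,
	DEMO_ASYNC_LOCKED_WIPHY,
};

struct demo_entry {
	struct list_head list;
	enum demo_ctx_id context;
	void (*fn)(void *arg);
	void *arg;
};

/* run only the queued entries whose context bit is set in @contexts */
static void demo_run_by_context(struct list_head *pending, spinlock_t *lock,
				u8 contexts)
{
	struct demo_entry *entry, *tmp;
	LIST_HEAD(local);

	spin_lock_bh(lock);
	list_for_each_entry_safe(entry, tmp, pending, list)
		if (BIT(entry->context) & contexts)
			list_move_tail(&entry->list, &local);
	spin_unlock_bh(lock);

	list_for_each_entry_safe(entry, tmp, &local, list) {
		entry->fn(entry->arg);
		list_del(&entry->list);
		kfree(entry);
	}
}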
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1624,7 +1743,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
- schedule_work(&mvm->async_handlers_wk);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
break;
}
}
@@ -1668,18 +1791,6 @@ void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}
-static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
- const struct iwl_device_cmd *cmd)
-{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
- /*
- * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
- * commands that need to block the Tx queues.
- */
- iwl_trans_block_txq_ptrs(mvm->trans, false);
-}
-
static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
{
return queue == mvm->aux_queue || queue == mvm->probe_queue ||
@@ -1765,12 +1876,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
- bool state = iwl_mvm_is_radio_killed(mvm);
-
- if (state)
- wake_up(&mvm->rx_sync_waitq);
-
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1932,7 +2039,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
- mvm->fwrt.trans->dbg.restart_required = FALSE;
+ mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
@@ -1944,9 +2051,6 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- if (mvm->pldr_sync)
- return;
-
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
!test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
&mvm->status))
@@ -1990,9 +2094,22 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
}
+static void iwl_op_mode_mvm_device_powered_off(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+ mutex_lock(&mvm->mutex);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ iwl_mvm_stop_device(mvm);
+#ifdef CONFIG_PM
+ mvm->fast_resume = false;
+#endif
+ mutex_unlock(&mvm->mutex);
+}
+
#define IWL_MVM_COMMON_OPS \
/* these could be differentiated */ \
- .async_cb = iwl_mvm_async_cb, \
.queue_full = iwl_mvm_stop_sw_queue, \
.queue_not_full = iwl_mvm_wake_sw_queue, \
.hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
@@ -2003,7 +2120,8 @@ static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
/* as we only register one, these MUST be common! */ \
.start = iwl_op_mode_mvm_start, \
.stop = iwl_op_mode_mvm_stop, \
- .time_point = iwl_op_mode_mvm_time_point
+ .time_point = iwl_op_mode_mvm_time_point, \
+ .device_powered_off = iwl_op_mode_mvm_device_powered_off
static const struct iwl_op_mode_ops iwl_mvm_ops = {
IWL_MVM_COMMON_OPS,
diff --git a/mvm/phy-ctxt.c b/mvm/phy-ctxt.c
index a5b432bc9e2f..ce264b386029 100644
--- a/mvm/phy-ctxt.c
+++ b/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -9,7 +9,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -33,7 +33,7 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
@@ -99,17 +99,6 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
active_cnt = 2;
}
- /*
- * If the firmware requested it, then we know that it supports
- * getting zero for the values to indicate "use one, but pick
- * which one yourself", which means it can dynamically pick one
- * that e.g. has better RSSI.
- */
- if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
- idle_cnt = 0;
- active_cnt = 0;
- }
-
*rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
*rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
@@ -127,7 +116,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
struct iwl_phy_context_cmd_tail *tail =
@@ -148,7 +137,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
@@ -192,6 +181,9 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info,
chains_static, chains_dynamic);
+ IWL_DEBUG_FW(mvm, "Send RLC command: phy=%d, rx_chain_info=0x%x\n",
+ ctxt->id, cmd.rlc.rx_chain_info);
+
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD,
DATA_PATH_GROUP, 2),
0, sizeof(cmd), &cmd);
@@ -205,14 +197,18 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
*/
static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
- if (ver == 3 || ver == 4) {
+ if (ver < 5 || !ap || !ap->chan)
+ ap = NULL;
+
+ if (ver >= 3 && ver <= 6) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@@ -223,6 +219,14 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
chains_static,
chains_dynamic);
+ if (ap) {
+ cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap);
+ cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap);
+ }
+
+ if (ver == 6)
+ cmd.puncture_mask = cpu_to_le16(chandef->punctured);
+
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
0, sizeof(cmd), &cmd);
} else if (ver < 3) {
@@ -262,9 +266,12 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
* Send a command to add a PHY context based on the current HW configuration.
*/
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
+ int ret;
+
WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
ctxt->ref);
lockdep_assert_held(&mvm->mutex);
@@ -273,9 +280,16 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
- chains_static, chains_dynamic,
- FW_CTXT_ACTION_ADD);
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
+ chains_static, chains_dynamic,
+ FW_CTXT_ACTION_ADD);
+
+ if (ret)
+ return ret;
+
+ ctxt->ref++;
+
+ return 0;
}
/*
@@ -285,6 +299,11 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
lockdep_assert_held(&mvm->mutex);
+
+ /* If we were taking the first ref, we should have
+ * called iwl_mvm_phy_ctxt_add.
+ */
+ WARN_ON(!ctxt->ref);
ctxt->ref++;
}
@@ -294,14 +313,19 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* changed.
*/
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
lockdep_assert_held(&mvm->mutex);
- if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 &&
+ if (WARN_ON_ONCE(!ctxt->ref))
+ return -EINVAL;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP,
+ RLC_CONFIG_CMD), 0) >= 2 &&
ctxt->channel == chandef->chan &&
ctxt->width == chandef->width &&
ctxt->center_freq1 == chandef->center_freq1)
@@ -314,7 +338,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
int ret;
/* ... remove it here ...*/
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE);
if (ret)
@@ -328,13 +352,14 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
action);
}
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
{
+ struct cfg80211_chan_def chandef;
lockdep_assert_held(&mvm->mutex);
if (WARN_ON_ONCE(!ctxt))
@@ -342,41 +367,13 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
ctxt->ref--;
- /*
- * Move unused phy's to a default channel. When the phy is moved the,
- * fw will cleanup immediate quiet bit if it was previously set,
- * otherwise we might not be able to reuse this phy.
- */
- if (ctxt->ref == 0) {
- struct ieee80211_channel *chan = NULL;
- struct cfg80211_chan_def chandef;
- struct ieee80211_supported_band *sband;
- enum nl80211_band band;
- int channel;
-
- for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
- sband = mvm->hw->wiphy->bands[band];
-
- if (!sband)
- continue;
-
- for (channel = 0; channel < sband->n_channels; channel++)
- if (!(sband->channels[channel].flags &
- IEEE80211_CHAN_DISABLED)) {
- chan = &sband->channels[channel];
- break;
- }
-
- if (chan)
- break;
- }
+ if (ctxt->ref)
+ return;
- if (WARN_ON(!chan))
- return;
+ cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT);
- cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
- iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
- }
+ iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1,
+ FW_CTXT_ACTION_REMOVE);
}
static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
diff --git a/mvm/power.c b/mvm/power.c
index 9131b5f1bc76..bc363e8427e4 100644
--- a/mvm/power.c
+++ b/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -20,8 +20,7 @@
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd,
- u32 flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
u16 len;
@@ -62,7 +61,7 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
len = offsetof(struct iwl_beacon_filter_cmd,
bf_threshold_absolute_low);
- return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0,
len, cmd);
}
@@ -80,7 +79,7 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
cmd->bf_roaming_state =
cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
}
- cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->ba_enabled);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -212,19 +211,37 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
-static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_allow_uapsd_iface_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool allow_uapsd;
+};
+
+static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- bool *is_p2p_standalone = _data;
+ struct iwl_allow_uapsd_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
- switch (ieee80211_vif_type_p2p(vif)) {
- case NL80211_IFTYPE_P2P_GO:
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
- *is_p2p_standalone = false;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_NAN:
+ data->allow_uapsd = false;
break;
case NL80211_IFTYPE_STATION:
- if (vif->cfg.assoc)
- *is_p2p_standalone = false;
+ /* allow UAPSD if P2P interface and BSS station interface share
+ * the same channel.
+ */
+ if (vif->cfg.assoc && other_mvmvif->deflink.phy_ctxt &&
+ curr_mvmvif->deflink.phy_ctxt &&
+ other_mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
+ data->allow_uapsd = false;
break;
default:
@@ -236,6 +253,10 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_allow_uapsd_iface_iterator_data data = {
+ .current_vif = vif,
+ .allow_uapsd = true,
+ };
if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr,
vif->cfg.ap_addr))
@@ -250,88 +271,75 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
IEEE80211_P2P_OPPPS_ENABLE_BIT))
return false;
- /*
- * Avoid using uAPSD if client is in DCM -
- * low latency issue in Miracast
- */
- if (iwl_mvm_phy_ctx_count(mvm) >= 2)
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
return false;
- if (vif->p2p) {
- /* Allow U-APSD only if p2p is stand alone */
- bool is_p2p_standalone = true;
-
- if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
- return false;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_p2p_standalone_iterator,
- &is_p2p_standalone);
-
- if (!is_p2p_standalone)
- return false;
- }
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_allow_uapsd_iterator,
+ &data);
- return true;
+ return data.allow_uapsd;
}
-static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
+static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_bss_conf *link_conf;
- bool radar_detect = false;
- unsigned int link_id;
- rcu_read_lock();
- for_each_vif_active_link(vif, link_conf, link_id) {
- chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- /* this happens on link switching, just ignore inactive ones */
- if (!chanctx_conf)
- continue;
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- radar_detect = !!(chanctx_conf->def.chan->flags &
- IEEE80211_CHAN_RADAR);
- if (radar_detect)
- goto out;
- }
+ /* this happens on link switching, just ignore inactive ones */
+ if (!chanctx_conf)
+ return false;
-out:
- rcu_read_unlock();
- return radar_detect;
+ return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
}
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
{
- int dtimper = vif->bss_conf.dtim_period ?: 1;
- int skip;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int min_link_skip = ~0;
+ unsigned int link_id;
/* disable, in case we're supposed to override */
cmd->skip_dtim_periods = 0;
cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- if (iwl_mvm_power_is_radar(vif))
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ return;
+ cmd->skip_dtim_periods = 2;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
return;
+ }
- if (dtimper >= 10)
- return;
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ unsigned int dtimper = link_conf->dtim_period ?: 1;
+ unsigned int dtimper_tu = dtimper * link_conf->beacon_int;
+ unsigned int skip;
- if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) {
+ rcu_read_unlock();
return;
- skip = 2;
- } else {
- int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+ }
if (WARN_ON(!dtimper_tu))
- return;
+ continue;
+
/* configure skip over dtim up to 900 TU DTIM interval */
- skip = max_t(u8, 1, 900 / dtimper_tu);
+ skip = max_t(int, 1, 900 / dtimper_tu);
+ min_link_skip = min(min_link_skip, skip);
}
+ rcu_read_unlock();
+
+ /* no WARN_ON, can only happen with WARN_ON above */
+ if (min_link_skip == ~0)
+ return;
- cmd->skip_dtim_periods = skip;
+ cmd->skip_dtim_periods = min_link_skip;
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
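
The rewritten skip-over-DTIM logic above computes a per-link skip count from the DTIM interval in TU and then sends the minimum across all active links. A worked example of the per-link arithmetic, as a standalone sketch rather than driver code:

/* Illustrative sketch only -- mirrors the 900 TU skip calculation above. */
static unsigned int demo_skip_dtim(unsigned int dtim_period,
				   unsigned int beacon_int_tu)
{
	unsigned int dtimper_tu = dtim_period * beacon_int_tu;
	unsigned int skip;

	if (!dtimper_tu)
		return 0;

	/* skip over DTIM for at most ~900 TU, but at least one period */
	skip = 900 / dtimper_tu;
	return skip ? skip : 1;
}

/*
 * e.g. dtim_period = 2, beacon_int = 100 TU -> dtimper_tu = 200  -> skip = 4
 *      dtim_period = 5, beacon_int = 200 TU -> dtimper_tu = 1000 -> skip = 1
 * with several active links, the smallest per-link value is what gets sent.
 */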
@@ -489,6 +497,11 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
if (mvm->ext_clock_valid)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, POWER_TABLE_CMD, 0) >= 7 &&
+ test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
+ cmd.flags |=
+ cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK);
+
IWL_DEBUG_POWER(mvm,
"Sending device power command with flags = 0x%X\n",
cmd.flags);
@@ -808,8 +821,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd,
- u32 cmd_flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -820,29 +832,27 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
- mvmvif->bf_data.bf_enabled = true;
+ mvmvif->bf_enabled = true;
return ret;
}
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -851,19 +861,18 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_data.bf_enabled = false;
+ mvmvif->bf_enabled = false;
return ret;
}
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
- return _iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return _iwl_mvm_disable_beacon_filter(mvm, vif);
}
static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
@@ -903,18 +912,18 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- if (!mvmvif->bf_data.bf_enabled)
+ if (!mvmvif->bf_enabled)
return 0;
if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
- mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
- mvm->ps_disabled ||
- !vif->cfg.ps ||
- iwl_mvm_vif_low_latency(mvmvif));
+ mvmvif->ba_enabled = !(!mvmvif->pm_enabled ||
+ mvm->ps_disabled ||
+ !vif->cfg.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
diff --git a/mvm/rfi.c b/mvm/rfi.c
index 2ecd32bed752..045c862a8fc4 100644
--- a/mvm/rfi.c
+++ b/mvm/rfi.c
@@ -132,14 +132,18 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
if (ret)
return ERR_PTR(ret);
- if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size))
+ if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) !=
+ resp_size)) {
+ iwl_free_resp(&cmd);
return ERR_PTR(-EIO);
+ }
resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL);
+ iwl_free_resp(&cmd);
+
if (!resp)
return ERR_PTR(-ENOMEM);
- iwl_free_resp(&cmd);
return resp;
}
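
The rfi.c change above fixes a response-buffer leak: the payload is duplicated with kmemdup() first, and the command response is released on the size-mismatch path as well as on success. The same pattern in isolation, with hypothetical demo_* names:

/* Illustrative sketch only -- copy the payload out, then free the response
 * on every path (error and success alike). */
#include <linux/slab.h>
#include <linux/types.h>

struct demo_resp {
	void *data;
	size_t len;
};

static void demo_free_resp(struct demo_resp *resp)
{
	kfree(resp->data);
	resp->data = NULL;
	resp->len = 0;
}

static void *demo_dup_payload(struct demo_resp *resp, size_t expected_len)
{
	void *copy = NULL;

	if (resp->len == expected_len)
		copy = kmemdup(resp->data, expected_len, GFP_KERNEL);

	demo_free_resp(resp);	/* released regardless of the outcome */
	return copy;		/* NULL on size mismatch or allocation failure */
}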
diff --git a/mvm/rs-fw.c b/mvm/rs-fw.c
index 6cba8a353b53..05715e5af6ab 100644
--- a/mvm/rs-fw.c
+++ b/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -479,9 +479,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
+ u32 enabled = le32_to_cpu(notif->amsdu_enabled);
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
if (link_sta->agg.max_amsdu_len < size) {
/*
* In debug link_sta->agg.max_amsdu_len < size
@@ -492,7 +498,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
goto out;
}
- mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->amsdu_enabled = enabled;
mvmsta->max_amsdu_len = size;
link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
@@ -508,6 +514,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
link_sta->agg.max_tid_amsdu_len[i] = 1;
}
+ ieee80211_sta_recalc_aggregates(sta);
+
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
le32_to_cpu(notif->amsdu_size), size,
@@ -525,10 +533,10 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
- if (WARN_ON_ONCE(!link_conf->chandef.chan))
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
switch (le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
@@ -538,7 +546,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
- } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ &&
+ } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ &&
eht_cap->has_eht) {
switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
@@ -603,6 +611,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
cpu_to_le16(max_amsdu_len) : 0,
};
unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
int cmd_ver;
int ret;
@@ -646,12 +655,12 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
* since TLC offload works with one mode we can assume
* that only vht/ht is used and also set it as station max amsdu
*/
- sta->deflink.agg.max_amsdu_len = max_amsdu_len;
+ link_sta->agg.max_amsdu_len = max_amsdu_len;
+ ieee80211_sta_recalc_aggregates(sta);
+
+ cfg_cmd.max_tx_op = cpu_to_le16(mvmvif->max_tx_op);
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD),
- 0);
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
@@ -687,9 +696,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD), 0) < 3)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
diff --git a/mvm/rx.c b/mvm/rx.c
index 542c192698a4..151289e13308 100644
--- a/mvm/rx.c
+++ b/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -553,39 +553,41 @@ struct iwl_mvm_stat_data {
struct iwl_mvm_stat_data_all_macs {
struct iwl_mvm *mvm;
__le32 flags;
- struct iwl_statistics_ntfy_per_mac *per_mac_stats;
+ struct iwl_stats_ntfy_per_mac *per_mac;
};
-static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
+static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
+ struct iwl_mvm_vif_link_info *link_info,
+ struct ieee80211_bss_conf *bss_conf)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- int thold = vif->bss_conf.cqm_rssi_thold;
- int hyst = vif->bss_conf.cqm_rssi_hyst;
+ struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm;
+ int thold = bss_conf->cqm_rssi_thold;
+ int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
+ s8 exit_esr_thresh;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
return;
}
- mvmvif->bf_data.ave_beacon_signal = sig;
+ link_info->bf_data.ave_beacon_signal = sig;
/* BT Coex */
- if (mvmvif->bf_data.bt_coex_min_thold !=
- mvmvif->bf_data.bt_coex_max_thold) {
- last_event = mvmvif->bf_data.last_bt_coex_event;
- if (sig > mvmvif->bf_data.bt_coex_max_thold &&
- (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ if (link_info->bf_data.bt_coex_min_thold !=
+ link_info->bf_data.bt_coex_max_thold) {
+ last_event = link_info->bf_data.last_bt_coex_event;
+ if (sig > link_info->bf_data.bt_coex_max_thold &&
+ (last_event <= link_info->bf_data.bt_coex_min_thold ||
last_event == 0)) {
- mvmvif->bf_data.last_bt_coex_event = sig;
+ link_info->bf_data.last_bt_coex_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
sig);
iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
- } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
- (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ } else if (sig < link_info->bf_data.bt_coex_min_thold &&
+ (last_event >= link_info->bf_data.bt_coex_max_thold ||
last_event == 0)) {
- mvmvif->bf_data.last_bt_coex_event = sig;
+ link_info->bf_data.last_bt_coex_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
sig);
iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
@@ -596,10 +598,10 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
return;
/* CQM Notification */
- last_event = mvmvif->bf_data.last_cqm_event;
+ last_event = link_info->bf_data.last_cqm_event;
if (thold && sig < thold && (last_event == 0 ||
sig < last_event - hyst)) {
- mvmvif->bf_data.last_cqm_event = sig;
+ link_info->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
sig);
ieee80211_cqm_rssi_notify(
@@ -609,7 +611,7 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
- mvmvif->bf_data.last_cqm_event = sig;
+ link_info->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
sig);
ieee80211_cqm_rssi_notify(
@@ -618,6 +620,20 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
sig,
GFP_KERNEL);
}
+
+ /* ESR recalculation */
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ exit_esr_thresh =
+ iwl_mvm_get_esr_rssi_thresh(mvm,
+ &bss_conf->chanreq.oper,
+ true);
+
+ if (sig < exit_esr_thresh)
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
+ iwl_mvm_get_other_link(vif,
+ bss_conf->link_id));
}
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -651,14 +667,15 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.accu_num_beacons +=
mvmvif->deflink.beacon_stats.num_beacons;
- iwl_mvm_update_vif_sig(vif, sig);
+ /* This is used in pre-MLO API so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
}
static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_stat_data_all_macs *data = _data;
- struct iwl_statistics_ntfy_per_mac *mac_stats;
+ struct iwl_stats_ntfy_per_mac *mac_stats;
int sig;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u16 vif_id = mvmvif->id;
@@ -669,7 +686,7 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
if (vif->type != NL80211_IFTYPE_STATION)
return;
- mac_stats = &data->per_mac_stats[vif_id];
+ mac_stats = &data->per_mac[vif_id];
mvmvif->deflink.beacon_stats.num_beacons =
le32_to_cpu(mac_stats->beacon_counter);
@@ -684,7 +701,9 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.num_beacons;
sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
- iwl_mvm_update_vif_sig(vif, sig);
+
+ /* This is used in pre-MLO API so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
}
static inline void
@@ -752,6 +771,19 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
spin_unlock(&mvm->tcm.lock);
}
+static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ int i;
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ if (!mvm->phy_ctxts[i].ref)
+ continue;
+ mvm->phy_ctxts[i].channel_load_by_us =
+ le32_to_cpu(per_phy[i].channel_load_by_us);
+ }
+}
+
static void
iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy *stats)
@@ -759,13 +791,14 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_mvm_stat_data_all_macs data = {
.mvm = mvm,
.flags = stats->flags,
- .per_mac_stats = stats->per_mac_stats,
+ .per_mac = stats->per_mac,
};
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator_all_macs,
&data);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
static void
@@ -829,6 +862,239 @@ static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm,
}
static void
+iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_link *per_link)
+{
+ u32 air_time[MAC_INDEX_AUX] = {};
+ u32 rx_bytes[MAC_INDEX_AUX] = {};
+ int fw_link_id;
+
+ for (fw_link_id = 0; fw_link_id < ARRAY_SIZE(mvm->link_id_to_link_conf);
+ fw_link_id++) {
+ struct iwl_stats_ntfy_per_link *link_stats;
+ struct ieee80211_bss_conf *bss_conf;
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif_link_info *link_info;
+ int link_id;
+ int sig;
+
+ bss_conf = iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, fw_link_id,
+ false);
+ if (!bss_conf)
+ continue;
+
+ if (bss_conf->vif->type != NL80211_IFTYPE_STATION)
+ continue;
+
+ link_id = bss_conf->link_id;
+ if (link_id >= ARRAY_SIZE(mvmvif->link))
+ continue;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif);
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
+ continue;
+
+ link_stats = &per_link[fw_link_id];
+
+ link_info->beacon_stats.num_beacons =
+ le32_to_cpu(link_stats->beacon_counter);
+
+ /* we basically just use the u8 to store 8 bits and then treat
+ * it as a s8 whenever we take it out to a different type.
+ */
+ link_info->beacon_stats.avg_signal =
+ -le32_to_cpu(link_stats->beacon_average_energy);
+
+ if (link_info->phy_ctxt &&
+ link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif,
+ link_id);
+
+ /* make sure that beacon statistics don't go backwards with TCM
+ * request to clear statistics
+ */
+ if (mvm->statistics_clear)
+ mvmvif->link[link_id]->beacon_stats.accu_num_beacons +=
+ mvmvif->link[link_id]->beacon_stats.num_beacons;
+
+ sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
+ iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info,
+ bss_conf);
+
+ if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX,
+ "invalid mvmvif id: %d", mvmvif->id))
+ continue;
+
+ air_time[mvmvif->id] +=
+ le32_to_cpu(per_link[fw_link_id].air_time);
+ rx_bytes[mvmvif->id] +=
+ le32_to_cpu(per_link[fw_link_id].rx_bytes);
+ }
+
+ /* Don't update in case the statistics are not cleared, since
+ * we will end up counting twice the same airtime, once in TCM
+ * request and once in statistics notification.
+ */
+ if (mvm->statistics_clear) {
+ __le32 air_time_le[MAC_INDEX_AUX];
+ __le32 rx_bytes_le[MAC_INDEX_AUX];
+ int vif_id;
+
+ for (vif_id = 0; vif_id < ARRAY_SIZE(air_time_le); vif_id++) {
+ air_time_le[vif_id] = cpu_to_le32(air_time[vif_id]);
+ rx_bytes_le[vif_id] = cpu_to_le32(rx_bytes[vif_id]);
+ }
+
+ iwl_mvm_update_tcm_from_stats(mvm, air_time_le, rx_bytes_le);
+ }
+}
+
+#define SEC_LINK_MIN_PERC 10
+#define SEC_LINK_MIN_TX 3000
+#define SEC_LINK_MIN_RX 400
+
+static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long total_tx = 0, total_rx = 0;
+ unsigned long sec_link_tx = 0, sec_link_rx = 0;
+ u8 sec_link_tx_perc, sec_link_rx_perc;
+ u8 sec_link;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
+
+ if (!mvmvif->esr_active || !mvmvif->ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
+ /* We only count for the AP sta in a MLO connection */
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ /* Get the FW ID of the secondary link */
+ sec_link = iwl_mvm_get_other_link(bss_vif,
+ iwl_mvm_get_primary_link(bss_vif));
+ if (WARN_ON(!mvmvif->link[sec_link]))
+ return;
+ sec_link = mvmvif->link[sec_link]->fw_link_id;
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+
+ /* The link IDs that don't exist will contain 0 */
+ for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) {
+ total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
+ total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
+ }
+
+ sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
+ sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
+
+ /*
+ * In EMLSR we have statistics every 5 seconds, so we can reset
+ * the counters upon every statistics notification.
+ */
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+
+ IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
+ /* If we don't have enough MPDUs - exit EMLSR */
+ if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
+ total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
+ iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
+ iwl_mvm_get_primary_link(bss_vif));
+ return;
+ }
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+ * The TX/RX percentage is checked only if it exceeds the required
+ * minimum. In addition, RX is checked only if the TX check failed.
+ */
+ if ((total_tx > SEC_LINK_MIN_TX &&
+ sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
+ (total_rx > SEC_LINK_MIN_RX &&
+ sec_link_rx_perc < SEC_LINK_MIN_PERC))
+ iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
+ iwl_mvm_get_primary_link(bss_vif));
+}
+
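
The iwl_mvm_update_esr_mode_tpt() logic above sums MPDU counters across RX queues and links, blocks EMLSR when total traffic is below IWL_MVM_ENTER_ESR_TPT_THRESH, and otherwise leaves EMLSR when the secondary link carries less than 10% of the traffic (the TX share is only checked above 3000 MPDUs, the RX share above 400). A standalone sketch of the percentage check; the demo_* names are hypothetical:

/* Illustrative sketch only -- mirrors the secondary-link usage check above. */
#include <linux/types.h>

#define DEMO_SEC_LINK_MIN_PERC	10
#define DEMO_SEC_LINK_MIN_TX	3000
#define DEMO_SEC_LINK_MIN_RX	400

/* true when the secondary link is used too little and EMLSR should be left */
static bool demo_should_leave_emlsr(unsigned long total_tx,
				    unsigned long total_rx,
				    unsigned long sec_tx,
				    unsigned long sec_rx)
{
	unsigned long tx_perc = total_tx ? sec_tx * 100 / total_tx : 0;
	unsigned long rx_perc = total_rx ? sec_rx * 100 / total_rx : 0;

	/* e.g. total_tx = 5000, sec_tx = 300 -> 6% < 10% -> leave EMLSR */
	return (total_tx > DEMO_SEC_LINK_MIN_TX &&
		tx_perc < DEMO_SEC_LINK_MIN_PERC) ||
	       (total_rx > DEMO_SEC_LINK_MIN_RX &&
		rx_perc < DEMO_SEC_LINK_MIN_PERC);
}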
+void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_system_statistics_notif_oper *stats;
+ int i;
+ u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP,
+ STATISTICS_OPER_NOTIF, 0);
+
+ if (notif_ver != 3) {
+ IWL_FW_CHECK_FAILED(mvm,
+ "Oper stats notif ver %d is not supported\n",
+ notif_ver);
+ return;
+ }
+
+ stats = (void *)&pkt->data;
+ iwl_mvm_stat_iterator_all_links(mvm, stats->per_link);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] =
+ le32_to_cpu(stats->per_sta[i].average_energy);
+
+ ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
+ average_energy);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
+
+ iwl_mvm_update_esr_mode_tpt(mvm);
+}
+
+void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_system_statistics_part1_notif_oper *part1_stats;
+ int i;
+ u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, STATISTICS_GROUP,
+ STATISTICS_OPER_PART1_NOTIF, 0);
+
+ if (notif_ver != 4) {
+ IWL_FW_CHECK_FAILED(mvm,
+ "Part1 stats notif ver %d is not supported\n",
+ notif_ver);
+ return;
+ }
+
+ part1_stats = (void *)&pkt->data;
+ mvm->radio_stats.rx_time = 0;
+ mvm->radio_stats.tx_time = 0;
+ for (i = 0; i < ARRAY_SIZE(part1_stats->per_link); i++) {
+ mvm->radio_stats.rx_time +=
+ le64_to_cpu(part1_stats->per_link[i].rx_time);
+ mvm->radio_stats.tx_time +=
+ le64_to_cpu(part1_stats->per_link[i].tx_time);
+ }
+}
+
+static void
iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -887,11 +1153,11 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
for (i = 0; i < ARRAY_SIZE(average_energy); i++)
average_energy[i] =
- le32_to_cpu(stats->per_sta_stats[i].average_energy);
+ le32_to_cpu(stats->per_sta[i].average_energy);
for (i = 0; i < ARRAY_SIZE(air_time); i++) {
- air_time[i] = stats->per_mac_stats[i].air_time;
- rx_bytes[i] = stats->per_mac_stats[i].rx_bytes;
+ air_time[i] = stats->per_mac[i].air_time;
+ rx_bytes[i] = stats->per_mac[i].rx_bytes;
}
}
@@ -917,6 +1183,13 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
__le32 *bytes, *air_time, flags;
int expected_size;
u8 *energy;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return;
/* From ver 14 and up we use TLV statistics format */
if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
diff --git a/mvm/rxmq.c b/mvm/rxmq.c
index 8d1e44fd9de7..1a210d0c22b3 100644
--- a/mvm/rxmq.c
+++ b/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -236,21 +236,13 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta,
- struct ieee80211_link_sta *link_sta)
+ struct ieee80211_sta *sta)
{
if (unlikely(iwl_mvm_check_pn(mvm, skb, queue, sta))) {
kfree_skb(skb);
return;
}
- if (sta && sta->valid_links && link_sta) {
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
-
- rx_status->link_valid = 1;
- rx_status->link_id = link_sta->link_id;
- }
-
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
@@ -282,6 +274,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
u32 status,
struct ieee80211_rx_status *stats)
{
+ struct wireless_dev *wdev;
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 keyid;
@@ -303,9 +296,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
- return -1;
+ goto report;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
@@ -314,13 +313,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
- if (!sta)
- return -1;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
/*
* both keys will have the same cipher and MIC length, use
* whichever one is available
@@ -329,11 +321,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!key) {
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
if (!key)
- return -1;
+ goto report;
}
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
- return -1;
+ goto report;
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
@@ -347,7 +339,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
- return -1;
+ goto report;
}
/* Report status to mac80211 */
@@ -355,6 +347,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
return -1;
}
@@ -376,8 +372,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
(status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
- IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
+ IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) {
+ IWL_DEBUG_DROP(mvm, "Dropping packets, bad enc status\n");
return -1;
+ }
if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
!ieee80211_has_protected(hdr->frame_control)))
@@ -395,8 +393,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
case IWL_RX_MPDU_STATUS_SEC_GCM:
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
/* alg is CCM: check MIC only */
- if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mvm,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
return -1;
+ }
stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
*crypt_len = IEEE80211_CCMP_HDR_LEN;
@@ -503,6 +504,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mvm_sta->dup_data))
+ return false;
+
dup_data = &mvm_sta->dup_data[queue];
/*
@@ -510,11 +515,9 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1)) {
- rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
return false;
- }
if (ieee80211_is_data_qos(hdr->frame_control)) {
/* frame has qos control */
@@ -548,44 +551,12 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
}
-/*
- * Returns true if sn2 - buffer_size < sn1 < sn2.
- * To be used only in order to compare reorder buffer head with NSSN.
- * We fully trust NSSN unless it is behind us due to reorder timeout.
- * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
- */
-static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
-{
- return ieee80211_sn_less(sn1, sn2) &&
- !ieee80211_sn_less(sn1, sn2 - buffer_size);
-}
-
-static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
-{
- if (IWL_MVM_USE_NSSN_SYNC) {
- struct iwl_mvm_nssn_sync_data notif = {
- .baid = baid,
- .nssn = nssn,
- };
-
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NSSN_SYNC, false,
- &notif, sizeof(notif));
- }
-}
-
-#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
-
-enum iwl_mvm_release_flags {
- IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
- IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
-};
-
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf,
- u16 nssn, u32 flags)
+ u16 nssn)
{
struct iwl_mvm_reorder_buf_entry *entries =
&baid_data->entries[reorder_buf->queue *
@@ -594,31 +565,12 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
lockdep_assert_held(&reorder_buf->lock);
- /*
- * We keep the NSSN not too far behind, if we are sync'ing it and it
- * is more than 2048 ahead of us, it must be behind us. Discard it.
- * This can happen if the queue that hit the 0 / 2048 seqno was lagging
- * behind and this queue already processed packets. The next if
- * would have caught cases where this queue would have processed less
- * than 64 packets, but it may have processed more than 64 packets.
- */
- if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
- ieee80211_sn_less(nssn, ssn))
- goto set_timer;
-
- /* ignore nssn smaller than head sn - this can happen due to timeout */
- if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
- goto set_timer;
-
- while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
- int index = ssn % reorder_buf->buf_size;
- struct sk_buff_head *skb_list = &entries[index].e.frames;
+ while (ieee80211_sn_less(ssn, nssn)) {
+ int index = ssn % baid_data->buf_size;
+ struct sk_buff_head *skb_list = &entries[index].frames;
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
- if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
- (ssn == 2048 || ssn == 0))
- iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
/*
* Empty the list. Will have more than one frame for A-MSDU.
@@ -628,104 +580,11 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta, NULL /* FIXME */);
+ sta);
reorder_buf->num_stored--;
}
}
reorder_buf->head_sn = nssn;
-
-set_timer:
- if (reorder_buf->num_stored && !reorder_buf->removed) {
- u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
-
- while (skb_queue_empty(&entries[index].e.frames))
- index = (index + 1) % reorder_buf->buf_size;
- /* modify timer to match next frame's expiration time */
- mod_timer(&reorder_buf->reorder_timer,
- entries[index].e.reorder_time + 1 +
- RX_REORDER_BUF_TIMEOUT_MQ);
- } else {
- del_timer(&reorder_buf->reorder_timer);
- }
-}
-
-void iwl_mvm_reorder_timer_expired(struct timer_list *t)
-{
- struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
- struct iwl_mvm_baid_data *baid_data =
- iwl_mvm_baid_data_from_reorder_buf(buf);
- struct iwl_mvm_reorder_buf_entry *entries =
- &baid_data->entries[buf->queue * baid_data->entries_per_queue];
- int i;
- u16 sn = 0, index = 0;
- bool expired = false;
- bool cont = false;
-
- spin_lock(&buf->lock);
-
- if (!buf->num_stored || buf->removed) {
- spin_unlock(&buf->lock);
- return;
- }
-
- for (i = 0; i < buf->buf_size ; i++) {
- index = (buf->head_sn + i) % buf->buf_size;
-
- if (skb_queue_empty(&entries[index].e.frames)) {
- /*
- * If there is a hole and the next frame didn't expire
- * we want to break and not advance SN
- */
- cont = false;
- continue;
- }
- if (!cont &&
- !time_after(jiffies, entries[index].e.reorder_time +
- RX_REORDER_BUF_TIMEOUT_MQ))
- break;
-
- expired = true;
- /* continue until next hole after this expired frames */
- cont = true;
- sn = ieee80211_sn_add(buf->head_sn, i + 1);
- }
-
- if (expired) {
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
- u8 sta_id = ffs(baid_data->sta_mask) - 1;
-
- rcu_read_lock();
- sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- rcu_read_unlock();
- goto out;
- }
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- /* SN is set to the last expired frame + 1 */
- IWL_DEBUG_HT(buf->mvm,
- "Releasing expired frames for sta %u, sn %d\n",
- sta_id, sn);
- iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
- sta, baid_data->tid);
- iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
- buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
- rcu_read_unlock();
- } else {
- /*
- * If no frame expired and there are stored frames, index is now
- * pointing to the first unexpired frame - modify timer
- * accordingly to this frame.
- */
- mod_timer(&buf->reorder_timer,
- entries[index].e.reorder_time +
- 1 + RX_REORDER_BUF_TIMEOUT_MQ);
- }
-
-out:
- spin_unlock(&buf->lock);
}
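With the reorder timer and the NSSN-sync paths removed above, the release loop simply trusts the firmware NSSN and walks the 12-bit sequence-number space forward. A standalone sketch of the modular comparison it relies on, assuming a 4096-value SN space; sn_less and release_up_to are illustrative names, not the mac80211 helpers.

#include <stdbool.h>
#include <stdint.h>

#define SN_MODULO 4096  /* 802.11 sequence numbers are 12 bits */

/* sn1 is "less than" sn2 if it lies in the half-space behind it */
static bool sn_less(uint16_t sn1, uint16_t sn2)
{
        uint16_t diff = (sn2 - sn1) & (SN_MODULO - 1);

        return diff != 0 && diff < SN_MODULO / 2;
}

/* release everything from head up to (but not including) nssn */
static uint16_t release_up_to(uint16_t head, uint16_t nssn)
{
        while (sn_less(head, nssn)) {
                /* hand the buffered frame stored at 'head' to the stack here */
                head = (head + 1) & (SN_MODULO - 1);
        }
        return head;  /* becomes the new head_sn */
}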
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
@@ -758,10 +617,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
- reorder_buf->buf_size),
- 0);
+ ba_data->buf_size));
spin_unlock_bh(&reorder_buf->lock);
- del_timer_sync(&reorder_buf->reorder_timer);
out:
rcu_read_unlock();
@@ -769,8 +626,7 @@ out:
static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
struct napi_struct *napi,
- u8 baid, u16 nssn, int queue,
- u32 flags)
+ u8 baid, u16 nssn, int queue)
{
struct ieee80211_sta *sta;
struct iwl_mvm_reorder_buffer *reorder_buf;
@@ -787,11 +643,8 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (!ba_data) {
- WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC),
- "BAID %d not found in map\n", baid);
+ if (WARN(!ba_data, "BAID %d not found in map\n", baid))
goto out;
- }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -803,22 +656,13 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, napi, ba_data,
- reorder_buf, nssn, flags);
+ reorder_buf, nssn);
spin_unlock_bh(&reorder_buf->lock);
out:
rcu_read_unlock();
}
-static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
- struct napi_struct *napi, int queue,
- const struct iwl_mvm_nssn_sync_data *data)
-{
- iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
- data->nssn, queue,
- IWL_MVM_RELEASE_FROM_RSS_SYNC);
-}
-
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -836,11 +680,11 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
len -= sizeof(*notif) + sizeof(*internal_notif);
- if (internal_notif->sync &&
- mvm->queue_sync_cookie != internal_notif->cookie) {
- WARN_ONCE(1, "Received expired RX queue sync message\n");
+ if (WARN_ONCE(internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie,
+ "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
+ internal_notif->cookie, mvm->queue_sync_cookie, queue))
return;
- }
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
@@ -853,14 +697,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
- case IWL_MVM_RXQ_NSSN_SYNC:
- if (WARN_ONCE(len != sizeof(struct iwl_mvm_nssn_sync_data),
- "invalid nssn sync notification size %d (%d)",
- len, (int)sizeof(struct iwl_mvm_nssn_sync_data)))
- break;
- iwl_mvm_nssn_sync(mvm, napi, queue,
- (void *)internal_notif->data);
- break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
@@ -874,55 +710,6 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
-static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta, int tid,
- struct iwl_mvm_reorder_buffer *buffer,
- u32 reorder, u32 gp2, int queue)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- if (gp2 != buffer->consec_oldsn_ampdu_gp2) {
- /* we have a new (A-)MPDU ... */
-
- /*
- * reset counter to 0 if we didn't have any oldsn in
- * the last A-MPDU (as detected by GP2 being identical)
- */
- if (!buffer->consec_oldsn_prev_drop)
- buffer->consec_oldsn_drops = 0;
-
- /* either way, update our tracking state */
- buffer->consec_oldsn_ampdu_gp2 = gp2;
- } else if (buffer->consec_oldsn_prev_drop) {
- /*
- * tracking state didn't change, and we had an old SN
- * indication before - do nothing in this case, we
- * already noted this one down and are waiting for the
- * next A-MPDU (by GP2)
- */
- return;
- }
-
- /* return unless this MPDU has old SN */
- if (!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN))
- return;
-
- /* update state */
- buffer->consec_oldsn_prev_drop = 1;
- buffer->consec_oldsn_drops++;
-
- /* if limit is reached, send del BA and reset state */
- if (buffer->consec_oldsn_drops == IWL_MVM_AMPDU_CONSEC_DROPS_DELBA) {
- IWL_WARN(mvm,
- "reached %d old SN frames from %pM on queue %d, stopping BA session on TID %d\n",
- IWL_MVM_AMPDU_CONSEC_DROPS_DELBA,
- sta->addr, queue, tid);
- ieee80211_stop_rx_ba_session(mvmsta->vif, BIT(tid), sta->addr);
- buffer->consec_oldsn_prev_drop = 0;
- buffer->consec_oldsn_drops = 0;
- }
-}
-
/*
* Returns true if the MPDU was buffered/dropped, false if it should be passed
* to upper layer.
@@ -934,11 +721,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc)
{
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
- struct sk_buff *tail;
u32 reorder = le32_to_cpu(desc->reorder_data);
bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
bool last_subframe =
@@ -955,6 +740,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT;
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000)
+ return false;
+
/*
* This also covers the case of receiving a Block Ack Request
* outside a BA session; we'll pass it to mac80211 and that
@@ -1016,59 +804,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
buffer->valid = true;
}
- if (ieee80211_is_back_req(hdr->frame_control)) {
- iwl_mvm_release_frames(mvm, sta, napi, baid_data,
- buffer, nssn, 0);
+ /* drop any duplicated packets */
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE))
goto drop;
- }
-
- /*
- * If there was a significant jump in the nssn - adjust.
- * If the SN is smaller than the NSSN it might need to first go into
- * the reorder buffer, in which case we just release up to it and the
- * rest of the function will take care of storing it and releasing up to
- * the nssn.
- * This should not happen. This queue has been lagging and it should
- * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
- * and update the other queues.
- */
- if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
- buffer->buf_size) ||
- !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
- u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
-
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
- min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
- }
-
- iwl_mvm_oldsn_workaround(mvm, sta, tid, buffer, reorder,
- rx_status->device_timestamp, queue);
/* drop any outdated packets */
- if (ieee80211_sn_less(sn, buffer->head_sn))
+ if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN)
goto drop;
/* release immediately if allowed by nssn and no stored frames */
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
- if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
- buffer->buf_size) &&
- (!amsdu || last_subframe)) {
- /*
- * If we crossed the 2048 or 0 SN, notify all the
- * queues. This is done in order to avoid having a
- * head_sn that lags behind for too long. When that
- * happens, we can get to a situation where the head_sn
- * is within the interval [nssn - buf_size : nssn]
- * which will make us think that the nssn is a packet
- * that we already freed because of the reordering
- * buffer and we will ignore it. So maintain the
- * head_sn somewhat updated across all the queues:
- * when it crosses 0 and 2048.
- */
- if (sn == 2048 || sn == 0)
- iwl_mvm_sync_nssn(mvm, baid, sn);
+ if (!amsdu || last_subframe)
buffer->head_sn = nssn;
- }
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
@@ -1083,37 +830,18 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* while technically there is no hole and we can move forward.
*/
if (!buffer->num_stored && sn == buffer->head_sn) {
- if (!amsdu || last_subframe) {
- if (sn == 2048 || sn == 0)
- iwl_mvm_sync_nssn(mvm, baid, sn);
+ if (!amsdu || last_subframe)
buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
- }
+
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
}
- index = sn % buffer->buf_size;
-
- /*
- * Check if we already stored this frame
- * As AMSDU is either received or not as whole, logic is simple:
- * If we have frames in that position in the buffer and the last frame
- * originated from AMSDU had a different SN then it is a retransmission.
- * If it is the same SN then if the subframe index is incrementing it
- * is the same AMSDU - otherwise it is a retransmission.
- */
- tail = skb_peek_tail(&entries[index].e.frames);
- if (tail && !amsdu)
- goto drop;
- else if (tail && (sn != buffer->last_amsdu ||
- buffer->last_sub_index >= sub_frame_idx))
- goto drop;
-
/* put in reorder buffer */
- __skb_queue_tail(&entries[index].e.frames, skb);
+ index = sn % baid_data->buf_size;
+ __skb_queue_tail(&entries[index].frames, skb);
buffer->num_stored++;
- entries[index].e.reorder_time = jiffies;
if (amsdu) {
buffer->last_amsdu = sn;
@@ -1133,8 +861,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
*/
if (!amsdu || last_subframe)
iwl_mvm_release_frames(mvm, sta, napi, baid_data,
- buffer, nssn,
- IWL_MVM_RELEASE_SEND_RSS_SYNC);
+ buffer, nssn);
spin_unlock_bh(&buffer->lock);
return true;
@@ -1507,7 +1234,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
#define IWL_RX_RU_DATA_A1 2
#define IWL_RX_RU_DATA_A2 2
#define IWL_RX_RU_DATA_B1 2
-#define IWL_RX_RU_DATA_B2 3
+#define IWL_RX_RU_DATA_B2 4
#define IWL_RX_RU_DATA_C1 3
#define IWL_RX_RU_DATA_C2 3
#define IWL_RX_RU_DATA_D1 4
@@ -2163,21 +1890,6 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
-static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
-{
- switch (phy_band) {
- case PHY_BAND_24:
- return NL80211_BAND_2GHZ;
- case PHY_BAND_5:
- return NL80211_BAND_5GHZ;
- case PHY_BAND_6:
- return NL80211_BAND_6GHZ;
- default:
- WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
- return NL80211_BAND_5GHZ;
- }
-}
-
struct iwl_rx_sta_csa {
bool all_sta_unblocked;
struct ieee80211_vif *vif;
@@ -2242,6 +1954,16 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
iwl_mvm_decode_lsig(skb, phy_data);
rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
+ if (mvm->rx_ts_ptp && mvm->monitor_on) {
+ u64 adj_time =
+ iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC);
+
+ rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
+ rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
+ rx_status->flag &= ~RX_FLAG_MACTIME;
+ }
+
rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
@@ -2320,9 +2042,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u32 len;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
struct ieee80211_sta *sta = NULL;
- struct ieee80211_link_sta *link_sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
+ u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
size_t desc_size;
struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
@@ -2441,7 +2163,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
- rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
+ rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
} else {
rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
@@ -2471,13 +2193,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
- u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
+ if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
+ struct ieee80211_link_sta *link_sta;
- if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
- sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR(sta))
sta = NULL;
- link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
+ link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);
+
+ if (sta && sta->valid_links && link_sta) {
+ rx_status->link_valid = 1;
+ rx_status->link_id = link_sta->link_id;
+ }
}
} else if (!is_multicast_ether_addr(hdr->addr2)) {
/*
@@ -2562,6 +2289,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_rx_csum(mvm, sta, skb, pkt);
if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
+ IWL_DEBUG_DROP(mvm, "Dropping duplicate packet 0x%x\n",
+ le16_to_cpu(hdr->seq_ctrl));
kfree_skb(skb);
goto out;
}
@@ -2591,6 +2320,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+ if (!sub_frame_idx || sub_frame_idx == 1)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
+ queue);
+ }
}
/* management stuff on default queue */
@@ -2613,9 +2352,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc) &&
likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr2)) &&
- likely(!iwl_mvm_mei_filter_scan(mvm, skb)))
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta,
- link_sta);
+ likely(!iwl_mvm_mei_filter_scan(mvm, skb))) {
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+ (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+ !(desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ }
out:
rcu_read_unlock();
}
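A-MSDU handling in the RX path above hinges on two descriptor fields: the subframe index and the last-subframe bit. A standalone sketch, not driver code, of how they are interpreted: each A-MSDU is counted once (index 0 means not an A-MSDU, index 1 means the first subframe of a new one), and, as done for the 9000 family above, every subframe except the final one is flagged as having more data to follow. The struct below is an illustrative stand-in for the hardware descriptor.

#include <stdbool.h>
#include <stdint.h>

struct rx_meta {
        uint8_t sub_frame_idx;  /* 0 = not A-MSDU, 1 = first subframe */
        bool last_subframe;
        bool is_amsdu;
};

/* count each (A-)MSDU exactly once */
static bool counts_as_new_mpdu(const struct rx_meta *m)
{
        return m->sub_frame_idx == 0 || m->sub_frame_idx == 1;
}

/* all but the final subframe are marked "more subframes follow" */
static bool needs_more_flag(const struct rx_meta *m)
{
        return m->is_amsdu && !m->last_subframe;
}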
@@ -2627,7 +2371,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
u32 rssi;
- u32 info_type;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
struct iwl_mvm_rx_phy_data phy_data;
@@ -2640,7 +2383,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
rssi = le32_to_cpu(desc->rssi);
- info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
phy_data.d0 = desc->phy_info[0];
phy_data.d1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
@@ -2692,7 +2434,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
/* 0-length PSDU */
rx_status->flag |= RX_FLAG_NO_PSDU;
- switch (info_type) {
+ /* mark as failed PLCP on any errors to skip checks in mac80211 */
+ if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+ RX_NO_DATA_INFO_ERR_NONE)
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+ switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
case RX_NO_DATA_INFO_TYPE_NDP:
rx_status->zero_length_psdu_type =
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
@@ -2717,8 +2464,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
*
* We mark it as mac header, for upper layers to know where
* all radio tap header ends.
+ *
+ * Since putting data on the skb doesn't move existing data, and that is
+ * the only way we use it, data + len is where the next header would go
*/
- skb_reset_mac_header(skb);
+ skb_set_mac_header(skb, skb->len);
/*
* Override the nss from the rx_vec since the rate_n_flags has
@@ -2758,7 +2508,7 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
le16_to_cpu(release->nssn),
- queue, 0);
+ queue);
}
void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
@@ -2799,7 +2549,10 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
tid))
goto out;
- iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue, 0);
+ IWL_DEBUG_DROP(mvm, "Received a BAR, expect packet loss: nssn %d\n",
+ nssn);
+
+ iwl_mvm_release_frames_from_notif(mvm, napi, baid, nssn, queue);
out:
rcu_read_unlock();
}
diff --git a/mvm/scan.c b/mvm/scan.c
index c1d9ce753468..1cc9c426bb15 100644
--- a/mvm/scan.c
+++ b/mvm/scan.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -48,6 +48,8 @@
/* Number of iterations on the channel for mei filtered scan */
#define IWL_MEI_SCAN_NUM_ITER 5U
+#define WFA_TPC_IE_LEN 9
+
struct iwl_mvm_scan_timing_params {
u32 suspend_time;
u32 max_out_time;
@@ -101,6 +103,7 @@ struct iwl_mvm_scan_params {
bool scan_6ghz;
bool enable_6ghz_passive;
bool respect_p2p_go, respect_p2p_go_hb;
+ s8 tsf_report_link_id;
u8 bssid[ETH_ALEN] __aligned(2);
};
@@ -207,7 +210,7 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
@@ -225,6 +228,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
.global_cnt = 0,
};
+ /*
+ * A scanning AP interface probably wants to generate a survey to do
+ * ACS (automatic channel selection).
+ * Force a non-fragmented scan in that case.
+ */
+ if (vif && ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ return IWL_SCAN_TYPE_WILD;
+
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_iterator,
@@ -240,13 +251,11 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
return IWL_SCAN_TYPE_FRAGMENTED;
/*
- * in case of DCM with GO where BSS DTIM interval < 220msec
- * set all scan requests as fast-balance scan
+ * in case of DCM with P2P GO set all scan requests as
+ * fast-balance scan
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- data.is_dcm_with_p2p_go &&
- ((vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period) < 220))
+ data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
@@ -296,8 +305,8 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
- /* we create the 802.11 header and SSID element */
- max_probe_len -= 24 + 2;
+ /* we create the 802.11 header, SSID element and WFA TPC element */
+ max_probe_len -= 24 + 2 + WFA_TPC_IE_LEN;
/* DS parameter set element is added on 2.4GHZ band if required */
if (iwl_mvm_rrm_scan_needed(mvm))
@@ -724,8 +733,6 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
return newpos;
}
-#define WFA_TPC_IE_LEN 9
-
static void iwl_mvm_add_tpc_report_ie(u8 *pos)
{
pos[0] = WLAN_EID_VENDOR_SPECIFIC;
@@ -830,8 +837,8 @@ static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
return ((n_ssids <= PROBE_OPTION_MAX) &&
(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
(ies->common_ie_len +
- ies->len[NL80211_BAND_2GHZ] +
- ies->len[NL80211_BAND_5GHZ] <=
+ ies->len[NL80211_BAND_2GHZ] + ies->len[NL80211_BAND_5GHZ] +
+ ies->len[NL80211_BAND_6GHZ] <=
iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
}
@@ -853,11 +860,13 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
* 4. it's not a p2p find operation.
* 5. we are not in low latency mode,
* or if fragmented ebs is supported by the FW
+ * 6. the VIF is not an AP interface (scan wants survey results)
*/
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
+ (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)) &&
+ ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP);
}
static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
@@ -1304,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1378,11 +1387,14 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
-static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
+static u32 iwl_mvm_scan_umac_ooc_priority(int type)
{
- return iwl_mvm_is_regular_scan(params) ?
- IWL_SCAN_PRIORITY_EXT_6 :
- IWL_SCAN_PRIORITY_EXT_2;
+ if (type == IWL_MVM_SCAN_REGULAR)
+ return IWL_SCAN_PRIORITY_EXT_6;
+ if (type == IWL_MVM_SCAN_INT_MLO)
+ return IWL_SCAN_PRIORITY_EXT_4;
+
+ return IWL_SCAN_PRIORITY_EXT_2;
}
static void
@@ -1406,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
general_params->adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
general_params->adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1647,6 +1659,17 @@ iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
cfg->v2.channel_num = channels[i]->hw_value;
if (cfg80211_channel_is_psc(channels[i]))
cfg->flags = 0;
+
+ if (band == NL80211_BAND_6GHZ) {
+ /* 6 GHz channels should only appear in a scan request
+ * that has scan_6ghz set. The only exception is MLO
+ * scan, which has to be passive.
+ */
+ WARN_ON_ONCE(cfg->flags != 0);
+ cfg->flags =
+ cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE);
+ }
+
cfg->v2.iter_count = 1;
cfg->v2.iter_interval = 0;
if (version < 17)
@@ -1718,7 +1741,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
break;
}
- if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
memcpy(&pp->bssid_array[idex_b++],
scan_6ghz_params[j].bssid, ETH_ALEN);
}
@@ -1748,8 +1774,9 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
&cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
- u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
- bool force_passive, found = false, allow_passive = true,
+ u8 j, k, n_s_ssids = 0, n_bssids = 0;
+ u8 max_s_ssids, max_bssids;
+ bool force_passive = false, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
@@ -1772,20 +1799,15 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;
- /*
- * The optimize the scan time, i.e., reduce the scan dwell time
- * on each channel, the below logic tries to set 3 direct BSSID
- * probe requests for each broadcast probe request with a short
- * SSID.
- * TODO: improve this logic
- */
- n_used_bssid_entries = 3;
for (j = 0; j < params->n_6ghz_params; j++) {
s8 tmp_psd_20;
if (!(scan_6ghz_params[j].channel_idx == i))
continue;
+ unsolicited_probe_on_chan |=
+ scan_6ghz_params[j].unsolicited_probe;
+
/* Use the highest PSD value allowed as advertised by
* APs for this channel
*/
@@ -1797,12 +1819,69 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
psd_20 < tmp_psd_20))
psd_20 = tmp_psd_20;
- found = false;
- unsolicited_probe_on_chan |=
- scan_6ghz_params[j].unsolicited_probe;
psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+ }
+
+ /*
+ * In the following cases apply passive scan:
+ * 1. Non fragmented scan:
+ * - PSC channel with NO_LISTEN_FLAG on should be treated
+ * like non PSC channel
+ * - Non PSC channel with more than 3 short SSIDs or more
+ * than 9 BSSIDs.
+ * - Non PSC Channel with unsolicited probe response and
+ * more than 2 short SSIDs or more than 6 BSSIDs.
+ * - PSC channel with more than 2 short SSIDs or more than
+ * 6 BSSIDs.
+ * 2. Fragmented scan:
+ * - PSC channel with more than 1 SSID or 3 BSSIDs.
+ * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+ * - Non PSC channel with unsolicited probe response and
+ * more than 1 SSID or more than 3 BSSIDs.
+ */
+ if (!iwl_mvm_is_scan_fragmented(params->type)) {
+ if (!cfg80211_channel_is_psc(params->channels[i]) ||
+ psc_no_listen) {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ } else {
+ max_s_ssids = 3;
+ max_bssids = 9;
+ }
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ } else if (cfg80211_channel_is_psc(params->channels[i])) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ }
+
+ /*
+ * To optimize the scan time, i.e., reduce the scan dwell time
+ * on each channel, the below logic tries to set 3 direct BSSID
+ * probe requests for each broadcast probe request with a short
+ * SSID.
+ * TODO: improve this logic
+ */
+ for (j = 0; j < params->n_6ghz_params; j++) {
+ if (!(scan_6ghz_params[j].channel_idx == i))
+ continue;
+
+ found = false;
- for (k = 0; k < pp->short_ssid_num; k++) {
+ for (k = 0;
+ k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+ k++) {
if (!scan_6ghz_params[j].unsolicited_probe &&
le32_to_cpu(pp->short_ssid[k]) ==
scan_6ghz_params[j].short_ssid) {
@@ -1813,25 +1892,25 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
}
/*
- * Use short SSID only to create a new
- * iteration during channel dwell or in
- * case that the short SSID has a
- * matching SSID, i.e., scan for hidden
- * APs.
+ * Prefer creating BSSID entries unless
+ * the short SSID probe can be done in
+ * the same channel dwell iteration.
+ *
+ * We also need to create a short SSID
+ * entry for any hidden AP.
*/
- if (n_used_bssid_entries >= 3) {
- s_ssid_bitmap |= BIT(k);
- s_max++;
- n_used_bssid_entries -= 3;
- found = true;
+ if (3 * n_s_ssids > n_bssids &&
+ !pp->direct_scan[k].len)
break;
- } else if (pp->direct_scan[k].len) {
- s_ssid_bitmap |= BIT(k);
- s_max++;
- found = true;
+
+ /* Hidden AP, cannot do passive scan */
+ if (pp->direct_scan[k].len)
allow_passive = false;
- break;
- }
+
+ s_ssid_bitmap |= BIT(k);
+ n_s_ssids++;
+ found = true;
+ break;
}
}
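To make the per-channel probe budget above easier to follow, here is a standalone sketch, not driver code, of the short-SSID/BSSID limits and of the roughly three-BSSIDs-per-short-SSID preference applied while filling them; budget_for and should_add_short_ssid are illustrative names.

#include <stdbool.h>

struct chan_budget {
        int max_s_ssids;
        int max_bssids;
};

/* budget table mirrored from the passive-scan conditions above */
static struct chan_budget budget_for(bool fragmented, bool psc,
                                     bool psc_no_listen, bool unsolicited)
{
        if (!fragmented) {
                if (!psc || psc_no_listen)
                        return unsolicited ? (struct chan_budget){2, 6}
                                           : (struct chan_budget){3, 9};
                return (struct chan_budget){2, 6};
        }
        if (psc)
                return (struct chan_budget){1, 3};
        return unsolicited ? (struct chan_budget){1, 3}
                           : (struct chan_budget){2, 6};
}

/* prefer BSSID entries unless we already have ~3 BSSIDs per short SSID,
 * but always add a short SSID entry for a hidden AP */
static bool should_add_short_ssid(int n_s_ssids, int n_bssids, bool hidden_ap)
{
        return hidden_ap || 3 * n_s_ssids <= n_bssids;
}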
@@ -1843,9 +1922,12 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
scan_6ghz_params[j].bssid,
ETH_ALEN)) {
if (!(bssid_bitmap & BIT(k))) {
- bssid_bitmap |= BIT(k);
- b_max++;
- n_used_bssid_entries++;
+ if (n_bssids < max_bssids) {
+ bssid_bitmap |= BIT(k);
+ n_bssids++;
+ } else {
+ force_passive = true;
+ }
}
break;
}
@@ -1859,39 +1941,6 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
if (unsolicited_probe_on_chan)
flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
- /*
- * In the following cases apply passive scan:
- * 1. Non fragmented scan:
- * - PSC channel with NO_LISTEN_FLAG on should be treated
- * like non PSC channel
- * - Non PSC channel with more than 3 short SSIDs or more
- * than 9 BSSIDs.
- * - Non PSC Channel with unsolicited probe response and
- * more than 2 short SSIDs or more than 6 BSSIDs.
- * - PSC channel with more than 2 short SSIDs or more than
- * 6 BSSIDs.
- * 3. Fragmented scan:
- * - PSC channel with more than 1 SSID or 3 BSSIDs.
- * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
- * - Non PSC channel with unsolicited probe response and
- * more than 1 SSID or more than 3 BSSIDs.
- */
- if (!iwl_mvm_is_scan_fragmented(params->type)) {
- if (!cfg80211_channel_is_psc(params->channels[i]) ||
- flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
- force_passive = (s_max > 3 || b_max > 9);
- force_passive |= (unsolicited_probe_on_chan &&
- (s_max > 2 || b_max > 6));
- } else {
- force_passive = (s_max > 2 || b_max > 6);
- }
- } else if (cfg80211_channel_is_psc(params->channels[i])) {
- force_passive = (s_max > 1 || b_max > 3);
- } else {
- force_passive = (s_max > 2 || b_max > 6);
- force_passive |= (unsolicited_probe_on_chan &&
- (s_max > 1 || b_max > 3));
- }
if ((allow_passive && force_passive) ||
(!(bssid_bitmap | s_ssid_bitmap) &&
!cfg80211_channel_is_psc(params->channels[i])))
@@ -2099,7 +2148,8 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
- struct ieee80211_vif *vif, int type)
+ struct ieee80211_vif *vif, int type,
+ u16 gen_flags)
{
u8 flags = 0;
@@ -2119,6 +2169,13 @@ static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+ /* Passive and AP interface -> ACS (automatic channel selection) */
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
+ ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_notif_ver(mvm->fw, SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ 0) >= 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
+
return flags;
}
@@ -2256,8 +2313,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
- mvm->scan_uid_status[uid] = type;
-
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
cmd->general_flags = cpu_to_le16(gen_flags);
@@ -2298,10 +2353,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
&tail_v2->delay);
- if (ret) {
- mvm->scan_uid_status[uid] = 0;
+ if (ret)
return ret;
- }
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
tail_v2->preq = params->preq;
@@ -2342,20 +2395,15 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
- if (version < 12) {
+ mvm->scan_link_id = 0;
+
+ if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
- struct iwl_mvm_vif_link_info *link_info;
- u8 link_id = 0;
+ struct iwl_mvm_vif_link_info *link_info =
+ scan_vif->link[params->tsf_report_link_id];
- /* Use one of the active link (if any). In the future it would
- * be possible that the link ID would be part of the scan
- * request coming from upper layers so we would need to use it.
- */
- if (vif->active_links)
- link_id = ffs(vif->active_links) - 1;
-
- link_info = scan_vif->link[link_id];
+ mvm->scan_link_id = params->tsf_report_link_id;
if (!WARN_ON(!link_info))
gp->scan_start_mac_or_link_id = link_info->fw_link_id;
}
@@ -2456,9 +2504,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int ret;
u16 gen_flags;
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
@@ -2493,15 +2539,14 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
u8 gen_flags2;
u32 bitmap_ssid = 0;
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
if (version >= 15)
- gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type,
+ gen_flags);
else
gen_flags2 = 0;
@@ -2538,10 +2583,8 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
params->n_channels,
pb, cp, vif->type,
version);
- if (!cp->count) {
- mvm->scan_uid_status[uid] = 0;
+ if (!cp->count)
return -EINVAL;
- }
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
@@ -2819,7 +2862,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
@@ -2845,7 +2889,7 @@ static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
if (vif == data->current_vif)
return;
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p) {
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
u32 link_id;
for (link_id = 0;
@@ -2920,9 +2964,11 @@ static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
}
}
-int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req,
- struct ieee80211_scan_ies *ies)
+static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
{
struct iwl_host_cmd hcmd = {
.len = { iwl_mvm_scan_size(mvm), },
@@ -2940,7 +2986,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
- ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+ ret = iwl_mvm_check_running_scans(mvm, type);
if (ret)
return ret;
@@ -2977,12 +3023,19 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (req->duration)
params.iter_notif = true;
+ params.tsf_report_link_id = req->tsf_report_link_id;
+ if (params.tsf_report_link_id < 0) {
+ if (vif->active_links)
+ params.tsf_report_link_id = __ffs(vif->active_links);
+ else
+ params.tsf_report_link_id = 0;
+ }
+
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
- uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
- IWL_MVM_SCAN_REGULAR);
+ uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
if (uid < 0)
return uid;
@@ -2997,23 +3050,35 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
iwl_mvm_resume_tcm(mvm);
- mvm->scan_uid_status[uid] = 0;
return ret;
}
- IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
- mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
- mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ IWL_DEBUG_SCAN(mvm, "Scan request send success: type=%u, uid=%u\n",
+ type, uid);
+
+ mvm->scan_uid_status[uid] = type;
+ mvm->scan_status |= type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ schedule_delayed_work(&mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
+ }
if (params.enable_6ghz_passive)
mvm->last_6ghz_passive_scan_jiffies = jiffies;
- schedule_delayed_work(&mvm->scan_timeout_dwork,
- msecs_to_jiffies(SCAN_TIMEOUT));
-
return 0;
}
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ return _iwl_mvm_single_scan_start(mvm, vif, req, ies,
+ IWL_MVM_SCAN_REGULAR);
+}
+
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -3114,23 +3179,23 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_channels = j;
}
- if (non_psc_included &&
- !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
- kfree(params.channels);
- return -ENOBUFS;
+ if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
+ ret = -ENOBUFS;
+ goto out;
}
uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
-
- if (non_psc_included)
- kfree(params.channels);
- if (uid < 0)
- return uid;
+ if (uid < 0) {
+ ret = uid;
+ goto out;
+ }
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
- "Sched scan request was sent successfully\n");
+ "Sched scan request send success: type=%u, uid=%u\n",
+ type, uid);
+ mvm->scan_uid_status[uid] = type;
mvm->scan_status |= type;
} else {
/* If the scan failed, it usually means that the FW was unable
@@ -3138,10 +3203,12 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
- mvm->scan_uid_status[uid] = 0;
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
+out:
+ if (non_psc_included)
+ kfree(params.channels);
return ret;
}
@@ -3152,9 +3219,25 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ bool select_links = false;
mvm->mei_scan_filter.is_mei_limited_scan = false;
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+ uid, mvm->scan_uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mvm_ebs_status_str(notif->ebs_status));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: scan_status=0x%x\n",
+ mvm->scan_status);
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return;
@@ -3164,8 +3247,18 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
.aborted = aborted,
.scan_start_tsf = mvm->scan_start,
};
+ struct iwl_mvm_vif *scan_vif = mvm->scan_vif;
+ struct iwl_mvm_vif_link_info *link_info =
+ scan_vif->link[mvm->scan_link_id];
+
+ /* It is possible that by the time the scan is complete the link
+ * was already removed and is not valid.
+ */
+ if (link_info)
+ memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
+ else
+ IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n");
- memcpy(info.tsf_bssid, mvm->scan_vif->deflink.bssid, ETH_ALEN);
ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_vif = NULL;
cancel_delayed_work(&mvm->scan_timeout_dwork);
@@ -3173,25 +3266,28 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
+ /*
+ * Other scan types won't necessarily scan for the MLD links channels.
+ * Therefore, only select links after successful internal scan.
+ */
+ select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
- IWL_DEBUG_SCAN(mvm,
- "Scan completed, uid %u type %u, status %s, EBS status %s\n",
- uid, mvm->scan_uid_status[uid],
- notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted",
- iwl_mvm_ebs_status_str(notif->ebs_status));
- IWL_DEBUG_SCAN(mvm,
- "Last line %d, Last iteration %d, Time from last iteration %d\n",
- notif->last_schedule, notif->last_iter,
- __le32_to_cpu(notif->time_from_last_iter));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: after update: scan_status=0x%x\n",
+ mvm->scan_status);
if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
+
+ if (select_links)
+ wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
@@ -3238,10 +3334,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
- 0, sizeof(cmd), &cmd);
+ CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
return ret;
}
@@ -3359,6 +3456,12 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
+ if (uid >= 0) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
+ mvm->scan_uid_status[uid] = 0;
+ }
+
uid = iwl_mvm_scan_uid_by_status(mvm,
IWL_MVM_SCAN_STOPPING_REGULAR);
if (uid >= 0)
@@ -3369,6 +3472,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
if (uid >= 0)
mvm->scan_uid_status[uid] = 0;
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_INT_MLO);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
* any is found.
@@ -3405,10 +3513,14 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
{
int ret;
+ IWL_DEBUG_SCAN(mvm,
+ "Request to stop scan: type=0x%x, status=0x%x\n",
+ type, mvm->scan_status);
+
if (!(mvm->scan_status & type))
return 0;
- if (iwl_mvm_is_radio_killed(mvm)) {
+ if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
ret = 0;
goto out;
}
@@ -3416,6 +3528,9 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
ret = iwl_mvm_scan_stop_wait(mvm, type);
if (!ret)
mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ else
+ IWL_DEBUG_SCAN(mvm, "Failed to stop scan\n");
+
out:
/* Clear the scan status so the next scan requests will
* succeed and mark the scan as stopping, so that the Rx
@@ -3440,3 +3555,276 @@ out:
return ret;
}
+
+static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel **channels,
+ size_t n_channels)
+{
+ struct cfg80211_scan_request *req = NULL;
+ struct ieee80211_scan_ies ies = {};
+ size_t size, i;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
+ n_channels);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ size = struct_size(req, channels, n_channels);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ /* set the requested channels */
+ for (i = 0; i < n_channels; i++)
+ req->channels[i] = channels[i];
+
+ req->n_channels = n_channels;
+
+ /* set the rates */
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+ if (mvm->hw->wiphy->bands[i])
+ req->rates[i] =
+ (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
+
+ req->wdev = ieee80211_vif_to_wdev(vif);
+ req->wiphy = mvm->hw->wiphy;
+ req->scan_start = jiffies;
+ req->tsf_report_link_id = -1;
+
+ ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
+ IWL_MVM_SCAN_INT_MLO);
+ kfree(req);
+
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
+ return ret;
+}
+
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ size_t n_channels = 0;
+ u8 link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
+ return -EBUSY;
+ }
+
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ channels[n_channels++] = link_conf->chanreq.oper.chan;
+ }
+
+ rcu_read_unlock();
+
+ if (!n_channels)
+ return -EINVAL;
+
+ return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
+}
+
+static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
+ enum nl80211_band band,
+ u16 phy_chan_num)
+{
+ struct ieee80211_supported_band *sband = mvm->hw->wiphy->bands[band];
+ int chan_idx;
+
+ if (WARN_ON_ONCE(!sband))
+ return -EINVAL;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel = &sband->channels[chan_idx];
+
+ if (channel->hw_value == phy_chan_num)
+ return chan_idx;
+ }
+
+ return -EINVAL;
+}
+
+static u32 iwl_mvm_div_by_db(u32 value, u8 db)
+{
+ /*
+ * 2^32 * 10**(-i / 10) for i = [1, 10], skipping 0 and simply stopping
+ * at 10 dB and looping instead of using a much larger table.
+ *
+ * Using 64 bit math is overkill, but means the helper does not require
+ * a limit on the input range.
+ */
+ static const u32 db_to_val[] = {
+ 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
+ 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
+ };
+
+ while (value && db > 0) {
+ u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val));
+
+ value = (((u64)value) * db_to_val[change - 1]) >> 32;
+
+ db -= change;
+ }
+
+ return value;
+}
+
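As a sanity check on the fixed-point table above, a small userspace program can recompute the entries: each one is approximately 2^32 * 10^(-i/10), so multiplying a linear value by entry i-1 and shifting right by 32 attenuates it by i dB. Illustrative only, not driver code.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* db_to_val[i - 1] ~= 2^32 * 10^(-i / 10) for i = 1..10 */
        for (int i = 1; i <= 10; i++) {
                uint32_t fixed = (uint32_t)(pow(10.0, -i / 10.0) * 4294967296.0);

                printf("-%2d dB -> 0x%08x\n", i, (unsigned int)fixed);
        }

        /* attenuating a value by e.g. 7 dB: multiply by the entry, shift by 32 */
        uint32_t value = 0x10000;  /* 1.0 in 16.16 fixed point */
        uint32_t factor = (uint32_t)(pow(10.0, -0.7) * 4294967296.0);

        printf("1.0 attenuated by 7 dB ~= 0x%x\n",
               (unsigned int)(((uint64_t)value * factor) >> 32));
        return 0;
}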
+VISIBLE_IF_IWLWIFI_KUNIT s8
+iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif)
+{
+ s8 average_magnitude;
+ u32 average_factor;
+ s8 sum_magnitude = -128;
+ u32 sum_factor = 0;
+ int i, count = 0;
+
+ /*
+ * To properly average the decibel values (signal values given in dBm)
+ * we need to do the math in linear space. Doing a linear average of
+ * dB (dBm) values is a bit annoying though due to the large range of
+ * at least -10 to -110 dBm that will not fit into a 32 bit integer.
+ *
+ * A 64 bit integer should be sufficient, but then we still have the
+ * problem that there are no directly usable utility functions
+ * available.
+ *
+ * So, lets not deal with that and instead do much of the calculation
+ * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit
+ * gives us plenty of head-room for adding up a few values and even
+ * doing some math on it. And the tail should be accurate enough too
+ * (1/2^16 is somewhere around -48 dB, so effectively zero).
+ *
+ * i.e. the real value of sum is:
+ * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW
+ *
+ * However, that does mean we need to be able to bring two values to
+ * a common base, so we need a helper for that.
+ *
+ * Note that this function takes an input with unsigned negative dBm
+ * values but returns a signed dBm (i.e. a negative value).
+ */
+
+ for (i = 0; i < ARRAY_SIZE(notif->noise); i++) {
+ s8 val_magnitude;
+ u32 val_factor;
+
+ if (notif->noise[i] == 0xff)
+ continue;
+
+ val_factor = 0x10000;
+ val_magnitude = -notif->noise[i];
+
+ if (val_magnitude <= sum_magnitude) {
+ u8 div_db = sum_magnitude - val_magnitude;
+
+ val_factor = iwl_mvm_div_by_db(val_factor, div_db);
+ val_magnitude = sum_magnitude;
+ } else {
+ u8 div_db = val_magnitude - sum_magnitude;
+
+ sum_factor = iwl_mvm_div_by_db(sum_factor, div_db);
+ sum_magnitude = val_magnitude;
+ }
+
+ sum_factor += val_factor;
+ count++;
+ }
+
+ /* No valid noise measurement, return a very high noise level */
+ if (count == 0)
+ return 0;
+
+ average_magnitude = sum_magnitude;
+ average_factor = sum_factor / count;
+
+ /*
+ * average_factor will be a number smaller than 1.0 (0x10000) at this
+ * point. What we need to do now is to adjust average_magnitude so that
+ * average_factor is between -0.5 dB and 0.5 dB.
+ *
+ * Just do -1 dB steps and find the point where
+ * -0.5 dB * -i dB = 0x10000 * 10^(-0.5/10) / i dB
+ * = div_by_db(0xe429, i)
+ * is smaller than average_factor.
+ */
+ for (i = 0; average_factor < iwl_mvm_div_by_db(0xe429, i); i++) {
+ /* nothing */
+ }
+
+ return average_magnitude - i;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_average_dbm_values);
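A floating-point reference for what the fixed-point routine above approximates, illustrative only: dBm readings are averaged in linear (mW) space, so the average of -80 dBm and -90 dBm comes out near -82.6 dBm rather than the naive -85 dBm.

#include <math.h>
#include <stdio.h>

/* reference: average dBm readings in linear (mW) space */
static double avg_dbm(const double *dbm, int n)
{
        double sum_mw = 0.0;

        for (int i = 0; i < n; i++)
                sum_mw += pow(10.0, dbm[i] / 10.0);

        return 10.0 * log10(sum_mw / n);
}

int main(void)
{
        double noise[] = { -80.0, -90.0 };

        /* prints roughly -82.6, not -85.0 */
        printf("%.1f dBm\n", avg_dbm(noise, 2));
        return 0;
}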
+
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_umac_scan_channel_survey_notif *notif =
+ (void *)pkt->data;
+ struct iwl_mvm_acs_survey_channel *info;
+ enum nl80211_band band;
+ int chan_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ size_t n_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+
+ mvm->acs_survey = kzalloc(struct_size(mvm->acs_survey,
+ channels, n_channels),
+ GFP_KERNEL);
+
+ if (!mvm->acs_survey)
+ return;
+
+ mvm->acs_survey->n_channels = n_channels;
+ n_channels = 0;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ mvm->acs_survey->bands[band] =
+ &mvm->acs_survey->channels[n_channels];
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+ }
+
+ band = iwl_mvm_nl80211_band_from_phy(le32_to_cpu(notif->band));
+ chan_idx = iwl_mvm_chanidx_from_phy(mvm, band,
+ le32_to_cpu(notif->channel));
+ if (WARN_ON_ONCE(chan_idx < 0))
+ return;
+
+ IWL_DEBUG_SCAN(mvm, "channel survey received for freq %d\n",
+ mvm->hw->wiphy->bands[band]->channels[chan_idx].center_freq);
+
+ info = &mvm->acs_survey->bands[band][chan_idx];
+
+ /* Times are all in ms */
+ info->time = le32_to_cpu(notif->active_time);
+ info->time_busy = le32_to_cpu(notif->busy_time);
+ info->time_rx = le32_to_cpu(notif->rx_time);
+ info->time_tx = le32_to_cpu(notif->tx_time);
+ info->noise = iwl_mvm_average_dbm_values(notif);
+}
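The lazy allocation above packs every channel of every band into one flexible-array allocation and keeps per-band pointers into it, so a survey record can be looked up as bands[band][chan_idx]. A minimal standalone sketch of that layout under assumed types, not the driver's structures:

#include <stdlib.h>

#define N_BANDS 3

struct chan_info {
        unsigned int time, time_busy;  /* ms */
        signed char noise;             /* dBm */
};

struct survey {
        struct chan_info *bands[N_BANDS];  /* views into channels[] */
        size_t n_channels;
        struct chan_info channels[];       /* one flat allocation */
};

static struct survey *survey_alloc(const size_t n_per_band[N_BANDS])
{
        size_t total = 0, off = 0;
        struct survey *s;

        for (int b = 0; b < N_BANDS; b++)
                total += n_per_band[b];

        s = calloc(1, sizeof(*s) + total * sizeof(s->channels[0]));
        if (!s)
                return NULL;

        s->n_channels = total;
        for (int b = 0; b < N_BANDS; b++) {
                s->bands[b] = &s->channels[off];
                off += n_per_band[b];
        }
        return s;
}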
diff --git a/mvm/sf.c b/mvm/sf.c
index 30d4233595e8..16285ae7cae9 100644
--- a/mvm/sf.c
+++ b/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -232,6 +232,9 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
};
struct ieee80211_sta *sta = NULL;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD))
+ return 0;
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/mvm/sta.c b/mvm/sta.c
index 3b9a343d4f67..15e64d94d6ea 100644
--- a/mvm/sta.c
+++ b/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -71,7 +71,7 @@ u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
mpdu_dens = link_sta->ht_cap.ampdu_density;
}
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
/* overwrite HT values on 6 GHz */
mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -208,7 +208,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (sta->deflink.ht_cap.ht_supported ||
- mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
@@ -827,7 +827,7 @@ static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
if (!link)
continue;
- /* support for 1k ba size */
+ /* support for 512 ba size */
if (link->eht_cap.has_eht &&
max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
@@ -857,19 +857,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
size = iwl_mvm_get_queue_size(sta);
}
- /* take the min with bc tbl entries allowed */
- size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
-
- /* size needs to be power of 2 values for calculating read/write pointers */
- size = rounddown_pow_of_two(size);
-
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
unsigned int link_id;
- for (link_id = 0;
- link_id < ARRAY_SIZE(mvmsta->link);
- link_id++) {
+ rcu_read_lock();
+ for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
struct iwl_mvm_link_sta *link =
rcu_dereference_protected(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
@@ -879,6 +873,7 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
sta_mask |= BIT(link->sta_id);
}
+ rcu_read_unlock();
} else {
sta_mask |= BIT(sta_id);
}
@@ -886,22 +881,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
if (!sta_mask)
return -EINVAL;
- do {
- queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
- tid, size, timeout);
-
- if (queue < 0)
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
- size, sta_mask, tid, queue);
- size /= 2;
- } while (queue < 0 && size >= 16);
-
- if (queue < 0)
- return queue;
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
+ tid, size, timeout);
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
- queue, sta_mask, tid);
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, sta_mask, tid);
return queue;
}
@@ -1501,6 +1487,34 @@ out_err:
return ret;
}
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
@@ -1818,6 +1832,18 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+ /* MPDUs are counted only when EMLSR is possible */
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !sta->tdls && ieee80211_vif_is_mld(vif)) {
+ mvm_sta->mpdu_counters =
+ kcalloc(mvm->trans->num_rx_queues,
+ sizeof(*mvm_sta->mpdu_counters),
+ GFP_KERNEL);
+ if (mvm_sta->mpdu_counters)
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++)
+ spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
+ }
+
return 0;
}
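The MPDU counters allocated above are one small structure per RX queue, each with its own lock, so every RX queue updates only its own slot and the EMLSR logic can presumably read the per-link totals later without cross-queue contention. A standalone sketch of the pattern with a userspace mutex standing in for the kernel spinlock; all names and fields are illustrative.

#include <pthread.h>
#include <stdlib.h>

struct mpdu_counter {
        pthread_mutex_t lock;  /* per-queue lock, stands in for a spinlock */
        unsigned long rx, tx;
};

static struct mpdu_counter *counters_alloc(int num_queues)
{
        struct mpdu_counter *c = calloc(num_queues, sizeof(*c));

        if (!c)
                return NULL;
        for (int q = 0; q < num_queues; q++)
                pthread_mutex_init(&c[q].lock, NULL);
        return c;
}

/* hot path: each RX queue touches only its own counter */
static void count_rx_mpdu(struct mpdu_counter *c, int queue, unsigned long n)
{
        pthread_mutex_lock(&c[queue].lock);
        c[queue].rx += n;
        pthread_mutex_unlock(&c[queue].lock);
}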
@@ -2059,7 +2085,8 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*status = IWL_MVM_QUEUE_FREE;
}
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mvm_link->ap_sta_id == sta_id) {
/* if associated - we can't remove the AP STA now */
if (vif->cfg.assoc)
return true;
@@ -2097,7 +2124,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
/* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
+ mvm_sta->tfd_queue_msk);
if (ret)
return ret;
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -2408,7 +2436,8 @@ void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true);
+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
+ mvmvif->deflink.bcast_sta.tfd_queue_msk);
switch (vif->type) {
case NL80211_IFTYPE_AP:
@@ -2546,7 +2575,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* In IBSS, ieee80211_check_queues() sets the cab_queue to be
@@ -2664,7 +2693,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true);
+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
+ mvmvif->deflink.mcast_sta.tfd_queue_msk);
iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
&mvmvif->deflink.cab_queue, 0);
@@ -2713,25 +2743,16 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
*/
WARN_ON(1);
- for (j = 0; j < reorder_buf->buf_size; j++)
- __skb_queue_purge(&entries[j].e.frames);
- /*
- * Prevent timer re-arm. This prevents a very far fetched case
- * where we timed out on the notification. There may be prior
- * RX frames pending in the RX queue before the notification
- * that might get processed between now and the actual deletion
- * and we would re-arm the timer although we are deleting the
- * reorder buffer.
- */
- reorder_buf->removed = true;
+ for (j = 0; j < data->buf_size; j++)
+ __skb_queue_purge(&entries[j].frames);
+
spin_unlock_bh(&reorder_buf->lock);
- del_timer_sync(&reorder_buf->reorder_timer);
}
}
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u16 buf_size)
+ u16 ssn)
{
int i;
@@ -2744,16 +2765,11 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->num_stored = 0;
reorder_buf->head_sn = ssn;
- reorder_buf->buf_size = buf_size;
- /* rx reorder timer */
- timer_setup(&reorder_buf->reorder_timer,
- iwl_mvm_reorder_timer_expired, 0);
spin_lock_init(&reorder_buf->lock);
- reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->valid = false;
- for (j = 0; j < reorder_buf->buf_size; j++)
- __skb_queue_head_init(&entries[j].e.frames);
+ for (j = 0; j < data->buf_size; j++)
+ __skb_queue_head_init(&entries[j].frames);
}
}
@@ -2815,7 +2831,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
};
- u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_SEND_IN_RFKILL,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
int ret;
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2827,7 +2848,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.alloc.ssn = cpu_to_le16(ssn);
cmd.alloc.win_size = cpu_to_le16(buf_size);
baid = -EIO;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
cmd.remove_v1.baid = cpu_to_le32(baid);
BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
} else {
@@ -2836,8 +2857,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.remove.tid = cpu_to_le32(tid);
}
- ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
- &cmd, &baid);
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
if (ret)
return ret;
@@ -2957,13 +2977,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+ baid_data->buf_size = buf_size;
mvm_sta->tid_to_baid[tid] = baid;
if (timeout)
mod_timer(&baid_data->session_timer,
TU_TO_EXP_TIME(timeout * 2));
- iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
/*
* protect the BA data with RCU to cover a case where our
* internal RX sync mechanism will timeout (not that it's
@@ -2996,16 +3017,6 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
-
- /*
- * After we've deleted it, do another queue sync
- * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
- * running it won't find a new session in the old
- * BAID. It can find the NULL pointer for the BAID,
- * but we must not have it find a different session.
- */
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
- true, NULL, 0);
}
return 0;
@@ -3241,7 +3252,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* should be updated as well.
*/
if (buf_size < IWL_FRAME_LIMIT)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -3566,6 +3577,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
@@ -4118,10 +4132,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
}
/* block the Tx queues until the FW updated the sleep Tx count */
- iwl_trans_block_txq_ptrs(mvm->trans, true);
-
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
- CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
+ CMD_ASYNC | CMD_BLOCK_TXQS,
iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
@@ -4159,7 +4171,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
int ret;
if (mvm->mld_api_is_used) {
- iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
return;
}
@@ -4176,7 +4189,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
if (mvm->mld_api_is_used) {
- iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
return;
}
@@ -4231,7 +4245,9 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
int i;
if (mvm->mld_api_is_used) {
- iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable);
+ if (!iwl_mvm_has_no_host_disable_tx(mvm))
+ iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
+ disable);
return;
}
@@ -4303,12 +4319,12 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len)
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *keyconf)
{
int ret;
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_key_conf *keyconf;
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
@@ -4333,12 +4349,6 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
goto out;
- keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
- if (!keyconf) {
- ret = -ENOBUFS;
- goto out;
- }
-
keyconf->cipher = cipher;
memcpy(keyconf->key, key, key_len);
keyconf->keylen = key_len;
@@ -4359,10 +4369,9 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
0, NULL, 0, 0, true);
}
- kfree(keyconf);
- return 0;
out:
- iwl_mvm_dealloc_int_sta(mvm, sta);
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
@@ -4383,3 +4392,80 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
+
+static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
+ u8 fw_sta_id)
+{
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
+ struct iwl_mvm_vif_link_info *link;
+
+ if (WARN_ON_ONCE(!link_sta))
+ return -EINVAL;
+
+ link = mvmvif->link[link_sta->link_id];
+
+ if (WARN_ON_ONCE(!link))
+ return -EINVAL;
+
+ return link->fw_link_id;
+}
+
+#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
+
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_tpt_counter *queue_counter;
+ struct iwl_mvm_mpdu_counter *link_counter;
+ u32 total_mpdus = 0;
+ int fw_link_id;
+
+ /* Count only for a BSS sta, and only when EMLSR is possible */
+ if (!mvm_sta->mpdu_counters)
+ return;
+
+ /* Map sta id to link id */
+ fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
+ if (fw_link_id < 0)
+ return;
+
+ queue_counter = &mvm_sta->mpdu_counters[queue];
+ link_counter = &queue_counter->per_link[fw_link_id];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ if (tx)
+ link_counter->tx += count;
+ else
+ link_counter->rx += count;
+
+ /*
+ * When not in EMLSR, the window and the decision to enter EMLSR are
+ * handled during counting, when in EMLSR - in the statistics flow
+ */
+ if (mvmvif->esr_active)
+ goto out;
+
+ if (time_is_before_jiffies(queue_counter->window_start +
+ IWL_MVM_TPT_COUNT_WINDOW)) {
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+ queue_counter->window_start = jiffies;
+
+ IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+ }
+
+ for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
+ total_mpdus += tx ? queue_counter->per_link[i].tx :
+ queue_counter->per_link[i].rx;
+
+ if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
+ wiphy_work_queue(mvmvif->mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tpt_wk);
+
+out:
+ spin_unlock_bh(&queue_counter->lock);
+}
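iwl_mvm_count_mpdu() above accumulates per-link MPDU counts per RX queue and, when EMLSR is not active, restarts the counting window once it expires and compares the window total against a threshold before kicking the unblock work. A compilable approximation of that window logic; the clock source, window length and threshold below are illustrative assumptions, not the driver's values:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

#define MAX_LINKS      2
#define WINDOW_SEC     5     /* assumed counting window */
#define TPT_THRESHOLD  400   /* assumed MPDU threshold */

struct mpdu_counter { uint32_t tx, rx; };

struct tpt_counter {
	struct mpdu_counter per_link[MAX_LINKS];
	time_t window_start;   /* set to time(NULL) when the counter is created */
};

/* Returns true when the threshold was crossed inside the current window. */
bool count_mpdu(struct tpt_counter *c, int link, uint32_t n, bool tx)
{
	time_t now = time(NULL);
	uint32_t total = 0;

	if (tx)
		c->per_link[link].tx += n;
	else
		c->per_link[link].rx += n;

	/* Window expired: restart counting from scratch. */
	if (now - c->window_start > WINDOW_SEC) {
		memset(c->per_link, 0, sizeof(c->per_link));
		c->window_start = now;
	}

	for (int i = 0; i < MAX_LINKS; i++)
		total += tx ? c->per_link[i].tx : c->per_link[i].rx;

	return total > TPT_THRESHOLD;
}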
diff --git a/mvm/sta.h b/mvm/sta.h
index 7364346a1209..0dc83d6afb3c 100644
--- a/mvm/sta.h
+++ b/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -286,12 +286,10 @@ struct iwl_mvm_key_pn {
*
* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
- * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
*/
enum iwl_mvm_rxq_notif_type {
IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
- IWL_MVM_RXQ_NSSN_SYNC,
};
/**
@@ -315,11 +313,6 @@ struct iwl_mvm_delba_data {
u32 baid;
} __packed;
-struct iwl_mvm_nssn_sync_data {
- u32 baid;
- u32 nssn;
-} __packed;
-
/**
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
* @last_seq: last sequence per tid for duplicate packet detection
@@ -354,8 +347,27 @@ struct iwl_mvm_link_sta {
u8 avg_energy;
};
+struct iwl_mvm_mpdu_counter {
+ u32 tx;
+ u32 rx;
+};
+
+/**
+ * struct iwl_mvm_tpt_counter - per-queue MPDU counter
+ *
+ * @lock: Needed to protect the counters when modified from statistics.
+ * @per_link: per-link counters.
+ * @window_start: timestamp of the counting-window start
+ */
+struct iwl_mvm_tpt_counter {
+ spinlock_t lock;
+ struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID];
+ unsigned long window_start;
+} ____cacheline_aligned_in_smp;
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
+ * @vif: the interface the station belongs to
* @tfd_queue_msk: the tfd queues used by the station
* @mac_id_n_color: the MAC context this station is linked to
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
@@ -380,6 +392,7 @@ struct iwl_mvm_link_sta {
* @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
* In case TLC offload is not active it is either 0xFFFF or 0.
* @max_amsdu_len: max AMSDU length
+ * @sleeping: indicates the station is sleeping (when not offloaded to FW)
* @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
* @sleeping: sta sleep transitions in power management
* @sleep_tx_count: the number of frames that we told the firmware to let out
@@ -389,7 +402,6 @@ struct iwl_mvm_link_sta {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
- * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
* exchange).
@@ -400,6 +412,7 @@ struct iwl_mvm_link_sta {
* @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
* link[0] points to deflink and link[link_id] is allocated when new link
* sta is added.
+ * @mpdu_counters: RX/TX MPDUs counters for each queue.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -439,6 +452,8 @@ struct iwl_mvm_sta {
struct iwl_mvm_link_sta deflink;
struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct iwl_mvm_tpt_counter *mpdu_counters;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -463,7 +478,7 @@ struct iwl_mvm_int_sta {
};
/**
- * Send the STA info to the FW.
+ * iwl_mvm_sta_send_to_fw - Send the STA info to the FW.
*
* @mvm: the iwl_mvm* to use
* @sta: the STA
@@ -520,6 +535,9 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
@@ -577,10 +595,12 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len);
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
@@ -642,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/mvm/tdls.c b/mvm/tdls.c
index dae6f2a1aad9..3d25ff5cd7e8 100644
--- a/mvm/tdls.c
+++ b/mvm/tdls.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022 Intel Corporation
+ * Copyright (C) 2018-2020, 2022-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
#include "mvm.h"
@@ -144,21 +144,21 @@ void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ unsigned int link_id)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
/* Protect the session to hear the TDLS setup response on the channel */
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, duration,
- duration, true);
+ duration, true, link_id);
else
iwl_mvm_protect_session(mvm, vif, duration,
duration, 100, true);
- mutex_unlock(&mvm->mutex);
}
static const char *
@@ -459,21 +459,21 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
int ret;
mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* called after an active channel switch has finished or timed-out */
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
- goto out;
+ return;
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
- goto out;
+ return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
@@ -492,8 +492,6 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
/* retry after a DTIM if we failed sending now */
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
-out:
- mutex_unlock(&mvm->mutex);
}
int
@@ -508,7 +506,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
unsigned int delay;
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
sta->addr, chandef->chan->center_freq, chandef->width);
@@ -518,8 +516,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
@@ -528,17 +525,15 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
oper_class, chandef, 0, 0, 0,
tmpl_skb, ch_sw_tm_ie);
if (ret)
- goto out;
+ return ret;
/*
* Mark the peer as "in tdls switch" for this vif. We only allow a
* single such peer per vif.
*/
mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
- if (!mvm->tdls_cs.peer.skb) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mvm->tdls_cs.peer.skb)
+ return -ENOMEM;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
@@ -555,10 +550,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
@@ -625,7 +617,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
"REQ" : "RESP";
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
IWL_DEBUG_TDLS(mvm,
"Received TDLS ch switch action %s from %pM status %d\n",
@@ -669,5 +661,4 @@ retry:
1024 / 1000;
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
- mutex_unlock(&mvm->mutex);
}
diff --git a/mvm/testmode.h b/mvm/testmode.h
new file mode 100644
index 000000000000..ff82af11de8d
--- /dev/null
+++ b/mvm/testmode.h
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+ IWL_MVM_TM_ATTR_UNSPEC,
+ IWL_MVM_TM_ATTR_CMD,
+ IWL_MVM_TM_ATTR_NOA_DURATION,
+ IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+ /* keep last */
+ NUM_IWL_MVM_TM_ATTRS,
+ IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+ IWL_MVM_TM_CMD_SET_NOA,
+ IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
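The enums in this new header travel inside NL80211_ATTR_TESTDATA, so whoever consumes them has to check that the sub-command and its companion attribute are both present before acting. A sketch of that dispatch, assuming the netlink parsing has already happened elsewhere; everything below other than the attribute and command meanings is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the enums above; values are positional, starting at 0. */
enum tm_attr { TM_ATTR_UNSPEC, TM_ATTR_CMD, TM_ATTR_NOA_DURATION,
	       TM_ATTR_BEACON_FILTER_STATE, NUM_TM_ATTRS };
enum tm_cmd { TM_CMD_SET_NOA, TM_CMD_SET_BEACON_FILTER };

/* Hypothetical, already-parsed attribute values. */
struct tm_data {
	uint32_t present[NUM_TM_ATTRS];
	uint32_t value[NUM_TM_ATTRS];
};

int handle_testmode(const struct tm_data *d)
{
	if (!d->present[TM_ATTR_CMD])
		return -1;

	switch (d->value[TM_ATTR_CMD]) {
	case TM_CMD_SET_NOA:
		if (!d->present[TM_ATTR_NOA_DURATION])
			return -1;
		printf("set NoA duration %u\n",
		       (unsigned)d->value[TM_ATTR_NOA_DURATION]);
		return 0;
	case TM_CMD_SET_BEACON_FILTER:
		if (!d->present[TM_ATTR_BEACON_FILTER_STATE])
			return -1;
		printf("beacon filter %s\n",
		       d->value[TM_ATTR_BEACON_FILTER_STATE] ? "on" : "off");
		return 0;
	default:
		return -1;
	}
}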
diff --git a/mvm/time-event.c b/mvm/time-event.c
index 5f0e7144a951..a8c42ce3b630 100644
--- a/mvm/time-event.c
+++ b/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -42,34 +42,32 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
te_data->uid = 0;
te_data->id = TE_MAX;
te_data->vif = NULL;
+ te_data->link_id = -1;
}
-void iwl_mvm_roc_done_wk(struct work_struct *wk)
+static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct ieee80211_vif *vif = mvm->p2p_device_vif;
+
+ lockdep_assert_held(&mvm->mutex);
/*
- * Clear the ROC_RUNNING status bit.
+ * Clear the ROC_P2P_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
- clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-
- synchronize_net();
-
- /*
- * Flush the offchannel queue -- this is called when the time
+ * in the case that the time event actually completed in the firmware.
+ *
+ * Also flush the offchannel queue -- this is called when the time
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
-
- mutex_lock(&mvm->mutex);
- if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
+ synchronize_net();
+
/*
* NB: access to this pointer would be racy, but the flush bit
* can only be set when we had a P2P-Device VIF, and we have a
@@ -77,14 +75,33 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* not really racy.
*/
- if (!WARN_ON(!mvm->p2p_device_vif)) {
- mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
- iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta,
- true);
+ if (!WARN_ON(!vif)) {
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
+ mvmvif->deflink.bcast_sta.tfd_queue_msk);
+
+ if (mvm->mld_api_is_used) {
+ iwl_mvm_mld_rm_bcast_sta(mvm, vif,
+ &vif->bss_conf);
+
+ iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE,
+ false);
+ } else {
+ iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+ iwl_mvm_binding_remove_vif(mvm, vif);
+ }
+
+ /* Do not remove the PHY context as removing and adding
+ * a PHY context has timing overheads. Leaving it
+ * configured in FW would be useful in case the next ROC
+ * is with the same channel.
+ */
}
}
/*
+ * P2P AUX ROC and HS2.0 ROC do not run simultaneously.
* Clear the ROC_AUX_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
@@ -92,12 +109,15 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* (which is handled in iwl_mvm_te_handle_notif).
*/
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- /* do the same in case of hot spot 2.0 */
- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
+ synchronize_net();
+
+ iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
+ mvm->aux_sta.tfd_queue_msk);
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
- goto out_unlock;
+ mutex_unlock(&mvm->mutex);
+ return;
}
/* In newer version of this command an aux station is added only
@@ -107,10 +127,20 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
iwl_mvm_rm_aux_sta(mvm);
}
-out_unlock:
+ if (!IS_ERR_OR_NULL(bss_vif))
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
mutex_unlock(&mvm->mutex);
}
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+ /* Mutex is released inside */
+ iwl_mvm_cleanup_roc(mvm);
+}
+
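iwl_mvm_roc_done_wk() now takes mvm->mutex and counts on iwl_mvm_cleanup_roc() to drop it on every exit path, which is what the "/* Mutex is released inside */" comment records. A tiny generic sketch of that caller-locks/callee-unlocks convention, using plain pthreads instead of the driver's wrappers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Callee: must release the lock on every return path. */
static void cleanup_locked(int need_flush)
{
	if (need_flush)
		printf("flushing under lock\n");
	pthread_mutex_unlock(&lock);   /* released here, not by the caller */
}

/* Caller: acquires the lock, never unlocks it itself. */
static void worker(void)
{
	pthread_mutex_lock(&lock);
	/* Lock is released inside cleanup_locked(). */
	cleanup_locked(1);
}

int main(void) { worker(); return 0; }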
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
{
/*
@@ -141,12 +171,12 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
@@ -192,6 +222,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
NULL);
+
+ mvmvif->session_prot_connection_loss = true;
}
iwl_mvm_connection_loss(mvm, vif, errmsg);
@@ -223,7 +255,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
}
iwl_mvm_csa_client_absent(mvm, te_data->vif);
cancel_delayed_work(&mvmvif->csa_work);
- ieee80211_chswitch_done(te_data->vif, true);
+ ieee80211_chswitch_done(te_data->vif, true, 0);
break;
default:
/* should never happen */
@@ -272,18 +304,6 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
-{
- /*
- * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
- * roc_done_wk is already scheduled or running, so don't schedule it
- * again to avoid a race where the roc_done_wk clears this bit after
- * it is set here, affecting the next run of the roc_done_wk.
- */
- if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
- iwl_mvm_roc_finished(mvm);
-}
-
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -335,7 +355,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -368,7 +388,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
@@ -378,6 +398,59 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
}
}
+struct iwl_mvm_rx_roc_iterator_data {
+ u32 activity;
+ bool end_activity;
+ bool found;
+};
+
+static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_rx_roc_iterator_data *data = _data;
+
+ if (mvmvif->roc_activity == data->activity) {
+ data->found = true;
+ if (data->end_activity)
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+}
+
+void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_roc_notif *notif = (void *)pkt->data;
+ u32 activity = le32_to_cpu(notif->activity);
+ bool started = le32_to_cpu(notif->success) &&
+ le32_to_cpu(notif->started);
+ struct iwl_mvm_rx_roc_iterator_data data = {
+ .activity = activity,
+ .end_activity = !started,
+ };
+
+ /* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_rx_roc_iterator,
+ &data);
+ /*
+ * It is possible that the ROC was canceled
+ * but the notification was already fired.
+ */
+ if (!data.found)
+ return;
+
+ if (started) {
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ ieee80211_ready_on_channel(mvm->hw);
+ } else {
+ iwl_mvm_roc_finished(mvm);
+ ieee80211_remain_on_channel_expired(mvm->hw);
+ }
+}
+
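The new notification handler matches the firmware-reported activity against each interface through an atomic iterator, passing its state via the opaque data pointer. A self-contained sketch of that callback-plus-context pattern, with placeholder types standing in for mac80211's iterator:

#include <stdbool.h>
#include <stdint.h>

struct fake_vif { uint32_t roc_activity; };

struct roc_iter_data {
	uint32_t activity;
	bool end_activity;
	bool found;
};

/* Callback invoked once per interface; context arrives as void *. */
void roc_iterator(void *ctx, struct fake_vif *vif)
{
	struct roc_iter_data *data = ctx;

	if (vif->roc_activity == data->activity) {
		data->found = true;
		if (data->end_activity)
			vif->roc_activity = UINT32_MAX;   /* mark as idle */
	}
}

/* Stand-in for ieee80211_iterate_active_interfaces_atomic(). */
void for_each_vif(struct fake_vif *vifs, int n,
		  void (*fn)(void *, struct fake_vif *), void *ctx)
{
	for (int i = 0; i < n; i++)
		fn(ctx, &vifs[i]);
}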
/*
* Handle A Aux ROC time event
*/
@@ -651,19 +724,45 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}
}
+/* Determine whether mac or link id should be used, and validate the link id */
+static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s8 link_id)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP,
+ SESSION_PROTECTION_CMD), 1);
+
+ if (ver < 2)
+ return mvmvif->id;
+
+ if (WARN(link_id < 0 || !mvmvif->link[link_id],
+ "Invalid link ID for session protection: %u\n", link_id))
+ return -EINVAL;
+
+ if (WARN(!mvmvif->link[link_id]->active,
+ "Session Protection on an inactive link: %u\n", link_id))
+ return -EINVAL;
+
+ return mvmvif->link[link_id]->fw_link_id;
+}
+
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif,
- u32 id)
+ struct ieee80211_vif *vif,
+ u32 id, s8 link_id)
{
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
struct iwl_mvm_session_prot_cmd cmd = {
- .id_and_color =
- cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color)),
+ .id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
.conf_id = cpu_to_le32(id),
};
int ret;
+ if (mac_link_id < 0)
+ return;
+
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd);
@@ -672,15 +771,35 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}
+static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
+{
+ struct iwl_roc_req roc_cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .activity = cpu_to_le32(activity),
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
+ sizeof(roc_cmd), &roc_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
+}
+
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
u32 *uid)
{
u32 id;
+ struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
+ s8 link_id;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
- if (!te_data->vif)
+ if (!vif)
return false;
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
@@ -695,6 +814,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
/* Save time event uid before clearing its data */
*uid = te_data->uid;
id = te_data->id;
+ link_id = te_data->link_id;
/*
* The clear_data function handles time events that were already removed
@@ -702,19 +822,28 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
- /* When session protection is used, the te_data->id field
- * is reused to save session protection's configuration.
- * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
- * to HOT_SPOT_CMD.
- */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
- id != HOT_SPOT_CMD) {
+ if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) ||
+ (roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ iwl_mvm_roc_finished(mvm);
+ }
+ return false;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
+ id != HOT_SPOT_CMD) {
+ /* When session protection is used, the te_data->id field
+ * is reused to save session protection's configuration.
+ * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id
+ * field is set to HOT_SPOT_CMD.
+ */
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
- iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
+ iwl_mvm_cancel_session_protection(mvm, vif, id,
+ link_id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
}
}
return false;
@@ -829,18 +958,40 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
+ unsigned int ver =
+ iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP,
+ SESSION_PROTECTION_NOTIF, 2);
+ int id = le32_to_cpu(notif->mac_link_id);
struct ieee80211_vif *vif;
struct iwl_mvm_vif *mvmvif;
+ unsigned int notif_link_id;
rcu_read_lock();
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
- true);
+
+ if (ver <= 2) {
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ } else {
+ struct ieee80211_bss_conf *link_conf =
+ iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);
+
+ if (!link_conf)
+ goto out_unlock;
+
+ notif_link_id = link_conf->link_id;
+ vif = link_conf->vif;
+ }
if (!vif)
goto out_unlock;
mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
+ mvmvif->time_event_data.link_id != notif_link_id,
+ "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
+ notif_link_id, mvmvif->time_event_data.link_id))
+ goto out_unlock;
+
/* The vif is not a P2P_DEVICE, maintain its time_event_data */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
struct iwl_mvm_time_event_data *te_data =
@@ -880,13 +1031,14 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
+ mvmvif->time_event_data.link_id = -1;
+ iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
} else if (le32_to_cpu(notif->start)) {
if (WARN_ON(mvmvif->time_event_data.id !=
le32_to_cpu(notif->conf_id)))
goto out_unlock;
- set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
}
@@ -894,6 +1046,103 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay)
+{
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ u32 dtim_interval = 0;
+
+ *delay = AUX_ROC_MIN_DELAY;
+ *duration_tu = MSEC_TO_TU(duration_ms);
+
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ dtim_interval =
+ max_t(u32, dtim_interval,
+ link_conf->dtim_period * link_conf->beacon_int);
+ }
+ rcu_read_unlock();
+
+ /*
+ * If we are associated we want the delay time to be at least one
+ * dtim interval so that the FW can wait until after the DTIM and
+ * then start the time event, this will potentially allow us to
+ * remain off-channel for the max duration.
+ * Since we want to use almost a whole dtim interval we would also
+ * like the delay to be for 2-3 dtim intervals, in case there are
+ * other time events with higher priority.
+ * dtim_interval should never be 0, it can be 1 if we don't know it
+ * (we haven't heard any beacon yet).
+ */
+ if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) {
+ *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= *duration_tu) {
+ *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (*duration_tu <= AUX_ROC_MIN_DURATION)
+ *duration_tu = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+}
+
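iwl_mvm_roc_duration_and_delay() clamps the off-channel dwell to the DTIM interval once associated. As a worked example: with beacon_int = 100 TU and dtim_period = 2, a 500 ms request ends up as roughly 181 TU of dwell with a 585 TU delay. The sketch below reproduces the arithmetic outside the driver; MSEC_TO_TU and the buffer constants are re-derived approximations, not copied from the headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Approximations of the driver's constants (1 TU = 1.024 ms). */
#define MSEC_TO_TU(ms)        ((ms) * 1000 / 1024)
#define ROC_MIN_DURATION      MSEC_TO_TU(100)
#define ROC_MIN_DELAY         MSEC_TO_TU(200)
#define ROC_MAX_DELAY         MSEC_TO_TU(600)
#define ROC_SAFETY_BUFFER     MSEC_TO_TU(20)
#define ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)

void roc_duration_and_delay(bool assoc, uint32_t dtim_interval_tu,
			    uint32_t duration_ms,
			    uint32_t *duration_tu, uint32_t *delay_tu)
{
	*delay_tu = ROC_MIN_DELAY;
	*duration_tu = MSEC_TO_TU(duration_ms);

	if (!assoc || !dtim_interval_tu)
		return;

	/* Wait up to a few DTIMs so the dwell can start right after one. */
	*delay_tu = dtim_interval_tu * 3 < ROC_MAX_DELAY ?
		    dtim_interval_tu * 3 : ROC_MAX_DELAY;

	/* Never stay off-channel longer than one DTIM interval. */
	if (dtim_interval_tu <= *duration_tu) {
		*duration_tu = dtim_interval_tu - ROC_SAFETY_BUFFER;
		if (*duration_tu <= ROC_MIN_DURATION)
			*duration_tu = dtim_interval_tu - ROC_MIN_SAFETY_BUFFER;
	}
}

int main(void)
{
	uint32_t dur, delay;

	/* dtim_period 2 * beacon_int 100 TU = 200 TU, requesting 500 ms */
	roc_duration_and_delay(true, 200, 500, &dur, &delay);
	printf("duration %u TU, delay %u TU\n", (unsigned)dur, (unsigned)delay);
	return 0;
}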
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, enum iwl_roc_activity activity)
+{
+ int res;
+ u32 duration_tu, delay;
+ struct iwl_roc_req roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .activity = cpu_to_le32(activity),
+ .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES))
+ return -EBUSY;
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
+ channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20, 0);
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
+ &delay);
+ roc_req.duration = cpu_to_le32(duration_tu);
+ roc_req.max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
+ IWL_DEBUG_TE(mvm,
+ "Requesting to remain on channel %u for %utu. activity %u\n",
+ channel->hw_value, duration_tu, activity);
+
+ /* Set the node address */
+ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ 0, sizeof(roc_req), &roc_req);
+ if (!res)
+ mvmvif->roc_activity = activity;
+
+ return res;
+}
+
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -903,8 +1152,7 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
- cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color)),
+ cpu_to_le32(iwl_mvm_get_session_prot_id(mvm, vif, 0)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
@@ -914,6 +1162,9 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
/* The time_event_data.id field is reused to save session
* protection's configuration.
*/
+
+ mvmvif->time_event_data.link_id = 0;
+
switch (type) {
case IEEE80211_ROC_TYPE_NORMAL:
mvmvif->time_event_data.id =
@@ -1032,41 +1283,69 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ int iftype = vif->type;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mutex_lock(&mvm->mutex);
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_cancel_session_protection(mvm, mvmvif,
- mvmvif->time_event_data.id);
- iwl_mvm_p2p_roc_finished(mvm);
+ if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+ goto cleanup_roc;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
+ te_data = &mvmvif->time_event_data;
+
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
+ IWL_DEBUG_TE(mvm,
+ "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
+ return;
+ }
+ iwl_mvm_cancel_session_protection(mvm, vif,
+ te_data->id,
+ te_data->link_id);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->hs_time_event_data);
- iwl_mvm_roc_finished(mvm);
}
-
- return;
+ goto cleanup_roc;
}
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ iftype = te_data->vif->type;
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
- iwl_mvm_p2p_roc_finished(mvm);
- } else {
+ else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
- iwl_mvm_roc_finished(mvm);
- }
+
+cleanup_roc:
+ /*
+ * In case we get here before the ROC event started,
+ * (so the status bit isn't set) set it here so iwl_mvm_cleanup_roc will
+ * cleanup things properly
+ */
+ if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ else
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+
+ /* Mutex is released inside this function */
+ iwl_mvm_cleanup_roc(mvm);
}
void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
@@ -1164,25 +1443,28 @@ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 min_duration,
- bool wait_for_notif)
+ bool wait_for_notif,
+ unsigned int link_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
struct iwl_mvm_session_prot_cmd cmd = {
- .id_and_color =
- cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color)),
+ .id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
+ if (mac_link_id < 0)
+ return;
+
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->time_event_lock);
- if (te_data->running &&
+ if (te_data->running && te_data->link_id == link_id &&
time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
jiffies_to_msecs(te_data->end_jiffies - jiffies));
@@ -1199,6 +1481,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
te_data->id = le32_to_cpu(cmd.conf_id);
te_data->duration = le32_to_cpu(cmd.duration_tu);
te_data->vif = vif;
+ te_data->link_id = link_id;
spin_unlock_bh(&mvm->time_event_lock);
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
@@ -1208,11 +1491,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
if (iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
- IWL_ERR(mvm,
- "Couldn't send the SESSION_PROTECTION_CMD\n");
- spin_lock_bh(&mvm->time_event_lock);
- iwl_mvm_te_clear_data(mvm, te_data);
- spin_unlock_bh(&mvm->time_event_lock);
+ goto send_cmd_err;
}
return;
@@ -1225,12 +1504,19 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
if (iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD),
0, sizeof(cmd), &cmd)) {
- IWL_ERR(mvm,
- "Couldn't send the SESSION_PROTECTION_CMD\n");
iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+ goto send_cmd_err;
} else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
TU_TO_JIFFIES(100))) {
IWL_ERR(mvm,
"Failed to protect session until session protection\n");
}
+ return;
+
+send_cmd_err:
+ IWL_ERR(mvm,
+ "Couldn't send the SESSION_PROTECTION_CMD\n");
+ spin_lock_bh(&mvm->time_event_lock);
+ iwl_mvm_te_clear_data(mvm, te_data);
+ spin_unlock_bh(&mvm->time_event_lock);
}
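The rework of iwl_mvm_schedule_session_protection() folds its two identical failure paths into a single send_cmd_err label at the end of the function. A generic sketch of that consolidation, trading duplicated cleanup for one shared exit path:

#include <stdio.h>

static int send_cmd(int which) { return which == 2 ? -1 : 0; }

static void run(void)
{
	if (send_cmd(1))
		goto send_cmd_err;
	printf("first command ok\n");

	if (send_cmd(2))
		goto send_cmd_err;
	printf("second command ok\n");
	return;

send_cmd_err:
	/* One place to log the failure and roll back shared state. */
	fprintf(stderr, "couldn't send the command\n");
}

int main(void) { run(); return 0; }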
diff --git a/mvm/time-event.h b/mvm/time-event.h
index 989a5319fb21..49256ba4cf58 100644
--- a/mvm/time-event.h
+++ b/mvm/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2020, 2023 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#ifndef __time_event_h__
@@ -101,6 +101,14 @@ void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/**
+ * iwl_mvm_rx_roc_notif - handles %DISCOVERY_ROC_NTF.
+ * @mvm: the mvm component
+ * @rxb: RX buffer
+ */
+void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+
+/**
* iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
* @mvm: the mvm component
* @vif: the virtual interface for which the roc is requested. It is assumed
@@ -134,7 +142,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
* @mvm: the mvm component
- * @vif: the vif to which the time event belongs
+ * @mvmvif: the vif to which the time event belongs
* @te_data: the time event data that corresponds to that time event
*
* This function can be used to cancel a time event regardless its type.
@@ -195,16 +203,21 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
* iwl_mvm_schedule_session_protection - schedule a session protection
* @mvm: the mvm component
* @vif: the virtual interface for which the protection issued
- * @duration: the duration of the protection
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
* @wait_for_notif: if true, will block until the start of the protection
+ * @link_id: The link to schedule a session protection for
*/
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 min_duration,
- bool wait_for_notif);
+ bool wait_for_notif,
+ unsigned int link_id);
/**
* iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mvm: the mvm component
+ * @rxb: the RX buffer containing the notification
*/
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
diff --git a/mvm/tt.c b/mvm/tt.c
index 157e96fa23c1..d92470960b38 100644
--- a/mvm/tt.c
+++ b/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -299,7 +299,7 @@ static void check_exit_ctkill(struct work_struct *work)
ret = iwl_mvm_get_temp(mvm, &temp);
- __iwl_mvm_mac_stop(mvm);
+ __iwl_mvm_mac_stop(mvm, false);
if (ret)
goto reschedule;
@@ -555,6 +555,22 @@ static int compare_temps(const void *a, const void *b)
return ((s16)le16_to_cpu(*(__le16 *)a) -
(s16)le16_to_cpu(*(__le16 *)b));
}
+
+struct iwl_trip_walk_data {
+ __le16 *thresholds;
+ int count;
+};
+
+static int iwl_trip_temp_cb(struct thermal_trip *trip, void *arg)
+{
+ struct iwl_trip_walk_data *twd = arg;
+
+ if (trip->temperature == THERMAL_TEMP_INVALID)
+ return 0;
+
+ twd->thresholds[twd->count++] = cpu_to_le16((s16)(trip->temperature / 1000));
+ return 0;
+}
#endif
int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
@@ -562,42 +578,25 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
struct temp_report_ths_cmd cmd = {0};
int ret;
#ifdef CONFIG_THERMAL
- int i, j, idx = 0;
+ struct iwl_trip_walk_data twd = { .thresholds = cmd.thresholds, .count = 0 };
lockdep_assert_held(&mvm->mutex);
if (!mvm->tz_device.tzone)
goto send;
- /* The driver holds array of temperature trips that are unsorted
- * and uncompressed, the FW should get it compressed and sorted
+ /*
+ * The thermal core holds an array of temperature trips that are
+ * unsorted and uncompressed, the FW should get it compressed and
+ * sorted.
*/
/* compress trips to cmd array, remove uninitialized values*/
- for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
- if (mvm->tz_device.trips[i].temperature != INT_MIN) {
- cmd.thresholds[idx++] =
- cpu_to_le16((s16)(mvm->tz_device.trips[i].temperature / 1000));
- }
- }
- cmd.num_temps = cpu_to_le32(idx);
+ for_each_thermal_trip(mvm->tz_device.tzone, iwl_trip_temp_cb, &twd);
- if (!idx)
- goto send;
-
- /*sort cmd array*/
- sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
-
- /* we should save the indexes of trips because we sort
- * and compress the orginal array
- */
- for (i = 0; i < idx; i++) {
- for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
- if ((int)(le16_to_cpu(cmd.thresholds[i]) * 1000) ==
- mvm->tz_device.trips[j].temperature)
- mvm->tz_device.fw_trips_index[i] = j;
- }
- }
+ cmd.num_temps = cpu_to_le32(twd.count);
+ if (twd.count)
+ sort(cmd.thresholds, twd.count, sizeof(s16), compare_temps, NULL);
send:
#endif
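The new code walks the thermal core's trip points with for_each_thermal_trip(), skips invalid entries, converts millidegrees to 16-bit degrees and sorts the compressed list before handing it to the firmware. A userspace approximation of that compress-and-sort step; the invalid marker, trip count and byte-order handling are simplified assumptions:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_TRIPS     8
#define TEMP_INVALID  INT_MIN   /* stand-in for THERMAL_TEMP_INVALID */

static int compare_s16(const void *a, const void *b)
{
	return *(const short *)a - *(const short *)b;
}

/* Compress valid trips (millidegrees) into a sorted s16 list (degrees). */
static int compress_trips(const int *trip_mdeg, int n, short *out)
{
	int count = 0;

	for (int i = 0; i < n; i++) {
		if (trip_mdeg[i] == TEMP_INVALID)
			continue;
		out[count++] = (short)(trip_mdeg[i] / 1000);
	}
	if (count)
		qsort(out, count, sizeof(out[0]), compare_s16);
	return count;
}

int main(void)
{
	int trips[MAX_TRIPS] = { 85000, TEMP_INVALID, 70000, TEMP_INVALID,
				 95000, TEMP_INVALID, TEMP_INVALID, TEMP_INVALID };
	short thresholds[MAX_TRIPS];
	int n = compress_trips(trips, MAX_TRIPS, thresholds);

	for (int i = 0; i < n; i++)
		printf("%d ", thresholds[i]);   /* prints: 70 85 95 */
	printf("\n");
	return 0;
}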
@@ -619,55 +618,41 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
int ret;
int temp;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -ENODATA;
- goto out;
+ /*
+ * Tell the core that there is no valid temperature value to
+ * return, but it need not worry about this.
+ */
+ *temperature = THERMAL_TEMP_INVALID;
+ return 0;
}
ret = iwl_mvm_get_temp(mvm, &temp);
if (ret)
- goto out;
+ return ret;
*temperature = temp * 1000;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
- int trip, int temp)
+ const struct thermal_trip *trip, int temp)
{
struct iwl_mvm *mvm = thermal_zone_device_priv(device);
- struct iwl_mvm_thermal_device *tzone;
- int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
- goto out;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
- if ((temp / 1000) > S16_MAX) {
- ret = -EINVAL;
- goto out;
- }
-
- tzone = &mvm->tz_device;
- if (!tzone) {
- ret = -EIO;
- goto out;
- }
+ if ((temp / 1000) > S16_MAX)
+ return -EINVAL;
- ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_send_temp_report_ths_cmd(mvm);
}
static struct thermal_zone_device_ops tzone_ops = {
@@ -675,9 +660,6 @@ static struct thermal_zone_device_ops tzone_ops = {
.set_trip_temp = iwl_mvm_tzone_set_trip_temp,
};
-/* make all trips writable */
-#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
-
static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
{
int i, ret;
@@ -693,10 +675,18 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
+ /*
+ * 0 is a valid temperature,
+ * so initialize the array with S16_MIN which invalid temperature
+ */
+ for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) {
+ mvm->tz_device.trips[i].temperature = THERMAL_TEMP_INVALID;
+ mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE;
+ mvm->tz_device.trips[i].flags = THERMAL_TRIP_FLAG_RW_TEMP;
+ }
mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name,
mvm->tz_device.trips,
IWL_MAX_DTS_TRIPS,
- IWL_WRITABLE_TRIPS_MSK,
mvm, &tzone_ops,
NULL, 0, 0);
if (IS_ERR(mvm->tz_device.tzone)) {
@@ -711,15 +701,6 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
if (ret) {
IWL_DEBUG_TEMP(mvm, "Failed to enable thermal zone\n");
thermal_zone_device_unregister(mvm->tz_device.tzone);
- return;
- }
-
- /* 0 is a valid temperature,
- * so initialize the array with S16_MIN which invalid temperature
- */
- for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) {
- mvm->tz_device.trips[i].temperature = INT_MIN;
- mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE;
}
}
@@ -745,27 +726,18 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long new_state)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
- int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
- goto unlock;
- }
-
- if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
- ret = -EINVAL;
- goto unlock;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
- ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
- new_state);
+ if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets))
+ return -EINVAL;
-unlock:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ new_state);
}
static const struct thermal_cooling_device_ops tcooling_ops = {
diff --git a/mvm/tx.c b/mvm/tx.c
index 36d70d589aed..7ff5ea5e7aca 100644
--- a/mvm/tx.c
+++ b/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -12,7 +12,7 @@
#include <net/ipv6.h>
#include "iwl-trans.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "mvm.h"
#include "sta.h"
#include "time-sync.h"
@@ -262,8 +262,42 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}
+static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ int rate_idx)
+{
+ u32 rate_flags = 0;
+ u8 rate_plcp;
+ bool is_cck;
+
+ /* if the rate isn't a well known legacy rate, take the lowest one */
+ if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+ rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm,
+ info,
+ info->control.vif);
+
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
+ is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) &&
+ (rate_idx <= IWL_LAST_CCK_RATE);
+
+ /* Set CCK or OFDM flag */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
+ if (!is_cck)
+ rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+ else
+ rate_flags |= RATE_MCS_CCK_MSK;
+ } else if (is_cck) {
+ rate_flags |= RATE_MCS_CCK_MSK_V1;
+ }
+
+ return (u32)rate_plcp | rate_flags;
+}
+
static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
- struct ieee80211_tx_info *info)
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ __le16 fc)
{
struct ieee80211_tx_rate *rate = &info->control.rates[0];
u32 result;
@@ -288,6 +322,9 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
+ result = iwl_new_rate_from_v1(result);
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
result = RATE_MCS_HT_MSK_V1;
result |= u32_encode_bits(rate->idx,
@@ -301,12 +338,21 @@ static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
result |= RATE_MCS_LDPC_MSK_V1;
if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
result |= RATE_MCS_STBC_MSK;
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
+ result = iwl_new_rate_from_v1(result);
} else {
- return 0;
+ int rate_idx = info->control.rates[0].idx;
+
+ result = iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
}
- if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
- return iwl_new_rate_from_v1(result);
+ if (info->control.antennas)
+ result |= u32_encode_bits(info->control.antennas,
+ RATE_MCS_ANT_AB_MSK);
+ else
+ result |= iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+
return result;
}
@@ -315,17 +361,8 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, __le16 fc)
{
int rate_idx = -1;
- u8 rate_plcp;
- u32 rate_flags = 0;
- bool is_cck;
- if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
- u32 result = iwl_mvm_get_inject_tx_rate(mvm, info);
-
- if (result)
- return result;
- rate_idx = info->control.rates[0].idx;
- } else if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
/* info->control is only relevant for non HW rate control */
/* HT rate doesn't make sense for a non data frame */
@@ -350,33 +387,16 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
}
- /* if the rate isn't a well known legacy rate, take the lowest one */
- if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
- rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm,
- info,
- info->control.vif);
-
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
- is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
-
- /* Set CCK or OFDM flag */
- if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
- if (!is_cck)
- rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
- else
- rate_flags |= RATE_MCS_CCK_MSK;
- } else if (is_cck) {
- rate_flags |= RATE_MCS_CCK_MSK_V1;
- }
-
- return (u32)rate_plcp | rate_flags;
+ return iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
}
static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
+
return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}
@@ -500,13 +520,49 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
+{
+ if (unlikely(!mvmsta))
+ return true;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return true;
+
+ if (likely(ieee80211_is_data(hdr->frame_control) &&
+ mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
+ return false;
+
+ /*
+ * Not a data frame: use the host rate if we're on an old device
+ * that can't possibly be doing MLO (its firmware may select a
+ * bad rate). If we might be doing MLO we need to let the FW pick
+ * (since we don't necessarily know the link); FW rate selection
+ * was fixed on those newer devices.
+ */
+ return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+}
+
+static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *out_hdr = cmd;
+
+ memcpy(cmd, hdr, hdrlen);
+ if (addr3_override)
+ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
+}
+
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
- struct ieee80211_sta *sta, u8 sta_id)
+ struct ieee80211_sta *sta, u8 sta_id,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
@@ -536,16 +592,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/*
- * For data packets rate info comes from the fw. Only
- * set rate/antenna during connection establishment or in case
- * no station is given.
+ * For data and mgmt packets rate info comes from the fw (for
+ * new devices, older FW is somewhat broken for this). Only
+ * set rate/antenna for injected frames with fixed rate, or
+ * when no sta is given, or with older firmware.
*/
- if (!sta || !ieee80211_is_data(hdr->frame_control) ||
- mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
+ if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
hdr->frame_control);
+ } else if (!ieee80211_is_data(hdr->frame_control) ||
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
+ /* These are important frames */
+ flags |= IWL_TX_FLAGS_HIGH_PRI;
}
if (mvm->trans->trans_cfg->device_family >=
@@ -560,7 +620,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -575,7 +635,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -593,7 +653,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
out:
return dev_cmd;
@@ -742,10 +802,30 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
- if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP ||
- info.control.vif->type == NL80211_IFTYPE_ADHOC) {
+ if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+ p2p_aux) ||
+ (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ offchannel)) {
+ /*
+ * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
+ * that can be used in 2 different types of vifs, P2P
+ * Device and STATION.
+ * P2P Device uses the offchannel queue.
+ * STATION (HS2.0) uses the auxiliary context of the FW,
+ * and hence needs to be sent on the aux queue.
+ * If P2P_DEV_OVER_AUX is supported (p2p_aux = true),
+ * P2P Device also uses the aux queue.
+ */
+ sta_id = mvm->aux_sta.sta_id;
+ queue = mvm->aux_queue;
+ if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE))
+ return -1;
+ } else if (info.control.vif->type ==
+ NL80211_IFTYPE_P2P_DEVICE ||
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
u32 link_id = u32_get_bits(info.control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
struct iwl_mvm_vif_link_info *link;
@@ -771,18 +851,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
- } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- offchannel) {
- /*
- * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
- * that can be used in 2 different types of vifs, P2P &
- * STATION.
- * P2P uses the offchannel queue.
- * STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue.
- */
- sta_id = mvm->aux_sta.sta_id;
- queue = mvm->aux_queue;
}
}
@@ -796,7 +864,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
+ NULL);
if (!dev_cmd)
return -1;
@@ -845,10 +914,10 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
if (WARN_ON(!link_conf))
band = NL80211_BAND_2GHZ;
else
- band = link_conf->chandef.chan->band;
+ band = link_conf->chanreq.oper.chan->band;
rcu_read_unlock();
} else {
- band = mvmsta->vif->bss_conf.chandef.chan->band;
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
}
lmac = iwl_mvm_get_lmac_id(mvm, band);
@@ -890,9 +959,15 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
- if (WARN_ON_ONCE(IS_ERR(next)))
- return -EINVAL;
- else if (next)
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
+ if (next)
consume_skb(skb);
skb_list_walk_safe(next, tmp, next) {
@@ -948,8 +1023,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 tid;
- snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
- tcp_hdrlen(skb);
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1116,7 +1190,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
*/
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
@@ -1148,7 +1223,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_probe_resp_set_noa(mvm, skb);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
- sta, mvmsta->deflink.sta_id);
+ sta, mvmsta->deflink.sta_id,
+ addr3_override);
if (!dev_cmd)
goto drop;
@@ -1270,9 +1346,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
+ struct ieee80211_vif *vif;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
+ const u8 *addr3;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1283,26 +1361,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
if (!skb_is_gso(skb))
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
if (payload_len <= skb_shinfo(skb)->gso_size)
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
__skb_queue_head_init(&mpdus_skbs);
+ vif = info.control.vif;
+ if (!vif)
+ return -1;
+
ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
if (ret)
return ret;
WARN_ON(skb_queue_empty(&mpdus_skbs));
+ /*
+ * As described in IEEE Std 802.11-2020, Table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
+ * in the command header. We need to preserve the original
+ * address 3 in the skb header to correctly create all the
+ * A-MSDU subframe headers from it.
+ */
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ addr3 = vif->cfg.ap_addr;
+ break;
+ case NL80211_IFTYPE_AP:
+ addr3 = vif->addr;
+ break;
+ default:
+ addr3 = NULL;
+ break;
+ }
+
while (!skb_queue_empty(&mpdus_skbs)) {
+ struct ieee80211_hdr *hdr;
+ bool amsdu;
+
skb = __skb_dequeue(&mpdus_skbs);
+ hdr = (void *)skb->data;
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
+ amsdu ? addr3 : NULL);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
@@ -1563,12 +1674,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
- tx_resp->frame_count) & 0xfff;
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
}
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
@@ -1599,7 +1716,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
/* we can free until ssn % q.n_bd not inclusive */
- iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
+ iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
while (!skb_queue_empty(&skbs)) {
struct sk_buff *skb = __skb_dequeue(&skbs);
@@ -1612,6 +1729,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
@@ -1699,7 +1817,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1)))
- ieee80211_tx_status(mvm->hw, skb);
+ ieee80211_tx_status_skb(mvm->hw, skb);
}
/* This is an aggregation queue or might become one, so we use
@@ -1760,6 +1878,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -1950,7 +2069,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway).
*/
- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1964,6 +2083,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
if (!is_flush)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
}
/*
@@ -2053,7 +2174,7 @@ out:
while (!skb_queue_empty(&reclaimed_skbs)) {
skb = __skb_dequeue(&reclaimed_skbs);
- ieee80211_tx_status(mvm->hw, skb);
+ ieee80211_tx_status_skb(mvm->hw, skb);
}
}
@@ -2098,6 +2219,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
tfd_cnt, pkt_len))
return;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
@@ -2129,16 +2256,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate), false);
}
- if (mvmsta)
+ if (mvmsta) {
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
- rcu_read_unlock();
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
- sta_id, le32_to_cpu(ba_res->flags),
- le16_to_cpu(ba_res->txed),
- le16_to_cpu(ba_res->done));
+ iwl_mvm_count_mpdu(mvmsta, sta_id,
+ le16_to_cpu(ba_res->txed), true, 0);
+ }
+ rcu_read_unlock();
return;
}
@@ -2170,9 +2295,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_unlock();
- iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags, false);
-
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
ba_notif->sta_addr, ba_notif->sta_id);
@@ -2185,6 +2307,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags, false);
}
/*
@@ -2229,7 +2354,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
- cmd.flags |= CMD_WANT_SKB;
+ cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
sta_id, tids);
@@ -2290,24 +2415,10 @@ free_rsp:
return ret;
}
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
{
- u32 sta_id, tfd_queue_msk;
-
- if (internal) {
- struct iwl_mvm_int_sta *int_sta = sta;
-
- sta_id = int_sta->sta_id;
- tfd_queue_msk = int_sta->tfd_queue_msk;
- } else {
- struct iwl_mvm_sta *mvm_sta = sta;
-
- sta_id = mvm_sta->deflink.sta_id;
- tfd_queue_msk = mvm_sta->tfd_queue_msk;
- }
-
if (iwl_mvm_has_new_tx_api(mvm))
return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
- return iwl_mvm_flush_tx_path(mvm, tfd_queue_msk);
+ return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
}
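
The reworked iwl_mvm_get_scd_ssn() above is where the scheduler SSN width matters: 22000-series and older devices report 12 bits, AX210 and later report 16, so the raw 32-bit word read past the aggregation status has to be masked accordingly. A small standalone sketch of that masking, with the device-family check reduced to a boolean purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* mirrors the masking in iwl_mvm_get_scd_ssn(): 16-bit SSN on AX210 and
 * later families, 12-bit SSN on earlier ones
 */
static uint32_t scd_ssn(uint32_t raw, int ax210_or_later)
{
	return ax210_or_later ? (raw & 0xFFFF) : (raw & 0xFFF);
}

int main(void)
{
	/* 0x1234 is preserved with 16 bits but wraps to 0x234 with 12 */
	printf("old mask: 0x%x, new mask: 0x%x\n",
	       scd_ssn(0x1234, 0), scd_ssn(0x1234, 1));
	return 0;
}
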
diff --git a/mvm/utils.c b/mvm/utils.c
index 48016b4343d2..0e5fa8374103 100644
--- a/mvm/utils.c
+++ b/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -249,6 +249,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
* This is the special case in which init is set and we call a callback in
* this case to clear the state indicating that station creation is in
* progress.
+ *
+ * Returns: 0 on success, or a negative error code on failure
*/
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
@@ -342,6 +344,80 @@ static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
return true;
}
+#define PERIODIC_STAT_RATE 5
+
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
+{
+ u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+ u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
+ struct iwl_system_statistics_cmd system_cmd = {
+ .cfg_mask = cpu_to_le32(flags),
+ .config_time_sec = cpu_to_le32(enable ?
+ PERIODIC_STAT_RATE : 0),
+ .type_id_mask = cpu_to_le32(type),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ 0, sizeof(system_cmd), &system_cmd);
+}
+
+static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
+ u8 cmd_ver)
+{
+ struct iwl_system_statistics_cmd system_cmd = {
+ .cfg_mask = clear ?
+ cpu_to_le32(IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK) :
+ cpu_to_le32(IWL_STATS_CFG_FLG_RESET_MSK |
+ IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK),
+ .type_id_mask = cpu_to_le32(IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1),
+ };
+ struct iwl_host_cmd cmd = {
+ .id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD),
+ .len[0] = sizeof(system_cmd),
+ .data[0] = &system_cmd,
+ };
+ struct iwl_notification_wait stats_wait;
+ static const u16 stats_complete[] = {
+ WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF),
+ };
+ int ret;
+
+ if (cmd_ver != 1) {
+ IWL_FW_CHECK_FAILED(mvm,
+ "Invalid system statistics command version:%d\n",
+ cmd_ver);
+ return -EOPNOTSUPP;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait, &stats_wait,
+ stats_complete, ARRAY_SIZE(stats_complete),
+ NULL, NULL);
+
+ mvm->statistics_clear = clear;
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ iwl_remove_notification(&mvm->notif_wait, &stats_wait);
+ return ret;
+ }
+
+ /* 500ms should be enough for the FW to collect data from all
+ * LMACs and send the OPERATIONAL, PART1 and END notifications
+ * to the host
+ */
+ ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 2);
+ if (ret)
+ return ret;
+
+ if (clear)
+ iwl_mvm_accu_radio_stats(mvm);
+
+ return ret;
+}
+
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
{
struct iwl_statistics_cmd scmd = {
@@ -353,8 +429,22 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
.len[0] = sizeof(scmd),
.data[0] = &scmd,
};
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ IWL_FW_CMD_VER_UNKNOWN);
int ret;
+ /*
+ * Don't request statistics during restart, they'll not have any useful
+ * information right after restart, nor is clearing needed
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
+ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
+ return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
+
/* From version 15 - STATISTICS_NOTIFICATION, the reply for
* STATISTICS_CMD is empty, and the response is with
* STATISTICS_NOTIFICATION notification
@@ -802,7 +892,7 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -810,8 +900,6 @@ static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
@@ -1040,10 +1128,9 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
spin_unlock(&mvm->tcm.lock);
if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (iwl_mvm_request_statistics(mvm, true))
handle_uapsd = false;
- mutex_unlock(&mvm->mutex);
}
spin_lock(&mvm->tcm.lock);
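
The new SYSTEM_STATISTICS_CMD paths follow the driver's usual ordering for commands answered by notifications: register the waiter first, send the command, drop the waiter if the send fails, otherwise wait (here HZ/2) for the END notification. The periodic variant only differs in the payload it builds. A standalone sketch of that payload selection is below; the flag values are placeholders standing in for the IWL_STATS_* constants, not the firmware API.

#include <stdint.h>
#include <stdio.h>

/* placeholder values standing in for the IWL_STATS_* firmware constants */
#define DISABLE_NTFY_MSK	0x1
#define NTFY_TYPE_OPER		0x2
#define NTFY_TYPE_OPER_PART1	0x4
#define PERIODIC_STAT_RATE	5	/* seconds, as in the patch */

struct sys_stats_cmd {			/* reduced iwl_system_statistics_cmd */
	uint32_t cfg_mask;
	uint32_t config_time_sec;
	uint32_t type_id_mask;
};

/* mirrors the field selection in iwl_mvm_request_periodic_system_statistics() */
static struct sys_stats_cmd build_periodic_stats_cmd(int enable)
{
	struct sys_stats_cmd cmd = {
		.cfg_mask = enable ? 0 : DISABLE_NTFY_MSK,
		.config_time_sec = enable ? PERIODIC_STAT_RATE : 0,
		.type_id_mask = enable ?
			(NTFY_TYPE_OPER | NTFY_TYPE_OPER_PART1) : 0,
	};

	return cmd;
}

int main(void)
{
	struct sys_stats_cmd on = build_periodic_stats_cmd(1);
	struct sys_stats_cmd off = build_periodic_stats_cmd(0);

	printf("enable:  cfg=%#x period=%us types=%#x\n",
	       (unsigned)on.cfg_mask, (unsigned)on.config_time_sec,
	       (unsigned)on.type_id_mask);
	printf("disable: cfg=%#x period=%us types=%#x\n",
	       (unsigned)off.cfg_mask, (unsigned)off.config_time_sec,
	       (unsigned)off.type_id_mask);
	return 0;
}
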
diff --git a/pcie/ctxt-info-gen3.c b/pcie/ctxt-info-gen3.c
index fa4a14546860..ae93a72542b2 100644
--- a/pcie/ctxt-info-gen3.c
+++ b/pcie/ctxt-info-gen3.c
@@ -1,13 +1,34 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
+#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"
+static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+static bool iwl_is_force_scu_active_approved(void)
+{
+ return !!dmi_check_system(dmi_force_scu_active_approved_list);
+}
+
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
@@ -68,7 +89,8 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
}
break;
default:
- IWL_ERR(trans, "WRT: Invalid buffer destination\n");
+ IWL_DEBUG_FW(trans, "WRT: Invalid buffer destination (%d)\n",
+ le32_to_cpu(fw_mon_cfg->buf_location));
}
out:
if (dbg_flags)
@@ -119,7 +141,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
@@ -128,6 +150,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
if (trans->trans_cfg->imr_enabled)
control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+ if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iwl_is_force_scu_active_approved()) {
+ control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
+ IWL_DEBUG_FW(trans,
+ "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
+ }
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
@@ -187,7 +217,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
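
The DMI gating added above is the standard allow-list pattern: a table of struct dmi_system_id entries matched against the platform's SMBIOS strings via dmi_check_system(), which returns the number of matching entries. A short sketch of the same check in isolation (the helper name is hypothetical; the vendor strings mirror the table in the patch):

#include <linux/dmi.h>

static const struct dmi_system_id scu_force_active_allowlist[] = {
	{
		.ident = "DELL",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{
		.ident = "DELL",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
		},
	},
	/* keep last */
	{}
};

/* hypothetical helper: true when the platform is on the allow-list */
static bool platform_allows_scu_force_active(void)
{
	return dmi_check_system(scu_force_active_allowlist) > 0;
}
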
diff --git a/pcie/ctxt-info.c b/pcie/ctxt-info.c
index 5f55efe64bf5..344e4d5a1c6e 100644
--- a/pcie/ctxt-info.c
+++ b/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
@@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
diff --git a/pcie/drv.c b/pcie/drv.c
index 73c1fb3c0c5e..84fd93278450 100644
--- a/pcie/drv.c
+++ b/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -33,7 +33,7 @@ extern int _invalid_type;
.driver_data = _ASSIGN_CFG(cfg)
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static const struct pci_device_id iwl_hw_card_ids[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
#if IS_ENABLED(CONFIG_IWLDVM)
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
@@ -492,7 +492,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
@@ -502,17 +501,53 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
_rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
@@ -526,7 +561,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, _cfg, _name)
-static const struct iwl_dev_info iwl_dev_info_table[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
/* 9000 */
IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
@@ -942,11 +977,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
iwl_cfg_ma, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax221_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax231_name),
@@ -997,20 +1027,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-/* Bz */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_bz_name),
-
-/* Ga (Gl) */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_bz_name),
-
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
@@ -1091,23 +1107,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-/* MsP */
-/* For now we use the same FW as MR, but this will change in the future. */
+/* Bz */
+/* FIXME: need to change the naming according to the actual CRF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
+/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax204_name),
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_gl_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1115,32 +1139,42 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_sc_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sc2_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sc2f_name),
#endif /* CONFIG_IWLMVM */
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
+#endif
/*
* Read rf id and cdb info from prph register and store it
*/
-static int get_crf_id(struct iwl_trans *iwl_trans)
+static void get_crf_id(struct iwl_trans *iwl_trans)
{
- int ret = 0;
u32 sd_reg_ver_addr;
u32 val = 0;
+ u8 step;
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
else
sd_reg_ver_addr = SD_REG_VER;
- if (!iwl_trans_grab_nic_access(iwl_trans)) {
- IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n");
- ret = -EIO;
- goto out;
- }
-
/* Enable access to peripheral registers */
val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG);
- val |= ENABLE_WFPM;
+ val |= WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK;
iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val);
/* Read crf info */
@@ -1150,17 +1184,33 @@ static int get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* For BZ-W, take B step also when A step is indicated */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
+ step = SILICON_B_STEP;
+
+ /* In BZ, the MAC step must be read from the CNVI aux register */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+
+ /* For BZ-U, take B step also when A step is indicated */
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
+ step == SILICON_A_STEP)
+ step = SILICON_B_STEP;
+ }
+
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
+ CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
+ iwl_trans->hw_rev_step = step;
+ iwl_trans->hw_rev |= step;
+ }
+
/* Read cdb info (also contains the jacket info if needed in the future */
iwl_trans->hw_wfpm_id =
iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
IWL_INFO(iwl_trans, "Detected crf-id 0x%x, cnv-id 0x%x wfpm id 0x%x\n",
iwl_trans->hw_crf_id, iwl_trans->hw_cnv_id,
iwl_trans->hw_wfpm_id);
-
- iwl_trans_release_nic_access(iwl_trans);
-
-out:
- return ret;
}
/*
@@ -1197,14 +1247,12 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
case REG_CRF_ID_TYPE_GF:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
break;
- case REG_CRF_ID_TYPE_MR:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12);
- break;
case REG_CRF_ID_TYPE_FM:
- case REG_CRF_ID_TYPE_FMI:
- case REG_CRF_ID_TYPE_FMR:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
break;
+ case REG_CRF_ID_TYPE_WHP:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_WH << 12);
+ break;
default:
ret = -EIO;
IWL_ERR(iwl_trans,
@@ -1245,7 +1293,7 @@ out:
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static const struct iwl_dev_info *
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
@@ -1308,6 +1356,7 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
return NULL;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1351,6 +1400,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_free_trans;
if (iwl_trans_grab_nic_access(iwl_trans)) {
+ get_crf_id(iwl_trans);
/* all good */
iwl_trans_release_nic_access(iwl_trans);
} else {
@@ -1360,7 +1410,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
- get_crf_id(iwl_trans);
/*
* The RF_ID is set to zero in blank OTP so read version to
@@ -1391,6 +1440,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
+ iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
}
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -1456,6 +1506,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!iwl_trans->name)
iwl_trans->name = iwl_trans->cfg->name;
+ IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name);
+
if (iwl_trans->trans_cfg->mq_rx_supported) {
if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
ret = -EINVAL;
@@ -1525,11 +1577,12 @@ static int iwl_pci_suspend(struct device *device)
return 0;
}
-static int iwl_pci_resume(struct device *device)
+static int _iwl_pci_resume(struct device *device, bool restore)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool device_was_powered_off = false;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -1545,6 +1598,26 @@ static int iwl_pci_resume(struct device *device)
if (!trans->op_mode)
return 0;
+ /*
+ * Scratch value was altered: this means the device was powered
+ * off, so we need to reset it completely.
+ * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
+ * so assume that any bits there mean that the device is usable.
+ */
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
+ !iwl_read32(trans, CSR_FUNC_SCRATCH))
+ device_was_powered_off = true;
+
+ if (restore || device_was_powered_off) {
+ trans->state = IWL_TRANS_NO_FW;
+ /* Hope for the best here ... If one of those steps fails we
+ * won't really know how to recover.
+ */
+ iwl_pcie_prepare_card_hw(trans);
+ iwl_finish_nic_init(trans);
+ iwl_op_mode_device_powered_off(trans->op_mode);
+ }
+
/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return 0;
@@ -1565,9 +1638,23 @@ static int iwl_pci_resume(struct device *device)
return 0;
}
+static int iwl_pci_restore(struct device *device)
+{
+ return _iwl_pci_resume(device, true);
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+ return _iwl_pci_resume(device, false);
+}
+
static const struct dev_pm_ops iwl_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
- iwl_pci_resume)
+ .suspend = pm_sleep_ptr(iwl_pci_suspend),
+ .resume = pm_sleep_ptr(iwl_pci_resume),
+ .freeze = pm_sleep_ptr(iwl_pci_suspend),
+ .thaw = pm_sleep_ptr(iwl_pci_resume),
+ .poweroff = pm_sleep_ptr(iwl_pci_suspend),
+ .restore = pm_sleep_ptr(iwl_pci_restore),
};
#define IWL_PM_OPS (&iwl_dev_pm_ops)
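
The dev_pm_ops rework above splits hibernation's .restore away from normal resume so the driver can tell when the device may have lost power entirely; both paths share _iwl_pci_resume() and only differ in the restore flag, and resume additionally checks CSR_FUNC_SCRATCH on Bz and later to detect power loss. A minimal sketch of that ops shape (function bodies are placeholders, not the driver's logic):

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* placeholder */
}

static int _example_resume(struct device *dev, bool restore)
{
	/* on restore (or detected power loss) the device state is gone,
	 * so it would be fully re-initialized here before proceeding
	 */
	return 0;	/* placeholder */
}

static int example_resume(struct device *dev)
{
	return _example_resume(dev, false);
}

static int example_restore(struct device *dev)
{
	return _example_resume(dev, true);
}

static const struct dev_pm_ops example_pm_ops = {
	.suspend  = pm_sleep_ptr(example_suspend),
	.resume   = pm_sleep_ptr(example_resume),
	.freeze   = pm_sleep_ptr(example_suspend),
	.thaw     = pm_sleep_ptr(example_resume),
	.poweroff = pm_sleep_ptr(example_suspend),
	.restore  = pm_sleep_ptr(example_restore),
};
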
diff --git a/pcie/internal.h b/pcie/internal.h
index 0adcf0e13e85..27a7e0b5b3d5 100644
--- a/pcie/internal.h
+++ b/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -22,7 +22,6 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-#include "queue/tx.h"
#include "iwl-context-info.h"
/*
@@ -58,10 +57,7 @@ struct iwl_rx_mem_buffer {
bool invalid;
};
-/**
- * struct isr_statistics - interrupt statistics
- *
- */
+/* interrupt statistics */
struct isr_statistics {
u32 hw;
u32 sw;
@@ -127,6 +123,8 @@ struct iwl_rx_completion_desc_bz {
* @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
+ * @write_actual: actual write pointer written to device, since we update in
+ * blocks of 8 only
* @free_count: Number of pre-allocated buffers in rx_free
* @used_count: Number of RBDs handled to allocator to use for allocation
* @write_actual:
@@ -135,10 +133,12 @@ struct iwl_rx_completion_desc_bz {
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
- * @lock:
+ * @lock: per-queue lock
* @queue: actual rx queue. Not used for multi-rx queue.
* @next_rb_is_fragment: indicates that the previous RB that we handled set
* the fragmented flag, so the next one is still another fragment
+ * @napi: NAPI struct for this queue
+ * @queue_size: size of this queue
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
@@ -188,19 +188,20 @@ struct iwl_rb_allocator {
/**
* iwl_get_closed_rb_stts - get closed rb stts from different structs
- * @rxq - the rxq to get the rb stts from
+ * @trans: transport pointer (for configuration)
+ * @rxq: the rxq to get the rb stts from
*/
-static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
- struct iwl_rxq *rxq)
+static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
__le16 *rb_stts = rxq->rb_stts;
- return READ_ONCE(*rb_stts);
+ return le16_to_cpu(READ_ONCE(*rb_stts));
} else {
struct iwl_rb_status *rb_stts = rxq->rb_stts;
- return READ_ONCE(rb_stts->closed_rb_num);
+ return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
}
}
@@ -243,6 +244,7 @@ enum iwl_image_response_code {
IWL_IMAGE_RESP_FAIL = 2,
};
+#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
* struct cont_rec: continuous recording data structure
* @prev_wr_ptr: the last address that was read in monitor_data
@@ -253,7 +255,6 @@ enum iwl_image_response_code {
* in &iwl_fw_mon_dbgfs_state enum
* @mutex: locked while reading from monitor_data debugfs file
*/
-#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
u32 prev_wr_ptr;
u32 prev_wrap_cnt;
@@ -271,7 +272,7 @@ enum iwl_pcie_fw_reset_state {
};
/**
- * enum wl_pcie_imr_status - imr dma transfer state
+ * enum iwl_pcie_imr_status - imr dma transfer state
* @IMR_D2S_IDLE: default value of the dma transfer
* @IMR_D2S_REQUESTED: dma transfer requested
* @IMR_D2S_COMPLETED: dma transfer completed
@@ -285,6 +286,58 @@ enum iwl_pcie_imr_status {
};
/**
+ * struct iwl_pcie_txqs - TX queues data
+ *
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @page_offs: offset from skb->cb to mac header page pointer
+ * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
+ * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @cmd: command queue data
+ * @cmd.fifo: FIFO number
+ * @cmd.q_id: queue ID
+ * @cmd.wdg_timeout: watchdog timeout
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
+ */
+struct iwl_pcie_txqs {
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct dma_pool *bc_pool;
+ size_t bc_tbl_size;
+ bool bc_table_dword;
+ u8 page_offs;
+ u8 dev_cmd_offs;
+ struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
+ struct {
+ u8 fifo;
+ u8 q_id;
+ unsigned int wdg_timeout;
+ } cmd;
+
+ struct {
+ u8 max_tbs;
+ u16 size;
+ u8 addr_size;
+ } tfd;
+
+ struct iwl_dma_ptr scd_bc_tbls;
+
+ u8 queue_alloc_cmd_ver;
+};
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -298,10 +351,6 @@ enum iwl_pcie_imr_status {
* @prph_info_dma_addr: dma addr of prph info
* @prph_scratch_dma_addr: dma addr of prph scratch
* @ctxt_info_dma_addr: dma addr of context information
- * @init_dram: DRAM data of firmware image (including paging).
- * Context information addresses will be taken from here.
- * This is driver's local copy for keeping track of size and
- * count for allocating and freeing the memory.
* @iml: image loader image virtual address
* @iml_dma_addr: image loader image DMA address
* @trans: pointer to the generic transport area
@@ -315,17 +364,15 @@ enum iwl_pcie_imr_status {
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_write_waitq: wait queue for uCode load
* @cmd_queue - command queue number
- * @def_rx_queue - default rx queue number
* @rx_buf_size: Rx buffer size
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @rx_page_order: page order for receive buffer size
* @rx_buf_bytes: RX buffer (RB) size in bytes
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
- * @cmd_in_flight: true when we have a host command in flight
-#ifdef CONFIG_IWLWIFI_DEBUGFS
* @fw_mon_data: fw continuous recording data
-#endif
+ * @cmd_hold_nic_awake: indicates NIC is held awake for APMG workaround
+ * during commands in flight
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
* @shared_vec_mask: the type of causes the shared vector handles
@@ -345,8 +392,33 @@ enum iwl_pcie_imr_status {
* @alloc_page: allocated page to still use parts of
* @alloc_page_used: how much of the allocated page was already used (bytes)
* @imr_status: imr dma state machine
- * @wait_queue_head_t: imr wait queue for dma completion
+ * @imr_waitq: imr wait queue for dma completion
* @rf_name: name/version of the CRF, if any
+ * @use_ict: whether or not ICT (interrupt table) is used
+ * @ict_index: current ICT read index
+ * @ict_tbl: ICT table pointer
+ * @ict_tbl_dma: ICT table DMA address
+ * @inta_mask: interrupt (INT-A) mask
+ * @irq_lock: lock to synchronize IRQ handling
+ * @txq_memory: TXQ allocation array
+ * @sx_waitq: waitqueue for Sx transitions
+ * @sx_complete: completion for Sx transitions
+ * @pcie_dbg_dumped_once: indicates PCIe regs were dumped already
+ * @opmode_down: indicates opmode went away
+ * @num_rx_bufs: number of RX buffers to allocate/use
+ * @no_reclaim_cmds: special commands not using reclaim flow
+ * (firmware workaround)
+ * @n_no_reclaim_cmds: number of special commands not using reclaim flow
+ * @affinity_mask: IRQ affinity mask for each RX queue
+ * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
+ * enable/disable
+ * @fw_reset_handshake: indicates FW reset handshake is needed
+ * @fw_reset_state: state of FW reset handshake
+ * @fw_reset_waitq: waitqueue for FW reset handshake
+ * @is_down: indicates the NIC is down
+ * @isr_stats: interrupt statistics
+ * @napi_dev: (fake) netdev for NAPI registration
+ * @txqs: transport tx queues data.
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -366,7 +438,7 @@ struct iwl_trans_pcie {
dma_addr_t iml_dma_addr;
struct iwl_trans *trans;
- struct net_device napi_dev;
+ struct net_device *napi_dev;
/* INT ICT Table */
__le32 *ict_tbl;
@@ -398,7 +470,6 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t sx_waitq;
- u8 def_rx_queue;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u16 num_rx_bufs;
@@ -445,6 +516,8 @@ struct iwl_trans_pcie {
enum iwl_pcie_imr_status imr_status;
wait_queue_head_t imr_waitq;
char rf_name[32];
+
+ struct iwl_pcie_txqs txqs;
};
static inline struct iwl_trans_pcie *
@@ -519,6 +592,33 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
+
+struct iwl_tso_hdr_page {
+ struct page *page;
+ u8 *pos;
+};
+
+/*
+ * Note that we put this struct *last* in the page. By doing that, we ensure
+ * that no TB referencing this page can trigger the 32-bit boundary hardware
+ * bug.
+ */
+struct iwl_tso_page_info {
+ dma_addr_t dma_addr;
+ struct page *next;
+ refcount_t use_count;
+};
+
+#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
+#define IWL_TSO_PAGE_INFO(addr) \
+ ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
+ IWL_TSO_PAGE_DATA_SIZE))
+
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
@@ -533,10 +633,171 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
-int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len);
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room);
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta);
+
+static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
+{
+ dma_addr_t res;
+
+ res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
+ res += (unsigned long)addr & ~PAGE_MASK;
+
+ return res;
+}
+
+static inline dma_addr_t
+iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->trans_cfg->gen2)
+ idx = iwl_txq_get_cmd_index(txq, idx);
+
+ return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
+}
+
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
+
+static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->id);
+ }
+}
+
+/**
+ * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
+{
+ return ++index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_txq_dec_wrap - decrement queue index, wrap back to end
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
+{
+ return --index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
+
+static inline void
+iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+ }
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len);
+
+static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd);
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
+ int queue_size);
+
+static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+
+ if (trans->trans_cfg->gen2) {
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
+
+ return le16_to_cpu(tfh_tb->tb_len);
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
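/*
 * Illustrative sketch (assumption: the legacy TFD layout used above packs
 * the top 4 bits of the 36-bit address into bits 0-3 of hi_n_len and a
 * 12-bit length into bits 4-15, hence the ">> 4"). The ex_* helpers are
 * made up for illustration.
 */
#include <stdint.h>

static uint16_t ex_tb_len(uint16_t hi_n_len)
{
	return hi_n_len >> 4;
}

static uint8_t ex_tb_addr_hi(uint16_t hi_n_len)
{
	return hi_n_len & 0xf;
}

/* e.g. hi_n_len == 0x0805: length 0x80, address bits 35:32 == 0x5 */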
+
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
/*****************************************************
* Error handling
@@ -751,7 +1012,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
}
}
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq);
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
@@ -798,17 +1059,56 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
void iwl_pcie_rx_allocator_work(struct work_struct *data);
+/* common trans ops for all generations transports */
+void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx);
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+
+/* transport gen 1 exported functions */
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
+
/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
@@ -830,7 +1130,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
@@ -845,5 +1145,7 @@ void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/pcie/rx.c b/pcie/rx.c
index f87b28edc267..afb88eab8174 100644
--- a/pcie/rx.c
+++ b/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1000,6 +1000,11 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
+{
+ return *(struct iwl_trans_pcie **)netdev_priv(dev);
+}
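/*
 * Illustrative sketch (userspace analogue, not kernel code): the dummy
 * netdev allocated in iwl_trans_pcie_alloc() below is sized for a single
 * pointer, so the store done at allocation time and the load above are one
 * level of indirection each. ex_ctx stands in for struct iwl_trans_pcie.
 */
#include <assert.h>
#include <stdlib.h>

struct ex_ctx {
	int id;
};

int main(void)
{
	void *priv = malloc(sizeof(struct ex_ctx *)); /* netdev_priv() region */
	struct ex_ctx ctx = { .id = 1 };

	*(struct ex_ctx **)priv = &ctx;          /* *priv = trans_pcie at alloc time */
	assert(*(struct ex_ctx **)priv == &ctx); /* back-pointer lookup in NAPI poll */
	free(priv);
	return 0;
}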
+
static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
@@ -1007,7 +1012,7 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
struct iwl_trans *trans;
int ret;
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
@@ -1034,7 +1039,7 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
struct iwl_trans *trans;
int ret;
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
@@ -1131,7 +1136,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
if (trans_pcie->msix_enabled)
poll = iwl_pcie_napi_poll_msix;
- netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+ netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
poll);
napi_enable(&rxq->napi);
}
@@ -1296,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1351,8 +1356,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
if (len < sizeof(*pkt) || offset > max_len)
break;
- trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
- trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
+ maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
/* Reclaim a command buffer only if this packet is a response
* to a (driver-originated) command.
@@ -1373,7 +1377,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
}
- if (rxq->id == trans_pcie->def_rx_queue)
+ if (rxq->id == IWL_DEFAULT_RX_QUEUE)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
&rxcb);
else
@@ -1385,7 +1389,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
* if it is true then one of the handlers took the page.
*/
- if (reclaim) {
+ if (reclaim && txq) {
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int index = SEQ_TO_INDEX(sequence);
int cmd_index = iwl_txq_get_cmd_index(txq, index);
@@ -1510,7 +1514,7 @@ restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
* buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+ r = iwl_get_closed_rb_stts(trans, rxq);
i = rxq->read;
/* W/A 9000 device step A0 wrap-around bug */
@@ -1660,9 +1664,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
local_bh_disable();
- if (napi_schedule_prep(&rxq->napi))
- __napi_schedule(&rxq->napi);
- else
+ if (!napi_schedule(&rxq->napi))
iwl_pcie_clear_irq(trans, entry->entry);
local_bh_enable();
@@ -1676,6 +1678,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
*/
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@@ -1692,9 +1695,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- if (!trans->txqs.txq[i])
+ if (!trans_pcie->txqs.txq[i])
continue;
- del_timer(&trans->txqs.txq[i]->stuck_timer);
+ del_timer(&trans_pcie->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen
@@ -1785,7 +1788,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
return inta;
}
-void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -1809,7 +1812,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
isr_stats->rfkill++;
if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report);
+ iwl_trans_pcie_rf_kill(trans, report, from_irq);
mutex_unlock(&trans_pcie->mutex);
if (hw_rfkill) {
@@ -1949,7 +1952,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta & CSR_INT_BIT_RF_KILL) {
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, true);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -2291,6 +2294,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
else
sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
+ IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
+ inta_hw);
+ /* TODO: PLDR flow required here for >= Bz */
+ }
+
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
IWL_ERR(trans,
@@ -2366,7 +2375,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* HW RF KILL switch toggled */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, true);
if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
IWL_ERR(trans,
diff --git a/pcie/trans-gen2.c b/pcie/trans-gen2.c
index fa46dad5fd68..18dda89b7985 100644
--- a/pcie/trans-gen2.c
+++ b/pcie/trans-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@@ -161,6 +161,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
+ iwl_pcie_synchronize_irqs(trans);
iwl_pcie_rx_napi_sync(trans);
iwl_txq_gen2_tx_free(trans);
iwl_pcie_rx_stop(trans);
@@ -230,11 +231,14 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
+ int ret;
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock_bh(&trans_pcie->irq_lock);
- iwl_pcie_gen2_apm_init(trans);
+ ret = iwl_pcie_gen2_apm_init(trans);
spin_unlock_bh(&trans_pcie->irq_lock);
+ if (ret)
+ return ret;
iwl_op_mode_nic_config(trans->op_mode);
@@ -243,7 +247,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -286,6 +290,16 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_MS):
pos = scnprintf(buf, buflen, "MS");
break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_FM):
+ pos = scnprintf(buf, buflen, "FM");
+ break;
+ case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_WP):
+ if (SILICON_Z_STEP ==
+ CSR_HW_RFID_STEP(trans->hw_rf_id))
+ pos = scnprintf(buf, buflen, "WHTC");
+ else
+ pos = scnprintf(buf, buflen, "WH");
+ break;
default:
return;
}
@@ -325,16 +339,17 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
pos += scnprintf(buf + pos, buflen - pos, "\n");
}
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_pcie_reset_ict(trans);
/* make sure all queues are not stopped/used */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
* paging memory cannot be freed (it is not included) since FW will still use it
diff --git a/pcie/trans.c b/pcie/trans.c
index 3e988da44973..719ddc4b72c5 100644
--- a/pcie/trans.c
+++ b/pcie/trans.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2007-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -127,8 +127,7 @@ out:
kfree(buf);
}
-static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
- bool retake_ownership)
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
@@ -1082,7 +1081,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
if (prev != report)
- iwl_trans_pcie_rf_kill(trans, report);
+ iwl_trans_pcie_rf_kill(trans, report, false);
return hw_rfkill;
}
@@ -1111,6 +1110,7 @@ static const struct iwl_causes_list causes_list_common[] = {
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
+ IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
@@ -1236,7 +1236,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}
-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1263,6 +1263,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
+ if (!from_irq)
+ iwl_pcie_synchronize_irqs(trans);
iwl_pcie_rx_napi_sync(trans);
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -1333,8 +1335,8 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
}
}
-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
@@ -1420,7 +1422,7 @@ out:
return ret;
}
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
iwl_pcie_reset_ict(trans);
iwl_pcie_tx_start(trans, scd_addr);
@@ -1452,10 +1454,10 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
}
if (hw_rfkill != was_in_rfkill)
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
@@ -1467,12 +1469,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
mutex_lock(&trans_pcie->mutex);
trans_pcie->opmode_down = true;
was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
- _iwl_trans_pcie_stop_device(trans);
+ _iwl_trans_pcie_stop_device(trans, false);
iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
mutex_unlock(&trans_pcie->mutex);
}
-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1481,12 +1483,9 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->trans_cfg->gen2)
- _iwl_trans_pcie_gen2_stop_device(trans);
- else
- _iwl_trans_pcie_stop_device(trans);
- }
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
+ !WARN_ON(trans->trans_cfg->gen2))
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
@@ -1505,9 +1504,17 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ } else {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ }
if (reset) {
/*
@@ -1552,8 +1559,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return 0;
}
-static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
int ret;
@@ -1571,9 +1577,9 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
return 0;
}
-static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
@@ -1586,8 +1592,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
goto out;
}
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans);
if (ret)
@@ -1715,6 +1725,7 @@ enable_msi:
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
+#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1735,6 +1746,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
"Failed to set affinity mask for IRQ %d\n",
trans_pcie->msix_entries[i].vector);
}
+#endif
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
@@ -1872,7 +1884,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -1884,7 +1896,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return ret;
}
-static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1904,17 +1916,17 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
}
-static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
@@ -1927,7 +1939,7 @@ static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
return 0x000FFFFF;
}
-static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1936,8 +1948,7 @@ static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
-static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
- u32 val)
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1946,20 +1957,20 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-static void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
+void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
- trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
- trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
- trans->txqs.page_offs = trans_cfg->cb_data_offs;
- trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
- trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
+ trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
+ trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+ trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
+ trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+ trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -1978,19 +1989,12 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
- trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
+ trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
- /* Initialize NAPI here - it should be before registering to mac80211
- * in the opmode but after the HW struct is allocated.
- * As this function may be called again in some corner cases don't
- * do anything if NAPI was already initialized.
- */
- if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
- init_dummy_netdev(&trans_pcie->napi_dev);
trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}
@@ -2018,6 +2022,30 @@ void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions
memset(desc_dram, 0, sizeof(*desc_dram));
}
+static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
+{
+ iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
+}
+
+static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
+{
+ struct iwl_cmd_header_wide bad_cmd = {
+ .cmd = INVALID_WR_PTR_CMD,
+ .group_id = DEBUG_GROUP,
+ .sequence = cpu_to_le16(0xffff),
+ .length = cpu_to_le16(0),
+ .version = 0,
+ };
+ int ret;
+
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
+ sizeof(bad_cmd));
+ if (ret)
+ return ret;
+ memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
+ return 0;
+}
+
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2048,6 +2076,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_ict(trans);
}
+ free_netdev(trans_pcie->napi_dev);
+
+ iwl_pcie_free_invalid_tx_cmd(trans);
+
iwl_pcie_free_fw_monitor(trans);
iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
@@ -2056,15 +2088,20 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
trans->dev);
mutex_destroy(&trans_pcie->mutex);
- iwl_trans_free(trans);
-}
-static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
-{
- if (state)
- set_bit(STATUS_TPOWER_PMI, &trans->status);
- else
- clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ if (trans_pcie->txqs.tso_hdr_page) {
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
+
+ if (p && p->page)
+ __free_page(p->page);
+ }
+
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
+ }
+
+ iwl_trans_free(trans);
}
struct iwl_trans_pcie_removal {
@@ -2079,15 +2116,29 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
- struct pci_bus *bus = pdev->bus;
+ struct pci_bus *bus;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ /* in this case, something else already removed the device */
+ if (!bus)
+ goto out;
dev_err(&pdev->dev, "Device gone - attempting removal\n");
+
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
- pci_lock_rescan_remove();
- pci_dev_put(pdev);
+
pci_stop_and_remove_bus_device(pdev);
- if (removal->rescan)
- pci_rescan_bus(bus->parent);
+ pci_dev_put(pdev);
+
+ if (removal->rescan) {
+ if (bus->parent)
+ bus = bus->parent;
+ pci_rescan_bus(bus);
+ }
+
+out:
pci_unlock_rescan_remove();
kfree(removal);
@@ -2102,6 +2153,7 @@ void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
return;
IWL_ERR(trans, "Device gone - scheduling removal!\n");
+ iwl_pcie_dump_csr(trans);
/*
* get a module reference to avoid doing this
@@ -2147,6 +2199,9 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return false;
+
spin_lock(&trans_pcie->reg_lock);
if (trans_pcie->cmd_hold_nic_awake)
@@ -2212,7 +2267,7 @@ out:
return true;
}
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
bool ret;
@@ -2226,7 +2281,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
return false;
}
-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2256,9 +2311,11 @@ out:
spin_unlock_bh(&trans_pcie->reg_lock);
}
-static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
+#define IWL_MAX_HW_ERRS 5
+ unsigned int num_consec_hw_errors = 0;
int offs = 0;
u32 *vals = buf;
@@ -2274,6 +2331,17 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
while (offs < dwords) {
vals[offs] = iwl_read32(trans,
HBUS_TARG_MEM_RDAT);
+
+ if (iwl_trans_is_hw_error_value(vals[offs]))
+ num_consec_hw_errors++;
+ else
+ num_consec_hw_errors = 0;
+
+ if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {
+ iwl_trans_release_nic_access(trans);
+ return -EIO;
+ }
+
offs++;
if (time_after(jiffies, end)) {
@@ -2293,8 +2361,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
return 0;
}
-static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
{
int offs, ret = 0;
const u32 *vals = buf;
@@ -2311,43 +2379,17 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
-static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
- u32 *val)
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
{
return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
ofs, val);
}
-static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
-{
- int i;
-
- for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
-
- if (i == trans->txqs.cmd.q_id)
- continue;
-
- spin_lock_bh(&txq->lock);
-
- if (!block && !(WARN_ON_ONCE(!txq->block))) {
- txq->block--;
- if (!txq->block) {
- iwl_write32(trans, HBUS_TARG_WRPTR,
- txq->write_ptr | (i << 8));
- }
- } else if (block) {
- txq->block++;
- }
-
- spin_unlock_bh(&txq->lock);
- }
-}
-
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2362,8 +2404,9 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
return 0;
}
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
@@ -2373,11 +2416,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!test_bit(txq_idx, trans->txqs.queue_used))
+ if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans->txqs.txq[txq_idx];
+ txq = trans_pcie->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
@@ -2423,8 +2466,9 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
return 0;
}
-static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
@@ -2433,9 +2477,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans->txqs.cmd.q_id)
+ if (cnt == trans_pcie->txqs.cmd.q_id)
continue;
- if (!test_bit(cnt, trans->txqs.queue_used))
+ if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
@@ -2448,8 +2492,8 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
return ret;
}
-static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
- u32 mask, u32 value)
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2608,12 +2652,13 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans;
- struct iwl_txq *txq = trans->txqs.txq[state->pos];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos,
- !!test_bit(state->pos, trans->txqs.queue_used),
- !!test_bit(state->pos, trans->txqs.queue_stopped));
+ !!test_bit(state->pos, trans_pcie->txqs.queue_used),
+ !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
if (txq)
seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
@@ -2623,7 +2668,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans->txqs.cmd.q_id)
+ if (state->pos == trans_pcie->txqs.cmd.q_id)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -2686,11 +2731,9 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
rxq->free_count);
if (rxq->rb_stts) {
- u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
- rxq));
+ u32 r = iwl_get_closed_rb_stts(trans, rxq);
pos += scnprintf(buf + pos, bufsz - pos,
- "\tclosed_rb_num: %u\n",
- r & 0x0FFF);
+ "\tclosed_rb_num: %u\n", r);
} else {
pos += scnprintf(buf + pos, bufsz - pos,
"\tclosed_rb_num: Not Allocated\n");
@@ -2842,7 +2885,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
trans_pcie->debug_rfkill, new_value);
trans_pcie->debug_rfkill = new_value;
- iwl_pcie_handle_rfkill_irq(trans);
+ iwl_pcie_handle_rfkill_irq(trans, false);
return count;
}
@@ -3029,7 +3072,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(rf, dir, 0400);
}
-static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct cont_rec *data = &trans_pcie->fw_mon_data;
@@ -3042,10 +3085,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
+ for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
@@ -3061,9 +3105,9 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
- spin_lock(&rxq->lock);
+ spin_lock_bh(&rxq->lock);
- r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+ r = iwl_get_closed_rb_stts(trans, rxq);
for (i = rxq->read, j = 0;
i != r && j < allocated_rb_nums;
@@ -3085,7 +3129,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
*data = iwl_fw_error_next_data(*data);
}
- spin_unlock(&rxq->lock);
+ spin_unlock_bh(&rxq->lock);
return rb_len;
}
@@ -3306,15 +3350,14 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
return 0;
}
-static struct iwl_trans_dump_data *
-iwl_trans_pcie_dump_data(struct iwl_trans *trans,
- u32 dump_mask,
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3359,9 +3402,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs =
- le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
- & 0x0FFF;
+ num_rbs = iwl_get_closed_rb_stts(trans, rxq);
num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
@@ -3383,7 +3424,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans->txqs.tfd.size;
+ u16 tfd_size = trans_pcie->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
@@ -3459,7 +3500,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
return dump_data;
}
-static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
if (enable)
iwl_enable_interrupts(trans);
@@ -3467,7 +3508,7 @@ static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
iwl_disable_interrupts(trans);
}
-static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
u32 inta_addr, sw_err_bit;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -3486,106 +3527,92 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-#define IWL_TRANS_COMMON_OPS \
- .op_mode_leave = iwl_trans_pcie_op_mode_leave, \
- .write8 = iwl_trans_pcie_write8, \
- .write32 = iwl_trans_pcie_write32, \
- .read32 = iwl_trans_pcie_read32, \
- .read_prph = iwl_trans_pcie_read_prph, \
- .write_prph = iwl_trans_pcie_write_prph, \
- .read_mem = iwl_trans_pcie_read_mem, \
- .write_mem = iwl_trans_pcie_write_mem, \
- .read_config32 = iwl_trans_pcie_read_config32, \
- .configure = iwl_trans_pcie_configure, \
- .set_pmi = iwl_trans_pcie_set_pmi, \
- .sw_reset = iwl_trans_pcie_sw_reset, \
- .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
- .release_nic_access = iwl_trans_pcie_release_nic_access, \
- .set_bits_mask = iwl_trans_pcie_set_bits_mask, \
- .dump_data = iwl_trans_pcie_dump_data, \
- .d3_suspend = iwl_trans_pcie_d3_suspend, \
- .d3_resume = iwl_trans_pcie_d3_resume, \
- .interrupts = iwl_trans_pci_interrupts, \
- .sync_nmi = iwl_trans_pcie_sync_nmi, \
- .imr_dma_data = iwl_trans_pcie_copy_imr \
-
-static const struct iwl_trans_ops trans_ops_pcie = {
- IWL_TRANS_COMMON_OPS,
- .start_hw = iwl_trans_pcie_start_hw,
- .fw_alive = iwl_trans_pcie_fw_alive,
- .start_fw = iwl_trans_pcie_start_fw,
- .stop_device = iwl_trans_pcie_stop_device,
-
- .send_cmd = iwl_pcie_enqueue_hcmd,
-
- .tx = iwl_trans_pcie_tx,
- .reclaim = iwl_txq_reclaim,
-
- .txq_disable = iwl_trans_pcie_txq_disable,
- .txq_enable = iwl_trans_pcie_txq_enable,
-
- .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
-
- .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
-
- .freeze_txq_timer = iwl_trans_txq_freeze_timer,
- .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
-#endif
-};
-
-static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
- IWL_TRANS_COMMON_OPS,
- .start_hw = iwl_trans_pcie_start_hw,
- .fw_alive = iwl_trans_pcie_gen2_fw_alive,
- .start_fw = iwl_trans_pcie_gen2_start_fw,
- .stop_device = iwl_trans_pcie_gen2_stop_device,
-
- .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
-
- .tx = iwl_txq_gen2_tx,
- .reclaim = iwl_txq_reclaim,
-
- .set_q_ptrs = iwl_txq_set_q_ptrs,
-
- .txq_alloc = iwl_txq_dyn_alloc,
- .txq_free = iwl_txq_dyn_free,
- .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
- .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
- .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
- .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
- .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
- .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
-#endif
-};
-
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg_trans_params *cfg_trans)
{
- struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
int ret, addr_size;
- const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
void __iomem * const *table;
+ u32 bar0;
- if (!cfg_trans->gen2)
- ops = &trans_ops_pcie;
+ /* reassign our BAR 0 if invalid due to possible runtime PM races */
+ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
+ if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ ret = pci_assign_resource(pdev, 0);
+ if (ret)
+ return ERR_PTR(ret);
+ }
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
cfg_trans);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (trans->trans_cfg->gen2) {
+ trans_pcie->txqs.tfd.addr_size = 64;
+ trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans_pcie->txqs.tfd.addr_size = 36;
+ trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
+
+ trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ if (!trans_pcie->txqs.tso_hdr_page) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ else
+ trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (trans->trans_cfg->gen2) {
+ trans_pcie->txqs.bc_pool =
+ dmam_pool_create("iwlwifi:bc", trans->dev,
+ trans_pcie->txqs.bc_tbl_size,
+ 256, 0);
+ if (!trans_pcie->txqs.bc_pool) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ }
+
+ /* Some things must not change even if the config does */
+ WARN_ON(trans_pcie->txqs.tfd.addr_size !=
+ (trans->trans_cfg->gen2 ? 64 : 36));
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ */
+ trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
+ if (!trans_pcie->napi_dev) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ /* The private struct in netdev is a pointer to struct iwl_trans_pcie */
+ priv = netdev_priv(trans_pcie->napi_dev);
+ *priv = trans_pcie;
+
trans_pcie->trans = trans;
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
@@ -3600,7 +3627,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!trans_pcie->rba.alloc_wq) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_ndev;
}
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
@@ -3617,11 +3644,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
- trans_pcie->def_rx_queue = 0;
-
pci_set_master(pdev);
- addr_size = trans->txqs.tfd.addr_size;
+ addr_size = trans_pcie->txqs.tfd.addr_size;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -3686,6 +3711,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->sx_waitq);
+ ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
+ if (ret)
+ goto out_no_pci;
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
@@ -3719,6 +3747,10 @@ out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_ndev:
+ free_netdev(trans_pcie->napi_dev);
+out_free_tso:
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
diff --git a/pcie/tx-gen2.c b/pcie/tx-gen2.c
index 34bde8c87324..b1846abb99b7 100644
--- a/pcie/tx-gen2.c
+++ b/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -11,7 +11,1183 @@
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
-#include "queue/tx.h"
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "iwl-scd.h"
+
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ struct page *ret;
+ dma_addr_t phys;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ info = IWL_TSO_PAGE_INFO(page_address(ret));
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(ret);
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+
+ /* set the chaining pointer to the previous page if there is one */
+ info->next = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta,
+ bool unmap)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on a 32-bit boundary,
+ * then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ /*
+ * This is a bit odd, but performance does not matter here; what
+ * matters are the expectations of the calling code and the TB cleanup
+ * function.
+ *
+ * As such, if unmap is set, then create another mapping for the TB
+ * entry as it will be unmapped later. On the other hand, if it is not
+ * set, then the TB entry will not be unmapped and instead we simply
+ * reference and sync the mapping that get_workaround_page() created.
+ */
+ if (unmap) {
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ } else {
+ phys = iwl_pcie_get_tso_page_phys(page_address(page));
+ dma_sync_single_for_device(trans->dev, phys, len,
+ DMA_TO_DEVICE);
+ }
+
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (!unmap)
+ goto trace;
+
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
+static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta,
+ int start_len,
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
+ dma_addr_t start_hdr_phys;
+ u16 length, amsdu_pad;
+ u8 *start_hdr;
+ struct sg_table *sgt;
+ struct tso_t tso;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_network_header_len(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+
+ /* Our device supports 9 segments at most, so the header room will fit in one page */
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ if (!sgt)
+ return -ENOMEM;
+
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * Pull the ieee80211 header to be able to use the TSO core;
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ u8 *pos_hdr = start_hdr;
+
+ total_len -= data_left;
+
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well which will be considered
+ * as MAC header.
+ */
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
+
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ tb_len = pos_hdr - start_hdr;
+ tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so we can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, tb_len);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = pos_hdr;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+ goto out_err;
+
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL, false);
+ if (ret)
+ goto out_err;
+
+ data_left -= tb_len;
+ data_offset += tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
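/*
 * Illustrative sketch (not part of the upstream patch): each subframe the
 * loop above emits is laid out as
 *   [pad 0-3][DA 6][SA 6][len 2][SNAP+IP+TCP header][payload <= mss]
 * and the pad keeps the following subframe 4-byte aligned. ex_amsdu_pad()
 * mirrors the in-loop computation under that assumption.
 */
#include <assert.h>
#include <stddef.h>

static unsigned int ex_amsdu_pad(size_t ethhdr_len, size_t snap_ip_tcp_hdrlen,
				 size_t data_left)
{
	return (4 - (ethhdr_len + snap_ip_tcp_hdrlen + data_left)) & 0x3;
}

int main(void)
{
	/* e.g. 14-byte DA/SA/len + 54 bytes of SNAP/IP/TCP + 1398 data bytes */
	assert(ex_amsdu_pad(14, 54, 1398) == 2);
	return 0;
}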
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
+ len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
+
+ if (!fragsz)
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len,
+ bool pad)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len, tb1_len, tb2_len;
+ void *tb1_addr;
+ struct sk_buff *frag;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
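+ /* pad TB1 to a dword boundary unless building an A-MSDU (its subframe header provides the alignment) */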
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL, true);
+ if (ret)
+ goto out_err;
+ }
+
+ if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
+
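+ /* also map any skbs chained in the frag list - both their linear data and their page fragments */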
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL,
+ true);
+ if (ret)
+ goto out_err;
+ if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
+ IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
+ IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
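+ /* the frame is an A-MSDU if it is QoS data with the A-MSDU-present bit set in the QoS control field */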
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /*
+ * Only build A-MSDUs here if doing so by GSO, otherwise it may be
+ * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
+ * built in the higher layers already.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
+ return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+ return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len, !amsdu);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ max = q->n_window;
+ else
+ max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+
+ /*
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM - 0 for one chunk, 1 for 2 and so on.
+ * If, for example, the TFD contains only 3 TBs then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
+ /* Starting from AX210, the HW expects bytes */
+ WARN_ON(trans_pcie->txqs.bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
+ scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
+ } else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
+ /* Before AX210, the HW expects DW */
+ WARN_ON(!trans_pcie->txqs.bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
+}
+
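+/* the low 5 bits of num_tbs hold the count of TBs already filled in this TFD */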
+static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_gen2_get_num_tbs(tfd);
+ struct iwl_tfh_tb *tb;
+
+ /* Only WARN here so we know about the issue, but we mess up our
+ * unmap path because not every place currently checks for errors
+ * returned from this function - it can only return an error if
+ * there's no more space, and so when we know there is enough we
+ * don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen2_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+}
+
+static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->entries)
+ return;
+
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, idx));
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
+/*
+ * iwl_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ u16 cmd_len;
+ int idx;
+ void *tfd;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+ /* don't put the packet on the ring if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans_pcie->txqs.dev_cmd_offs);
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[idx].skb = skb;
+ txq->entries[idx].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(idx)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[idx].meta;
+ memset(out_meta, 0, sizeof(*out_meta));
+
+ tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (!WARN_ON_ONCE(!skb))
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ int i;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txqs.txq[txq_id];
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_txq_gen2_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->txqs.cmd.q_id)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_txq_gen2_free_memory(trans, txq);
+
+ trans_pcie->txqs.txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans_pcie->txqs.queue_used);
+}
+
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
+ int ret;
+
+ WARN_ON(!trans_pcie->txqs.bc_tbl_size);
+
+ bc_tbl_size = trans_pcie->txqs.bc_tbl_size;
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return ERR_PTR(-EINVAL);
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return ERR_PTR(-ENOMEM);
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = iwl_pcie_txq_alloc(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_txq_init(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ return txq;
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ERR_PTR(ret);
+}
+
+static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error_free_resp;
+ }
+
+ rsp = (void *)hcmd->resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+ wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+ if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (WARN_ONCE(trans_pcie->txqs.txq[qid],
+ "queue %d already allocated\n", qid)) {
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ txq->id = qid;
+ trans_pcie->txqs.txq[qid] = txq;
+ wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = wr_ptr;
+ txq->write_ptr = wr_ptr;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ iwl_free_resp(hcmd);
+ return qid;
+
+error_free_resp:
+ iwl_free_resp(hcmd);
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ /* take the min with bytecount table entries allowed */
+ size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16));
+ /* but must be power of 2 values for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
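+ /* on BZ A-step always use a 4096-entry queue and skip the shrinking retry loop below */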
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->hw_rev_step == SILICON_A_STEP) {
+ size = 4096;
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ } else {
+ do {
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (!IS_ERR(txq))
+ break;
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
+ size, sta_mask, tid,
+ PTR_ERR(txq));
+ size /= 2;
+ } while (size >= 16);
+ }
+
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
+
+ if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_txq_gen2_free(trans, queue);
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) {
+ if (!trans_pcie->txqs.txq[i])
+ continue;
+
+ iwl_txq_gen2_free(trans, i);
+ }
+}
+
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *queue;
+ int ret;
+
+ /* alloc and init the tx queue */
+ if (!trans_pcie->txqs.txq[txq_id]) {
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
+ return -ENOMEM;
+ }
+ trans_pcie->txqs.txq[txq_id] = queue;
+ ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ queue = trans_pcie->txqs.txq[txq_id];
+ }
+
+ ret = iwl_txq_init(trans, queue, queue_size,
+ (txq_id == trans_pcie->txqs.cmd.q_id));
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans_pcie->txqs.queue_used);
+
+ return 0;
+
+error:
+ iwl_txq_gen2_tx_free(trans);
+ return ret;
+}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -28,7 +1204,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -42,6 +1218,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_tfh_tfd *tfd;
unsigned long flags;
+ if (WARN_ON(cmd->flags & CMD_BLOCK_TXQS))
+ return -EINVAL;
+
copy_size = sizeof(struct iwl_cmd_header_wide);
cmd_size = sizeof(struct iwl_cmd_header_wide);
@@ -127,7 +1306,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
- /* re-initialize to NULL */
+ /* re-initialize, this also marks the SG list as unused */
memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -140,7 +1319,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -188,7 +1367,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -213,7 +1392,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* map the remaining (adjusted) nocopy/dup fragments */
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
- const void *data = cmddata[i];
+ void *data = (void *)(uintptr_t)cmddata[i];
if (!cmdlen[i])
continue;
@@ -222,7 +1401,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
data = dup_buf;
- phys_addr = dma_map_single(trans->dev, (void *)data,
+ phys_addr = dma_map_single(trans->dev, data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
diff --git a/pcie/tx.c b/pcie/tx.c
index 790e5b124740..9fe050f0ddc1 100644
--- a/pcie/tx.c
+++ b/pcie/tx.c
@@ -1,16 +1,22 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
+#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "fw/api/debug.h"
+#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -72,6 +78,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -84,7 +91,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans->txqs.cmd.q_id &&
+ txq_id != trans_pcie->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -115,12 +122,13 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (!test_bit(i, trans->txqs.queue_used))
+ if (!test_bit(i, trans_pcie->txqs.queue_used))
continue;
spin_lock_bh(&txq->lock);
@@ -132,12 +140,10 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
- u8 idx, dma_addr_t addr, u16 len)
+static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
{
- struct iwl_tfd *tfd_fh = (void *)tfd;
- struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
-
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
u16 hi_n_len = len << 4;
put_unaligned_le32(addr, &tb->lo);
@@ -145,26 +151,32 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
tb->hi_n_len = cpu_to_le16(hi_n_len);
- tfd_fh->num_tbs = idx + 1;
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
}
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *tfd;
u32 num_tbs;
- tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+ tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
if (reset)
- memset(tfd, 0, trans->txqs.tfd.size);
+ memset(tfd, 0, trans_pcie->txqs.tfd.size);
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans->txqs.tfd.max_tbs) {
+ if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
+ trans_pcie->txqs.tfd.max_tbs);
return -EINVAL;
}
@@ -172,7 +184,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
+ iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
return num_tbs;
}
@@ -197,36 +209,206 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
spin_unlock(&trans_pcie->reg_lock);
}
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+ struct page *page)
+{
+ struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+ /* Decrease internal use count and unmap/free page if needed */
+ if (refcount_dec_and_test(&info->use_count)) {
+ dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ __free_page(page);
+ }
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct iwl_tso_page_info *info;
+ struct page *tmp = next;
+
+ info = IWL_TSO_PAGE_INFO(page_address(next));
+ next = info->next;
+
+ /* Unmap the scatter gather list that is on the last page */
+ if (!next && cmd_meta->sg_offset) {
+ struct sg_table *sgt;
+
+ sgt = (void *)((u8 *)page_address(tmp) +
+ cmd_meta->sg_offset);
+
+ dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
+ }
+
+ iwl_pcie_free_and_unmap_tso_page(trans, tmp);
+ }
+}
+
+static inline dma_addr_t
+iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
+
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ }
+
+ meta->tbs = 0;
+
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+}
+
+/**
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans: transport private data
+ * @txq: tx queue
+ * @read_ptr: the TXQ read_ptr to free
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ int read_ptr)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->reclaim_lock);
+
+ if (!txq->entries)
+ return;
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ if (trans->trans_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, read_ptr));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
+ txq, read_ptr);
+
+ /* free SKB */
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
/*
* iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
*/
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
if (!txq) {
IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
return;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans->txqs.cmd.q_id) {
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
+ struct iwl_cmd_meta *cmd_meta =
+ &txq->entries[txq->read_ptr].meta;
if (WARN_ON_ONCE(!skb))
continue;
- iwl_txq_free_tso_page(trans, skb);
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
}
- iwl_txq_free_tfd(trans, txq);
+ iwl_txq_free_tfd(trans, txq, txq->read_ptr);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans->txqs.cmd.q_id)
+ txq_id == trans_pcie->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
}
@@ -236,10 +418,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_op_mode_free_skb(trans->op_mode, skb);
}
- spin_unlock_bh(&txq->lock);
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
/* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
+ iwl_trans_pcie_wake_queue(trans, txq);
}
/*
@@ -252,7 +435,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
*/
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -262,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
+ if (txq_id == trans_pcie->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -271,7 +455,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans->txqs.tfd.size *
+ trans_pcie->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
@@ -301,9 +485,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queues are not stopped/used */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -317,7 +502,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans->txqs.scd_bc_tbls.dma >> 10);
+ trans_pcie->txqs.scd_bc_tbls.dma >> 10);
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
@@ -325,9 +510,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
- trans->txqs.cmd.fifo,
- trans->txqs.cmd.wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
+ trans_pcie->txqs.cmd.fifo,
+ trans_pcie->txqs.cmd.wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -363,7 +548,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
if (trans->trans_cfg->gen2)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -438,9 +623,10 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
@@ -464,7 +650,8 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* Tx queues */
if (trans_pcie->txq_memory) {
@@ -472,7 +659,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
- trans->txqs.txq[txq_id] = NULL;
+ trans_pcie->txqs.txq[txq_id] = NULL;
}
}
@@ -481,7 +668,135 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
+
+ if (trans->trans_cfg->gen2) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
+ /* TODO: access new SCD registers and dump them */
+ return;
+ }
+
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static void iwl_txq_stuck_timer(struct timer_list *t)
+{
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_trans *trans = txq->trans;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->read_ptr == txq->write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ iwl_txq_log_scd_error(trans, txq);
+
+ iwl_force_nmi(trans);
+}
+
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t num_entries = trans->trans_cfg->gen2 ?
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tfd_sz;
+ size_t tb0_buf_sz;
+ int i;
+
+ if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
+ return -EINVAL;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
+
+ timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
+ txq->trans = trans;
+
+ txq->n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device
+ */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
+ BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
+
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
+ GFP_KERNEL);
+ if (!txq->first_tb_bufs)
+ goto err_free_tfds;
+
+ for (i = 0; i < num_entries; i++) {
+ void *tfd = iwl_txq_get_tfd(trans, txq, i);
+
+ if (trans->trans_cfg->gen2)
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+ else
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+ }
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+ txq->tfds = NULL;
+error:
+ if (txq->entries && cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
}
/*
@@ -507,7 +822,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -533,7 +848,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -541,14 +856,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_ba_txq_size);
- trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
- cmd_queue);
+ trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans->txqs.txq[txq_id]->id = txq_id;
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
}
return 0;
@@ -559,6 +874,69 @@ error:
return ret;
}
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+ q->n_window = slots_num;
+
+ /* slots_num must be power-of-two size, otherwise
+ * iwl_txq_get_cmd_index is broken.
+ */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
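+ /* free-space watermarks: stop the queue when space drops below high_mark, wake it once space recovers past low_mark */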
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ u32 tfd_queue_max_size =
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+ int ret;
+
+ txq->need_update = false;
+
+ /* max_tfd_queue_size must be power-of-two size, otherwise
+ * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
+ */
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(txq, slots_num);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+ spin_lock_init(&txq->reclaim_lock);
+
+ if (cmd_queue) {
+ static struct lock_class_key iwl_txq_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
+ }
+
+ __skb_queue_head_init(&txq->overflow_q);
+
+ return 0;
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -587,7 +965,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -595,7 +973,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_ba_txq_size);
- ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
+ ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
@@ -609,7 +987,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans->txqs.txq[txq_id]->dma_addr >> 8);
+ trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -657,6 +1035,42 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
return 0;
}
+static void iwl_txq_progress(struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->wd_timeout)
+ return;
+
+ /*
+ * station is asleep and we send data - that must
+ * be uAPSD or PS-Poll. Don't rearm the timer.
+ */
+ if (txq->frozen)
+ return;
+
+ /*
+ * if empty, delete the timer; otherwise move it forward
+ * since we're making progress on this queue
+ */
+ if (txq->read_ptr == txq->write_ptr)
+ del_timer(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+}
+
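+/* true if entry i lies between the (possibly wrapped) read and write pointers,
+ * i.e. the entry is currently in use
+ */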
+static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
+ int read_ptr, int write_ptr)
+{
+ int index = iwl_txq_get_cmd_index(q, i);
+ int r = iwl_txq_get_cmd_index(q, read_ptr);
+ int w = iwl_txq_get_cmd_index(q, write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
+}
+
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -666,7 +1080,8 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
*/
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
int nfreed = 0;
u16 r;
@@ -676,8 +1091,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
- (!iwl_txq_used(txq, idx))) {
- WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
+ (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
+ WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
@@ -736,11 +1151,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;
- if (test_and_set_bit(txq_id, trans->txqs.queue_used))
+ if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
@@ -749,7 +1164,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans->txqs.cmd.q_id &&
+ if (txq_id == trans_pcie->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);
@@ -757,7 +1172,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans->txqs.cmd.q_id)
+ if (txq_id != trans_pcie->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -827,7 +1242,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans->txqs.cmd.q_id &&
+ if (txq_id == trans_pcie->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
@@ -846,7 +1261,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
txq->ampdu = !shared_mode;
}
@@ -859,8 +1275,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
- trans->txqs.txq[txq_id]->frozen = false;
+ trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txqs.txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -868,7 +1284,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
+ if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
@@ -882,13 +1298,41 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans->txqs.txq[txq_id]->ampdu = false;
+ trans_pcie->txqs.txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
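+/* Block or unblock HW write-pointer updates for all data TX queues (the
+ * command queue is skipped); used around host commands flagged CMD_BLOCK_TXQS.
+ */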
+static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
+
+ if (i == trans_pcie->txqs.cmd.q_id)
+ continue;
+
+ /* we skip the command queue (obviously) so it's OK to nest */
+ spin_lock_nested(&txq->lock, 1);
+
+ if (!block && !(WARN_ON_ONCE(!txq->block))) {
+ txq->block--;
+ if (!txq->block) {
+ iwl_write32(trans, HBUS_TARG_WRPTR,
+ txq->write_ptr | (i << 8));
+ }
+ } else if (block) {
+ txq->block++;
+ }
+
+ spin_unlock(&txq->lock);
+ }
+}
+
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data pointer
@@ -901,7 +1345,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1013,7 +1458,8 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ /* re-initialize, this also marks the SG list as unused */
+ memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -1027,7 +1473,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1035,7 +1481,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1086,7 +1532,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1153,6 +1599,9 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto out;
}
+ if (cmd->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, true);
+
/* Increment and update queue's write index */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
@@ -1182,14 +1631,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans->txqs.cmd.q_id,
+ if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
+ txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1203,7 +1652,11 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
group_id = cmd->hdr.group_id;
cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
- iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
+ if (trans->trans_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, meta,
+ iwl_txq_get_tfd(trans, txq, index));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -1214,8 +1667,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
meta->source->_rx_page_order = trans_pcie->rx_page_order;
}
- if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
- iwl_op_mode_async_cb(trans->op_mode, cmd);
+ if (meta->flags & CMD_BLOCK_TXQS)
+ iwl_trans_pcie_block_txq_ptrs(trans, false);
iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1288,19 +1741,180 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
+static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
+ size_t len, struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ dma_addr_t phys;
+ void *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
+
+ if (!p->page)
+ goto alloc;
+
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+ goto out;
+ }
+
+ /* We don't have enough room on this page, get a new one. */
+ iwl_pcie_free_and_unmap_tso_page(trans, p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+
+ /* set the chaining pointer to NULL */
+ info->next = NULL;
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(p->page);
+ p->page = NULL;
+
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+out:
+ *page_ptr = p->page;
+ /* Return an internal reference for the caller */
+ refcount_inc(&info->use_count);
+ ret = p->pos;
+ p->pos += len;
+
+ return ret;
+}
+
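The room check above keys off the write position's offset within its page: as long as that offset plus the requested length stays below the usable data size, the current TSO page is reused; otherwise a fresh page is allocated and mapped. A minimal standalone sketch of that arithmetic, with made-up page constants (the data-size limit is assumed here to exclude the info block kept at the end of each page, as the chaining comment above describes):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL			/* illustrative page size */
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_TSO_DATA_SIZE (EX_PAGE_SIZE - 64UL)	/* pretend 64 bytes stay reserved at the end */

/* true if [pos, pos + len) fits in the data area of the page holding pos */
static bool page_has_room(uintptr_t pos, size_t len)
{
	return ((pos & ~EX_PAGE_MASK) + len) < EX_TSO_DATA_SIZE;
}

int main(void)
{
	uintptr_t pos = 0x10000f00;	/* 0xf00 bytes into some page */

	printf("128 more bytes fit:  %d\n", page_has_room(pos, 128));	/* 1 */
	printf("4096 more bytes fit: %d\n", page_has_room(pos, 4096));	/* 0 */
	return 0;
}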
+/**
+ * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
+ * @sgt: scatter gather table
+ * @offset: Offset into the mapped memory (i.e. SKB payload data)
+ * @len: Length of the area
+ *
+ * Find the DMA address that corresponds to the SKB payload data at the
+ * position given by @offset.
+ *
+ * Returns: Address for TB entry
+ */
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len)
+{
+ struct scatterlist *sg;
+ unsigned int sg_offset = 0;
+ int i;
+
+ /*
+ * Search the mapped DMA areas in the SG for the area that contains the
+ * data at offset with the given length.
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (offset >= sg_offset &&
+ offset + len <= sg_offset + sg_dma_len(sg))
+ return sg_dma_address(sg) + offset - sg_offset;
+
+ sg_offset += sg_dma_len(sg);
+ }
+
+ WARN_ON_ONCE(1);
+
+ return DMA_MAPPING_ERROR;
+}
+
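The lookup above only maps a logical payload offset back to one of the DMA segments created when the SKB was mapped; a TB is never split across segments, hence the warning when no single segment covers the whole range. A small userspace sketch of the same walk, with hypothetical segment descriptors standing in for the mapped scatterlist entries:

#include <stdio.h>
#include <stdint.h>

struct ex_seg {
	uint64_t dma_addr;	/* stand-in for sg_dma_address() */
	unsigned int len;	/* stand-in for sg_dma_len() */
};

#define EX_MAPPING_ERROR ((uint64_t)-1)

static uint64_t ex_get_tb_phys(const struct ex_seg *segs, int nsegs,
			       unsigned int offset, unsigned int len)
{
	unsigned int seg_offset = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		/* the whole [offset, offset + len) range must sit in one segment */
		if (offset >= seg_offset &&
		    offset + len <= seg_offset + segs[i].len)
			return segs[i].dma_addr + offset - seg_offset;
		seg_offset += segs[i].len;
	}
	return EX_MAPPING_ERROR;
}

int main(void)
{
	struct ex_seg segs[] = {
		{ 0x1000, 1024 },	/* payload bytes    0..1023 */
		{ 0x8000, 2048 },	/* payload bytes 1024..3071 */
	};

	/* a 200-byte TB starting 1500 bytes into the payload -> second segment */
	printf("0x%llx\n",
	       (unsigned long long)ex_get_tb_phys(segs, 2, 1500, 200));
	return 0;
}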
+/**
+ * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
+ * @trans: transport private data
+ * @skb: the SKB to map
+ * @cmd_meta: command meta to store the scatter list information for unmapping
+ * @hdr: output argument for TSO headers
+ * @hdr_room: requested length for TSO headers
+ *
+ * Allocate space for a scatter gather list and TSO headers and map the SKB
+ * using the scatter gather list. The SKB is unmapped again when the page is
+ * freed at the end of the operation.
+ *
+ * Returns: newly allocated and mapped scatter gather table with its embedded scatterlist
+ */
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room)
+{
+ struct sg_table *sgt;
+
+ if (WARN_ON_ONCE(skb_has_frag_list(skb)))
+ return NULL;
+
+ *hdr = iwl_pcie_get_page_hdr(trans,
+ hdr_room + __alignof__(struct sg_table) +
+ sizeof(struct sg_table) +
+ (skb_shinfo(skb)->nr_frags + 1) *
+ sizeof(struct scatterlist),
+ skb);
+ if (!*hdr)
+ return NULL;
+
+ sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
+ sgt->sgl = (void *)(sgt + 1);
+
+ sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+
+ /* Only map the data, not the header (it is copied to the TSO page) */
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
+ skb->data_len);
+ if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ return NULL;
+
+ /* And map the entire SKB */
+ if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
+ return NULL;
+
+ /* Store non-zero (i.e. valid) offset for unmapping */
+ cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
+
+ return sgt;
+}
+
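iwl_pcie_prep_tso() above carves three things out of a single TSO-page allocation: the header room requested by the caller, an sg_table placed right after it, and the scatterlist entries trailing the table. The sketch below shows only that layout arithmetic; the struct shapes are placeholders, not the kernel definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* placeholder shapes, only the layout arithmetic matters here */
struct ex_sg_table { void *sgl; int orig_nents; };
struct ex_scatterlist { uint64_t dma; unsigned int len; };

#define EX_PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

int main(void)
{
	unsigned int hdr_room = 200, nr_frags = 3;
	size_t total = hdr_room + __alignof__(struct ex_sg_table) +
		       sizeof(struct ex_sg_table) +
		       (nr_frags + 1) * sizeof(struct ex_scatterlist);
	uint8_t *hdr = malloc(total);
	struct ex_sg_table *sgt;

	if (!hdr)
		return 1;

	/* the table starts right after the header area, suitably aligned */
	sgt = EX_PTR_ALIGN(hdr + hdr_room, __alignof__(struct ex_sg_table));
	/* the scatterlist entries trail the table in the same buffer */
	sgt->sgl = (void *)(sgt + 1);

	printf("hdr=%p sgt=%p sgl=%p\n", (void *)hdr, (void *)sgt, sgt->sgl);
	free(hdr);
	return 0;
}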
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
struct iwl_device_tx_cmd *dev_cmd,
u16 tb1_len)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
u16 length, iv_len, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
+ dma_addr_t start_hdr_phys;
+ u8 *start_hdr, *pos_hdr;
+ struct sg_table *sgt;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -1310,10 +1924,10 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans->txqs.tfd.size,
+ trans_pcie->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
amsdu_pad = 0;
@@ -1323,13 +1937,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports at most 9 segments, so it will fit in one page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ if (!sgt)
return -ENOMEM;
- start_hdr = hdr_page->pos;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+ pos_hdr = start_hdr;
+ memcpy(pos_hdr, skb->data + hdr_len, iv_len);
+ pos_hdr += iv_len;
/*
* Pull the ieee80211 header + IV to be able to use TSO core,
@@ -1352,45 +1967,43 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
min_t(unsigned int, mss, total_len);
unsigned int hdr_tb_len;
dma_addr_t hdr_tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
+ u8 *subf_hdrs_start = pos_hdr;
total_len -= data_left;
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
/*
* This will copy the SNAP as well which will be considered
* as MAC header.
*/
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
- hdr_page->pos += snap_ip_tcp_hdrlen;
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ hdr_tb_len = pos_hdr - start_hdr;
+ hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
- hdr_tb_len = hdr_page->pos - start_hdr;
- hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
- hdr_tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
- return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+ le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
/* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
+ start_hdr = pos_hdr;
/* put the payload */
while (data_left) {
@@ -1398,9 +2011,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
- tb_phys = dma_map_single(trans->dev, tso.data,
- size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -1409,10 +2022,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys, size);
data_left -= size;
+ data_offset += size;
tso_build_data(skb, &tso, size);
}
}
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
/* re-add the WiFi header and IV */
skb_push(skb, hdr_len + iv_len);
@@ -1432,9 +2049,61 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
#endif /* CONFIG_INET */
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*
+ * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
+ u8 sec_ctl = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
+
+ scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+ if (trans_pcie->txqs.bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
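The gen1 byte-count entry built above packs the scaled length into the low 12 bits and the station id into the top 4 bits of a 16-bit word, and mirrors the first TFD_QUEUE_SIZE_BC_DUP entries past TFD_QUEUE_SIZE_MAX so the scheduler can keep reading across the wrap point. A standalone sketch of that packing, with illustrative constants rather than the driver's values:

#include <stdio.h>
#include <stdint.h>

#define EX_TFD_QUEUE_SIZE_MAX		256	/* illustrative */
#define EX_TFD_QUEUE_SIZE_BC_DUP	64	/* illustrative */

int main(void)
{
	uint16_t tfd_offset[EX_TFD_QUEUE_SIZE_MAX + EX_TFD_QUEUE_SIZE_BC_DUP] = { 0 };
	unsigned int write_ptr = 5, sta_id = 7, len = 1536;	/* len already scaled/padded */
	uint16_t bc_ent = (uint16_t)(len | (sta_id << 12));

	tfd_offset[write_ptr] = bc_ent;
	if (write_ptr < EX_TFD_QUEUE_SIZE_BC_DUP)	/* mirror for the wrap */
		tfd_offset[EX_TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;

	printf("entry 0x%04x stored at %u and %u\n", bc_ent,
	       write_ptr, EX_TFD_QUEUE_SIZE_MAX + write_ptr);
	return 0;
}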
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
@@ -1449,14 +2118,14 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = trans->txqs.txq[txq_id];
+ txq = trans_pcie->txqs.txq[txq_id];
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -1477,7 +2146,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
+ trans_pcie->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -1515,7 +2184,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[txq->write_ptr].meta;
- out_meta->flags = 0;
+ memset(out_meta, 0, sizeof(*out_meta));
/*
* The second TB (tb1) points to the remainder of the TX command
@@ -1560,7 +2229,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans->txqs.tfd.size,
+ trans_pcie->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -1595,8 +2264,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
- iwl_txq_gen1_tfd_get_num_tbs(trans,
- tfd));
+ iwl_txq_gen1_tfd_get_num_tbs(tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
@@ -1631,3 +2299,379 @@ out_err:
spin_unlock(&txq->lock);
return -1;
}
+
+static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ int read_ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ int txq_id = txq->id;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ bc_ent;
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ int tfd_num, read_ptr, last_to_free;
+ int txq_read_ptr, txq_write_ptr;
+
+ /* This function is not meant to release the cmd queue */
+ if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+ return;
+
+ if (WARN_ON(!txq))
+ return;
+
+ tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+
+ spin_lock_bh(&txq->reclaim_lock);
+
+ spin_lock(&txq->lock);
+ txq_read_ptr = txq->read_ptr;
+ txq_write_ptr = txq->write_ptr;
+ spin_unlock(&txq->lock);
+
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
+
+ if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
+
+ if (read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
+
+ /* Since we free until index _not_ inclusive, the one before index is
+ * the last we will free. That index must still be within the used range
+ */
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
+
+ if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
+ IWL_ERR(trans,
+ "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free,
+ trans->trans_cfg->base_params->max_tfd_queue_size,
+ txq_write_ptr, txq_read_ptr);
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_FAKE_TX,
+ NULL);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ read_ptr != tfd_num;
+ txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
+
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq_read_ptr, txq_id))
+ continue;
+
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+
+ __skb_queue_tail(skbs, skb);
+
+ txq->entries[read_ptr].skb = NULL;
+
+ if (!trans->trans_cfg->gen2)
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
+ txq_read_ptr);
+
+ iwl_txq_free_tfd(trans, txq, txq_read_ptr);
+ }
+
+ spin_lock(&txq->lock);
+ txq->read_ptr = txq_read_ptr;
+
+ iwl_txq_progress(txq);
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+ skb_queue_splice_init(&txq->overflow_q,
+ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+ * Remember this state so that wait_for_txq_empty will know we
+ * are adding more packets to the TFD queue. It cannot rely on
+ * the state of &txq->overflow_q, as we just emptied it, but
+ * haven't TXed the content yet.
+ */
+ txq->overflow_tx = true;
+
+ /*
+ * This is tricky: we are in reclaim path and are holding
+ * reclaim_lock, so no one will try to access the txq data
+ * from that path. Tx is stopped too, so the Tx path will not touch it either.
+ * Bottom line, we can unlock and re-lock later.
+ */
+ spin_unlock(&txq->lock);
+
+ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+ trans_pcie->txqs.dev_cmd_offs);
+
+ /*
+ * Note that we can very well be overflowing again.
+ * In that case, iwl_txq_space will be small again
+ * and we won't wake mac80211's queue.
+ */
+ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
+ }
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
+ iwl_trans_pcie_wake_queue(trans, txq);
+
+ spin_lock(&txq->lock);
+ txq->overflow_tx = false;
+ }
+
+ spin_unlock(&txq->lock);
+out:
+ spin_unlock_bh(&txq->reclaim_lock);
+}
+
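The reclaim loop above frees entries from the current read pointer up to, but not including, the index handed in by the op mode; because the queue size is a power of two, the wrap is a simple mask. A minimal sketch of that walk with an assumed queue size:

#include <stdio.h>

#define EX_QUEUE_SIZE 256	/* power of two, illustrative */

int main(void)
{
	unsigned int read_ptr = 250;	/* current read index */
	unsigned int ssn = 4;		/* first index that must stay */

	while (read_ptr != ssn) {
		printf("freeing entry %u\n", read_ptr);
		read_ptr = (read_ptr + 1) & (EX_QUEUE_SIZE - 1);
	}
	return 0;
}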
+/* Set wr_ptr of specific device and txq */
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+
+ txq->write_ptr = ptr;
+ txq->read_ptr = txq->write_ptr;
+
+ spin_unlock_bh(&txq->lock);
+}
+
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue;
+
+ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
+ unsigned long now;
+
+ spin_lock_bh(&txq->lock);
+
+ now = jiffies;
+
+ if (txq->frozen == freeze)
+ goto next_queue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+ freeze ? "Freezing" : "Waking", queue);
+
+ txq->frozen = freeze;
+
+ if (txq->read_ptr == txq->write_ptr)
+ goto next_queue;
+
+ if (freeze) {
+ if (unlikely(time_after(now,
+ txq->stuck_timer.expires))) {
+ /*
+ * The timer should have fired, maybe it is
+ * spinning right now on the lock.
+ */
+ goto next_queue;
+ }
+ /* remember how long until the timer fires */
+ txq->frozen_expiry_remainder =
+ txq->stuck_timer.expires - now;
+ del_timer(&txq->stuck_timer);
+ goto next_queue;
+ }
+
+ /*
+ * Wake a non-empty queue -> arm timer with the
+ * remainder before it froze
+ */
+ mod_timer(&txq->stuck_timer,
+ now + txq->frozen_expiry_remainder);
+
+next_queue:
+ spin_unlock_bh(&txq->lock);
+ }
+}
+
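Freezing a non-empty queue above does not cancel the stuck-timer state outright; it records how much time the timer still had left and re-arms with that remainder when the queue is woken. A tiny sketch of that bookkeeping, with plain integers standing in for jiffies:

#include <stdio.h>

int main(void)
{
	unsigned long now = 1000, expires = 1750;
	unsigned long remainder = expires - now;	/* saved when freezing */

	/* ... queue stays frozen for an arbitrary time ... */
	now = 5000;

	/* on wake, re-arm the stuck timer with the saved remainder */
	printf("timer re-armed to expire at %lu\n", now + remainder);
	return 0;
}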
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+ if (trans->trans_cfg->gen2)
+ cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_trans_sync_nmi(trans);
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ }
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+ !(cmd->flags & CMD_SEND_IN_D3))) {
+ IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
+ return -EHOSTDOWN;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+ /* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ if (trans->trans_cfg->gen2)
+ ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
diff --git a/queue/tx.c b/queue/tx.c
deleted file mode 100644
index 5bb3cc3367c9..000000000000
--- a/queue/tx.c
+++ /dev/null
@@ -1,1883 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-#include <net/tso.h>
-#include <linux/tcp.h>
-
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "fw/api/commands.h"
-#include "fw/api/tx.h"
-#include "fw/api/datapath.h"
-#include "queue/tx.h"
-#include "iwl-fh.h"
-#include "iwl-scd.h"
-#include <linux/dmapool.h>
-
-/*
- * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- u8 filled_tfd_size, num_fetch_chunks;
- u16 len = byte_cnt;
- __le16 bc_ent;
-
- if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
- return;
-
- filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM- 0 for one chunk, 1 for 2 and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans->txqs.bc_table_dword);
- WARN_ON(len > 0x3FFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
- } else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans->txqs.bc_table_dword);
- len = DIV_ROUND_UP(len, 4);
- WARN_ON(len > 0xFFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
- }
-}
-
-/*
- * iwl_txq_inc_wr_ptr - Send new write index to hardware
- */
-void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd)
-{
- int i, num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- }
-
- tfd->num_tbs = 0;
-}
-
-void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, idx));
-
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
- int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb;
-
- /*
- * Only WARN here so we know about the issue, but we mess up our
- * unmap path because not every place currently checks for errors
- * returned from this function - it can only return an error if
- * there's no more space, and so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_txq_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
-static struct page *get_workaround_page(struct iwl_trans *trans,
- struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *ret;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
-
- ret = alloc_page(GFP_ATOMIC);
- if (!ret)
- return NULL;
-
- /* set the chaining pointer to the previous page if there */
- *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
- *page_ptr = ret;
-
- return ret;
-}
-
-/*
- * Add a TB and if needed apply the FH HW bug workaround;
- * meta != NULL indicates that it's a page mapping and we
- * need to dma_unmap_page() and set the meta->tbs bit in
- * this case.
- */
-static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- dma_addr_t phys, void *virt,
- u16 len, struct iwl_cmd_meta *meta)
-{
- dma_addr_t oldphys = phys;
- struct page *page;
- int ret;
-
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
-
- if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
-
- if (ret < 0)
- goto unmap;
-
- if (meta)
- meta->tbs |= BIT(ret);
-
- ret = 0;
- goto trace;
- }
-
- /*
- * Work around a hardware bug. If (as expressed in the
- * condition above) the TB ends on a 32-bit boundary,
- * then the next TB may be accessed with the wrong
- * address.
- * To work around it, copy the data elsewhere and make
- * a new mapping for it so the device will not fail.
- */
-
- if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
- ret = -ENOBUFS;
- goto unmap;
- }
-
- page = get_workaround_page(trans, skb);
- if (!page) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- memcpy(page_address(page), virt, len);
-
- phys = dma_map_single(trans->dev, page_address(page), len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
- if (ret < 0) {
- /* unmap the new allocation as single */
- oldphys = phys;
- meta = NULL;
- goto unmap;
- }
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
-
- ret = 0;
-unmap:
- if (meta)
- dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
-trace:
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
-
- return ret;
-}
-
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb)
-{
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
- struct page **page_ptr;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
-
- if (WARN_ON(*page_ptr))
- return NULL;
-
- if (!p->page)
- goto alloc;
-
- /*
- * Check if there's enough room on this page
- *
- * Note that we put a page chaining pointer *last* in the
- * page - we need it somewhere, and if it's there then we
- * avoid DMA mapping the last bits of the page which may
- * trigger the 32-bit boundary hardware bug.
- *
- * (see also get_workaround_page() in tx-gen2.c)
- */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
- sizeof(void *))
- goto out;
-
- /* We don't have enough room on this page, get a new one. */
- __free_page(p->page);
-
-alloc:
- p->page = alloc_page(GFP_ATOMIC);
- if (!p->page)
- return NULL;
- p->pos = page_address(p->page);
- /* set the chaining pointer to NULL */
- *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
-out:
- *page_ptr = p->page;
- get_page(p->page);
- return p;
-}
-#endif
-
-static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len,
- struct iwl_device_tx_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
- struct tso_t tso;
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
- &dev_cmd->hdr, start_len, 0);
-
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
- /* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
- return -ENOMEM;
-
- start_hdr = hdr_page->pos;
-
- /*
- * Pull the ieee80211 header to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left = min_t(unsigned int, mss, total_len);
- unsigned int tb_len;
- dma_addr_t tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
-
- total_len -= data_left;
-
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
-
- /*
- * This will copy the SNAP as well which will be considered
- * as MAC header.
- */
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
-
- hdr_page->pos += snap_ip_tcp_hdrlen;
-
- tb_len = hdr_page->pos - start_hdr;
- tb_phys = dma_map_single(trans->dev, start_hdr,
- tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa, this is from the TSO page and
- * we leave some space at the end of it so can't hit
- * the buggy scenario.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- tb_phys, tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
-
- /* put the payload */
- while (data_left) {
- int ret;
-
- tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = dma_map_single(trans->dev, tso.data,
- tb_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
- tb_phys, tso.data,
- tb_len, NULL);
- if (ret)
- goto out_err;
-
- data_left -= tb_len;
- tso_build_data(skb, &tso, tb_len);
- }
- }
-
- /* re -add the WiFi header */
- skb_push(skb, hdr_len);
-
- return 0;
-
-out_err:
-#endif
- return -EINVAL;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len;
- void *tb1_addr;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- /* do not align A-MSDU to dword as the subframe header aligns it */
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
-
- if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- unsigned int fragsz = skb_frag_size(frag);
- int ret;
-
- if (!fragsz)
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- fragsz, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb_frag_address(frag),
- fragsz, out_meta);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len,
- bool pad)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len, tb1_len, tb2_len;
- void *tb1_addr;
- struct sk_buff *frag;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- if (pad)
- tb1_len = ALIGN(len, 4);
- else
- tb1_len = len;
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-
- /* set up TFD's third entry to point to remainder of skb's head */
- tb2_len = skb_headlen(skb) - hdr_len;
-
- if (tb2_len > 0) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb->data + hdr_len, tb2_len,
- NULL);
- if (ret)
- goto out_err;
- }
-
- if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, frag->data,
- skb_headlen(frag), DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- frag->data,
- skb_headlen(frag), NULL);
- if (ret)
- goto out_err;
- if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
- goto out_err;
- }
-
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static
-struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- int len, hdr_len;
- bool amsdu;
-
- /* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
- IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
- IWL_FIRST_TB_SIZE);
-
- memset(tfd, 0, sizeof(*tfd));
-
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
- else
- len = sizeof(struct iwl_tx_cmd_gen3);
-
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- /*
- * Only build A-MSDUs here if doing so by GSO, otherwise it may be
- * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
- * built in the higher layers already.
- */
- if (amsdu && skb_shinfo(skb)->gso_size)
- return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
- out_meta, hdr_len, len);
- return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len, !amsdu);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
- unsigned int max;
- unsigned int used;
-
- /*
- * To avoid ambiguity between empty and completely full queues, there
- * should always be less than max_tfd_queue_size elements in the queue.
- * If q->n_window is smaller than max_tfd_queue_size, there is no need
- * to reserve any queue entries for this purpose.
- */
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
- max = q->n_window;
- else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
-
- /*
- * max_tfd_queue_size is a power of 2, so the following is equivalent to
- * modulo by max_tfd_queue_size and is well defined.
- */
- used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- if (WARN_ON(used > max))
- return 0;
-
- return max - used;
-}
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- u16 cmd_len;
- int idx;
- void *tfd;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return -EINVAL;
-
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- spin_lock(&txq->lock);
-
- if (iwl_txq_space(trans, txq) < txq->high_mark) {
- iwl_txq_stop(trans, txq);
-
- /* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_txq_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[idx].skb = skb;
- txq->entries[idx].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(idx)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[idx].meta;
- out_meta->flags = 0;
-
- tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
- if (!tfd) {
- spin_unlock(&txq->lock);
- return -1;
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
- } else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
- }
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
- iwl_txq_gen2_get_num_tbs(trans, tfd));
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- iwl_txq_inc_wr_ptr(trans, txq);
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/*
- * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->txqs.cmd.q_id) {
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
-
- if (!WARN_ON_ONCE(!skb))
- iwl_txq_free_tso_page(trans, skb);
- }
- iwl_txq_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock_bh(&txq->lock);
-
- /* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
-}
-
-static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct device *dev = trans->dev;
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans->txqs.tfd.size * txq->n_window,
- txq->tfds, txq->dma_addr);
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- if (txq->bc_tbl.addr)
- dma_pool_free(trans->txqs.bc_pool,
- txq->bc_tbl.addr, txq->bc_tbl.dma);
- kfree(txq);
-}
-
-/*
- * iwl_pcie_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq;
- int i;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return;
-
- txq = trans->txqs.txq[txq_id];
-
- if (WARN_ON(!txq))
- return;
-
- iwl_txq_gen2_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
- del_timer_sync(&txq->stuck_timer);
-
- iwl_txq_gen2_free_memory(trans, txq);
-
- trans->txqs.txq[txq_id] = NULL;
-
- clear_bit(txq_id, trans->txqs.queue_used);
-}
-
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
- q->n_window = slots_num;
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_txq_get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = 0;
- q->read_ptr = 0;
-
- return 0;
-}
-
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue)
-{
- int ret;
- u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
-
- txq->need_update = false;
-
- /* max_tfd_queue_size must be power-of-two size, otherwise
- * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
- "Max tfd queue size must be a power of two, but is %d",
- tfd_queue_max_size))
- return -EINVAL;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- if (cmd_queue) {
- static struct lock_class_key iwl_txq_cmd_queue_lock_class;
-
- lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
- }
-
- __skb_queue_head_init(&txq->overflow_q);
-
- return 0;
-}
-
-void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *next;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
- next = *page_ptr;
- *page_ptr = NULL;
-
- while (next) {
- struct page *tmp = next;
-
- next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
- sizeof(void *));
- __free_page(tmp);
- }
-}
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- u32 txq_id = txq->id;
- u32 status;
- bool active;
- u8 fifo;
-
- if (trans->trans_cfg->gen2) {
- IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
- txq->read_ptr, txq->write_ptr);
- /* TODO: access new SCD registers and dump them */
- return;
- }
-
- status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
- fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-
- IWL_ERR(trans,
- "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
- txq_id, active ? "" : "in", fifo,
- jiffies_to_msecs(txq->wd_timeout),
- txq->read_ptr, txq->write_ptr,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
-}
-
-static void iwl_txq_stuck_timer(struct timer_list *t)
-{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans *trans = txq->trans;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->read_ptr == txq->write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- iwl_txq_log_scd_error(trans, txq);
-
- iwl_force_nmi(trans);
-}
-
-int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue)
-{
- size_t tfd_sz = trans->txqs.tfd.size *
- trans->trans_cfg->base_params->max_tfd_queue_size;
- size_t tb0_buf_sz;
- int i;
-
- if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
- return -EINVAL;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- if (trans->trans_cfg->gen2)
- tfd_sz = trans->txqs.tfd.size * slots_num;
-
- timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
- txq->trans = trans;
-
- txq->n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_txq_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->dma_addr, GFP_KERNEL);
- if (!txq->tfds)
- goto error;
-
- BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
-
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
-
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
- &txq->first_tb_dma,
- GFP_KERNEL);
- if (!txq->first_tb_bufs)
- goto err_free_tfds;
-
- return 0;
-err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
- txq->tfds = NULL;
-error:
- if (txq->entries && cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-}
-
-static struct iwl_txq *
-iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
-{
- size_t bc_tbl_size, bc_tbl_entries;
- struct iwl_txq *txq;
- int ret;
-
- WARN_ON(!trans->txqs.bc_tbl_size);
-
- bc_tbl_size = trans->txqs.bc_tbl_size;
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
-
- if (WARN_ON(size > bc_tbl_entries))
- return ERR_PTR(-EINVAL);
-
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return ERR_PTR(-ENOMEM);
-
- txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
- &txq->bc_tbl.dma);
- if (!txq->bc_tbl.addr) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- kfree(txq);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = iwl_txq_alloc(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue alloc failed\n");
- goto error;
- }
- ret = iwl_txq_init(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue init failed\n");
- goto error;
- }
-
- txq->wd_timeout = msecs_to_jiffies(timeout);
-
- return txq;
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ERR_PTR(ret);
-}
-
-static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd)
-{
- struct iwl_tx_queue_cfg_rsp *rsp;
- int ret, qid;
- u32 wr_ptr;
-
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
- sizeof(*rsp))) {
- ret = -EINVAL;
- goto error_free_resp;
- }
-
- rsp = (void *)hcmd->resp_pkt->data;
- qid = le16_to_cpu(rsp->queue_number);
- wr_ptr = le16_to_cpu(rsp->write_pointer);
-
- if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
- WARN_ONCE(1, "queue index %d unsupported", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (test_and_set_bit(qid, trans->txqs.queue_used)) {
- WARN_ONCE(1, "queue %d already used", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (WARN_ONCE(trans->txqs.txq[qid],
- "queue %d already allocated\n", qid)) {
- ret = -EIO;
- goto error_free_resp;
- }
-
- txq->id = qid;
- trans->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- /* Place first TFD at index corresponding to start sequence number */
- txq->read_ptr = wr_ptr;
- txq->write_ptr = wr_ptr;
-
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
- iwl_free_resp(hcmd);
- return qid;
-
-error_free_resp:
- iwl_free_resp(hcmd);
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
- u8 tid, int size, unsigned int timeout)
-{
- struct iwl_txq *txq;
- union {
- struct iwl_tx_queue_cfg_cmd old;
- struct iwl_scd_queue_cfg_cmd new;
- } cmd;
- struct iwl_host_cmd hcmd = {
- .flags = CMD_WANT_SKB,
- };
- int ret;
-
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
- trans->hw_rev_step == SILICON_A_STEP)
- size = 4096;
-
- txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
- if (IS_ERR(txq))
- return PTR_ERR(txq);
-
- if (trans->txqs.queue_alloc_cmd_ver == 0) {
- memset(&cmd.old, 0, sizeof(cmd.old));
- cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
- cmd.old.tid = tid;
-
- if (hweight32(sta_mask) != 1) {
- ret = -EINVAL;
- goto error;
- }
- cmd.old.sta_id = ffs(sta_mask) - 1;
-
- hcmd.id = SCD_QUEUE_CFG;
- hcmd.len[0] = sizeof(cmd.old);
- hcmd.data[0] = &cmd.old;
- } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
- memset(&cmd.new, 0, sizeof(cmd.new));
- cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
- cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
- cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.new.u.add.flags = cpu_to_le32(flags);
- cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
- cmd.new.u.add.tid = tid;
-
- hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
- hcmd.len[0] = sizeof(cmd.new);
- hcmd.data[0] = &cmd.new;
- } else {
- ret = -EOPNOTSUPP;
- goto error;
- }
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
-
- return iwl_txq_alloc_response(trans, txq, &hcmd);
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
-{
- if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", queue))
- return;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", queue);
- return;
- }
-
- iwl_txq_gen2_free(trans, queue);
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
-
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
-{
- int i;
-
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
- if (!trans->txqs.txq[i])
- continue;
-
- iwl_txq_gen2_free(trans, i);
- }
-}
-
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
-{
- struct iwl_txq *queue;
- int ret;
-
- /* alloc and init the tx queue */
- if (!trans->txqs.txq[txq_id]) {
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- IWL_ERR(trans, "Not enough memory for tx queue\n");
- return -ENOMEM;
- }
- trans->txqs.txq[txq_id] = queue;
- ret = iwl_txq_alloc(trans, queue, queue_size, true);
- if (ret) {
-			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- } else {
- queue = trans->txqs.txq[txq_id];
- }
-
- ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans->txqs.cmd.q_id));
- if (ret) {
-		IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- trans->txqs.txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans->txqs.queue_used);
-
- return 0;
-
-error:
- iwl_txq_gen2_tx_free(trans);
- return ret;
-}
-
-static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
- struct iwl_tfd *tfd;
- struct iwl_tfd_tb *tb;
- dma_addr_t addr;
- dma_addr_t hi_len;
-
- if (trans->trans_cfg->gen2) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
- struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
- return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
- }
-
- tfd = _tfd;
- tb = &tfd->tbs[idx];
- addr = get_unaligned_le32(&tb->lo);
-
- if (sizeof(dma_addr_t) <= sizeof(u32))
- return addr;
-
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
-
- /*
- * shift by 16 twice to avoid warnings on 32-bit
- * (where this code never runs anyway due to the
- * if statement above)
- */
- return addr | ((hi_len << 16) << 16);
-}
-
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index)
-{
- int i, num_tbs;
- void *tfd = iwl_txq_get_tfd(trans, txq, index);
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, this is quite a serious situation */
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
-
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(trans,
- tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(trans,
- tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- }
-
- meta->tbs = 0;
-
- if (trans->trans_cfg->gen2) {
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- } else {
- struct iwl_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- }
-}
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-/*
- * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- int write_ptr = txq->write_ptr;
- int txq_id = txq->id;
- u8 sec_ctl = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
-
- scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
-
- sec_ctl = tx_cmd->sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += IEEE80211_CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += IEEE80211_TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
- break;
- }
- if (trans->txqs.bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
- return;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
- bc_ent;
-}
-
-void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
- int txq_id = txq->id;
- int read_ptr = txq->read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != trans->txqs.cmd.q_id)
- sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
- bc_ent;
-}
-
-/*
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
- * @trans - transport private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within the circular buffer)
- */
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int rd_ptr = txq->read_ptr;
- int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- /* We have only q->n_window txq->entries, but we use
- * TFD_QUEUE_SIZE_MAX tfds
- */
- iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
-
- /* free SKB */
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-void iwl_txq_progress(struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- if (!txq->wd_timeout)
- return;
-
- /*
- * station is asleep and we send data - that must
- * be uAPSD or PS-Poll. Don't rearm the timer.
- */
- if (txq->frozen)
- return;
-
- /*
- * if empty delete timer, otherwise move timer forward
- * since we're making progress on this queue
- */
- if (txq->read_ptr == txq->write_ptr)
- del_timer(&txq->stuck_timer);
- else
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-}
-
-/* Frees buffers until index _not_ inclusive */
-void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- int tfd_num, read_ptr, last_to_free;
-
-	/* This function is not meant to release the cmd queue */
- if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
- return;
-
- if (WARN_ON(!txq))
- return;
-
- tfd_num = iwl_txq_get_cmd_index(txq, ssn);
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
-
- spin_lock_bh(&txq->lock);
-
- if (!test_bit(txq_id, trans->txqs.queue_used)) {
- IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
- txq_id, ssn);
- goto out;
- }
-
- if (read_ptr == tfd_num)
- goto out;
-
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->read_ptr, tfd_num, ssn);
-
-	/* Since we free until index _not_ inclusive, the one before index is
-	 * the last one we will free. This one must be used. */
- last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
-
- if (!iwl_txq_used(txq, last_to_free)) {
- IWL_ERR(trans,
- "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free,
- trans->trans_cfg->base_params->max_tfd_queue_size,
- txq->write_ptr, txq->read_ptr);
-
- iwl_op_mode_time_point(trans->op_mode,
- IWL_FW_INI_TIME_POINT_FAKE_TX,
- NULL);
- goto out;
- }
-
- if (WARN_ON(!skb_queue_empty(skbs)))
- goto out;
-
- for (;
- read_ptr != tfd_num;
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
- struct sk_buff *skb = txq->entries[read_ptr].skb;
-
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
-
- __skb_queue_tail(skbs, skb);
-
- txq->entries[read_ptr].skb = NULL;
-
- if (!trans->trans_cfg->gen2)
- iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
-
- iwl_txq_free_tfd(trans, txq);
- }
-
- iwl_txq_progress(txq);
-
- if (iwl_txq_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans->txqs.queue_stopped)) {
- struct sk_buff_head overflow_skbs;
-
- __skb_queue_head_init(&overflow_skbs);
- skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
-
- /*
- * We are going to transmit from the overflow queue.
- * Remember this state so that wait_for_txq_empty will know we
- * are adding more packets to the TFD queue. It cannot rely on
- * the state of &txq->overflow_q, as we just emptied it, but
- * haven't TXed the content yet.
- */
- txq->overflow_tx = true;
-
- /*
-		 * This is tricky: we are in the reclaim path, which is not
-		 * re-entrant, so no one else will try to access the txq data
-		 * from that path. We also stopped tx, so the tx path is not
-		 * running either. Bottom line: we can unlock and re-lock
-		 * later.
- */
- spin_unlock_bh(&txq->lock);
-
- while (!skb_queue_empty(&overflow_skbs)) {
- struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
- struct iwl_device_tx_cmd *dev_cmd_ptr;
-
- dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
-
- /*
- * Note that we can very well be overflowing again.
- * In that case, iwl_txq_space will be small again
- * and we won't wake mac80211's queue.
- */
- iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
- }
-
- if (iwl_txq_space(trans, txq) > txq->low_mark)
- iwl_wake_queue(trans, txq);
-
- spin_lock_bh(&txq->lock);
- txq->overflow_tx = false;
- }
-
-out:
- spin_unlock_bh(&txq->lock);
-}
-
-/* Set wr_ptr of specific device and txq */
-void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
-
- txq->write_ptr = ptr;
- txq->read_ptr = txq->write_ptr;
-
- spin_unlock_bh(&txq->lock);
-}
-
-void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze)
-{
- int queue;
-
- for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans->txqs.txq[queue];
- unsigned long now;
-
- spin_lock_bh(&txq->lock);
-
- now = jiffies;
-
- if (txq->frozen == freeze)
- goto next_queue;
-
- IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
- freeze ? "Freezing" : "Waking", queue);
-
- txq->frozen = freeze;
-
- if (txq->read_ptr == txq->write_ptr)
- goto next_queue;
-
- if (freeze) {
- if (unlikely(time_after(now,
- txq->stuck_timer.expires))) {
- /*
- * The timer should have fired, maybe it is
- * spinning right now on the lock.
- */
- goto next_queue;
- }
- /* remember how long until the timer fires */
- txq->frozen_expiry_remainder =
- txq->stuck_timer.expires - now;
- del_timer(&txq->stuck_timer);
- goto next_queue;
- }
-
- /*
- * Wake a non-empty queue -> arm timer with the
- * remainder before it froze
- */
- mod_timer(&txq->stuck_timer,
- now + txq->frozen_expiry_remainder);
-
-next_queue:
- spin_unlock_bh(&txq->lock);
- }
-}
-
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
- int cmd_idx;
- int ret;
-
- IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
-
- if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- "Command %s: a command is already active!\n", cmd_str))
- return -EIO;
-
- IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
- cmd_idx = trans->ops->send_cmd(trans, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
- cmd_str, ret);
- return ret;
- }
-
- ret = wait_event_timeout(trans->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
- cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- cmd_str);
- ret = -ETIMEDOUT;
-
- iwl_trans_sync_nmi(trans);
- goto cancel;
- }
-
- if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
- IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
- dump_stack();
- }
- ret = -EIO;
- goto cancel;
- }
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
- ret = -ERFKILL;
- goto cancel;
- }
-
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
- IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
-		 * TX cmd queue. Otherwise, if the response comes
-		 * in later, it could set an invalid address
-		 * (cmd->meta.source).
- */
- txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
- }
-
- if (cmd->resp_pkt) {
- iwl_free_resp(cmd);
- cmd->resp_pkt = NULL;
- }
-
- return ret;
-}
-
-int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
- cmd->id);
- return -ERFKILL;
- }
-
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3))) {
- IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
- return -EHOSTDOWN;
- }
-
- if (cmd->flags & CMD_ASYNC) {
- int ret;
-
-		/* An asynchronous command cannot expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- ret = trans->ops->send_cmd(trans, cmd);
- if (ret < 0) {
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
- return 0;
- }
-
- return iwl_trans_txq_send_hcmd_sync(trans, cmd);
-}
-
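The gen1 byte-count table update above stores, for each TFD, a little-endian 16-bit entry that packs a length capped at 0xFFF (optionally rounded up to dwords) with a 4-bit station id in the top nibble. A minimal standalone sketch of that packing, using a hypothetical helper name and returning a host-endian value (the driver additionally converts with cpu_to_le16()):

#include <stdint.h>

/* Sketch only: mirrors bc_ent = cpu_to_le16(len | (sta_id << 12)) and the
 * 0xFFF length limit checked in iwl_txq_gen1_update_byte_cnt_tbl(). */
static uint16_t pack_bc_entry(uint16_t len, uint8_t sta_id, int bc_table_dword)
{
	if (bc_table_dword)
		len = (len + 3) / 4;		/* DIV_ROUND_UP(len, 4) */
	if (len > 0xFFF || sta_id > 0xF)
		return 0;			/* out of range; the driver WARNs and bails out */
	return len | ((uint16_t)sta_id << 12);
}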
diff --git a/queue/tx.h b/queue/tx.h
deleted file mode 100644
index 1e4a24ab9bab..000000000000
--- a/queue/tx.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-#ifndef __iwl_trans_queue_tx_h__
-#define __iwl_trans_queue_tx_h__
-#include "iwl-fh.h"
-#include "fw/api/tx.h"
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
-
-static inline dma_addr_t
-iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
-
-static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- if (trans->trans_cfg->gen2)
- idx = iwl_txq_get_cmd_index(txq, idx);
-
- return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
-}
-
-int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue);
-/*
- * We need this inline in case dma_addr_t is only 32-bits - since the
- * hardware is always 64-bit, the issue can still occur in that case,
- * so use u64 for 'phys' here to force the addition in 64-bit.
- */
-static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
-{
- return upper_32_bits(phys) != upper_32_bits(phys + len);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
-
-static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else {
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
- }
-}
-
-/**
- * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
- * @trans: transport
- * @index: current index
- */
-static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-/**
- * iwl_txq_dec_wrap - decrement queue index, wrap back to end
- * @trans: transport
- * @index: current index
- */
-static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-{
- int index = iwl_txq_get_cmd_index(q, i);
- int r = iwl_txq_get_cmd_index(q, q->read_ptr);
- int w = iwl_txq_get_cmd_index(q, q->write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
-
-void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd, dma_addr_t addr,
- u16 len);
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd);
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int timeout);
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
-void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue);
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb);
-#endif
-static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
- void *_tfd)
-{
- struct iwl_tfd *tfd;
-
- if (trans->trans_cfg->gen2) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
-
- return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
- }
-
- tfd = (struct iwl_tfd *)_tfd;
- return tfd->num_tbs & 0x1f;
-}
-
-static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
- struct iwl_tfd *tfd;
- struct iwl_tfd_tb *tb;
-
- if (trans->trans_cfg->gen2) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
- struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
- return le16_to_cpu(tfh_tb->tb_len);
- }
-
- tfd = (struct iwl_tfd *)_tfd;
- tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index);
-void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq);
-void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
-void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs);
-void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
-void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
-void iwl_txq_progress(struct iwl_txq *txq);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-#endif /* __iwl_trans_queue_tx_h__ */
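The wrap helpers in the header above rely on max_tfd_queue_size being a power of two, so masking with (size - 1) behaves like a modulo even when the decrement goes below zero. A small self-contained sketch of the same idea, assuming a fixed queue size instead of the trans_cfg lookup:

#include <assert.h>

#define QUEUE_SIZE 256				/* must be a power of two */

static int inc_wrap(int index)
{
	return ++index & (QUEUE_SIZE - 1);	/* 255 -> 0 */
}

static int dec_wrap(int index)
{
	return --index & (QUEUE_SIZE - 1);	/* 0 -> 255 */
}

int main(void)
{
	assert(inc_wrap(255) == 0);
	assert(dec_wrap(0) == 255);
	return 0;
}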
diff --git a/tests/Makefile b/tests/Makefile
new file mode 100644
index 000000000000..84491488f589
--- /dev/null
+++ b/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+iwlwifi-tests-y += module.o devinfo.o
+
+ccflags-y += -I$(src)/../
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/tests/devinfo.c b/tests/devinfo.c
new file mode 100644
index 000000000000..7361b6d0cdb8
--- /dev/null
+++ b/tests/devinfo.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the iwlwifi device info table
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+#include <kunit/test.h>
+#include <linux/pci.h>
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
+{
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->cores);
+}
+
+static void devinfo_table_order(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+ const struct iwl_dev_info *ret;
+
+ ret = iwl_pci_find_dev_info(di->device, di->subdevice,
+ di->mac_type, di->mac_step,
+ di->rf_type, di->cdb,
+ di->jacket, di->rf_id,
+ di->no_160, di->cores, di->rf_step);
+ if (ret != di) {
+ iwl_pci_print_dev_info("searched: ", di);
+ iwl_pci_print_dev_info("found: ", ret);
+ KUNIT_FAIL(test,
+ "unusable entry at index %d (found index %d instead)\n",
+ idx, (int)(ret - iwl_dev_info_table));
+ }
+ }
+}
+
+static void devinfo_pci_ids(struct kunit *test)
+{
+ struct pci_dev *dev;
+
+ dev = kunit_kmalloc(test, sizeof(*dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dev);
+
+ for (int i = 0; iwl_hw_card_ids[i].vendor; i++) {
+ const struct pci_device_id *s, *t;
+
+ s = &iwl_hw_card_ids[i];
+ dev->vendor = s->vendor;
+ dev->device = s->device;
+ dev->subsystem_vendor = s->subvendor;
+ dev->subsystem_device = s->subdevice;
+ dev->class = s->class;
+
+ t = pci_match_id(iwl_hw_card_ids, dev);
+ KUNIT_EXPECT_PTR_EQ(test, t, s);
+ }
+}
+
+static struct kunit_case devinfo_test_cases[] = {
+ KUNIT_CASE(devinfo_table_order),
+ KUNIT_CASE(devinfo_pci_ids),
+ {}
+};
+
+static struct kunit_suite iwlwifi_devinfo = {
+ .name = "iwlwifi-devinfo",
+ .test_cases = devinfo_test_cases,
+};
+
+kunit_test_suite(iwlwifi_devinfo);
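The devinfo_pci_ids case above leans on pci_match_id() returning the table entry whose fields match the constructed device, with PCI_ANY_ID acting as a wildcard, and on iwl_hw_card_ids being terminated by an all-zero entry. A simplified, self-contained sketch of that matching rule (hypothetical types and names; the real helper also checks class/class_mask):

#include <stddef.h>
#include <stdint.h>

#define ANY_ID 0xffffffffu		/* stands in for PCI_ANY_ID */

struct id_entry {
	uint32_t vendor, device, subvendor, subdevice;
};

static int field_matches(uint32_t pattern, uint32_t value)
{
	return pattern == ANY_ID || pattern == value;
}

/* Return the first matching entry; the table ends with an all-zero entry. */
static const struct id_entry *match_id(const struct id_entry *table,
				       const struct id_entry *dev)
{
	for (; table->vendor; table++) {
		if (field_matches(table->vendor, dev->vendor) &&
		    field_matches(table->device, dev->device) &&
		    field_matches(table->subvendor, dev->subvendor) &&
		    field_matches(table->subdevice, dev->subdevice))
			return table;
	}
	return NULL;
}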
diff --git a/tests/module.c b/tests/module.c
new file mode 100644
index 000000000000..0c54f818e5a7
--- /dev/null
+++ b/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Module boilerplate for the iwlwifi kunit module.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlwifi");