author     Bjoern A. Zeeb <bz@FreeBSD.org>    2024-05-17 00:53:10 +0000
committer  Bjoern A. Zeeb <bz@FreeBSD.org>    2024-07-29 14:58:04 +0000
commit     087478e3278bcea4c35d1435b54c15a6eaed584f
tree       1feddd1ed4ec852fd4c37de882b32c99b3bd88a7
parent     7e7384d7e4d4f01e10a375e0bf88c013bb436de0

iwlwifi: update Intel's iwlwifi/mvm driver.  (vendor/Linux/iwlwifi/iwlwifi-next-cb0a1fb7fd86)
This version is based on
https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next.git
cb0a1fb7fd86b0062692b5056ca8552906509512
(tag: iwlwifi-next-for-kalle-2022-02-18)
and was committed to FreeBSD main as
d9836fb4b9380e2ed1c38455fb31a3832b452671.
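
The upstream baseline can be reproduced locally for comparison (a sketch; the repository URL, commit hash, and tag are the ones quoted above):

    git clone https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next.git
    cd iwlwifi-next
    git checkout cb0a1fb7fd86b0062692b5056ca8552906509512   # tag: iwlwifi-next-for-kalle-2022-02-18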
-rw-r--r-- | cfg/22000.c | 117
-rw-r--r-- | fw/acpi.c | 971
-rw-r--r-- | fw/acpi.h | 30
-rw-r--r-- | fw/api/alive.h | 26
-rw-r--r-- | fw/api/commands.h | 36
-rw-r--r-- | fw/api/config.h | 33
-rw-r--r-- | fw/api/d3.h | 81
-rw-r--r-- | fw/api/datapath.h | 210
-rw-r--r-- | fw/api/dbg-tlv.h | 72
-rw-r--r-- | fw/api/debug.h | 19
-rw-r--r-- | fw/api/mac-cfg.h | 34
-rw-r--r-- | fw/api/mac.h | 127
-rw-r--r-- | fw/api/nvm-reg.h | 82
-rw-r--r-- | fw/api/phy-ctxt.h | 9
-rw-r--r-- | fw/api/phy.h | 16
-rw-r--r-- | fw/api/power.h | 22
-rw-r--r-- | fw/api/rfi.h | 10
-rw-r--r-- | fw/api/rs.h | 60
-rw-r--r-- | fw/api/scan.h | 93
-rw-r--r-- | fw/api/stats.h | 92
-rw-r--r-- | fw/api/system.h (renamed from fw/api/soc.h) | 16
-rw-r--r-- | fw/api/tx.h | 30
-rw-r--r-- | fw/api/txq.h | 4
-rw-r--r-- | fw/dbg.c | 319
-rw-r--r-- | fw/dbg.h | 5
-rw-r--r-- | fw/debugfs.c | 409
-rw-r--r-- | fw/dump.c | 153
-rw-r--r-- | fw/error-dump.h | 18
-rw-r--r-- | fw/file.h | 9
-rw-r--r-- | fw/img.c | 13
-rw-r--r-- | fw/img.h | 6
-rw-r--r-- | fw/init.c | 7
-rw-r--r-- | fw/paging.c | 4
-rw-r--r-- | fw/runtime.h | 9
-rw-r--r-- | fw/smem.c | 4
-rw-r--r-- | fw/uefi.c | 106
-rw-r--r-- | fw/uefi.h | 20
-rw-r--r-- | iwl-config.h | 27
-rw-r--r-- | iwl-csr.h | 27
-rw-r--r-- | iwl-dbg-tlv.c | 140
-rw-r--r-- | iwl-drv.c | 234
-rw-r--r-- | iwl-drv.h | 5
-rw-r--r-- | iwl-eeprom-read.c | 12
-rw-r--r-- | iwl-fh.h | 32
-rw-r--r-- | iwl-io.c | 20
-rw-r--r-- | iwl-nvm-parse.c | 112
-rw-r--r-- | iwl-nvm-parse.h | 11
-rw-r--r-- | iwl-phy-db.c | 4
-rw-r--r-- | iwl-prph.h | 22
-rw-r--r-- | iwl-trans.c | 12
-rw-r--r-- | iwl-trans.h | 87
-rw-r--r-- | mvm/constants.h | 2
-rw-r--r-- | mvm/d3.c | 2797
-rw-r--r-- | mvm/debugfs.c | 2145
-rw-r--r-- | mvm/ftm-initiator.c | 29
-rw-r--r-- | mvm/ftm-responder.c | 24
-rw-r--r-- | mvm/fw-api.h | 2
-rw-r--r-- | mvm/fw.c | 386
-rw-r--r-- | mvm/mac-ctxt.c | 50
-rw-r--r-- | mvm/mac80211.c | 517
-rw-r--r-- | mvm/mvm.h | 106
-rw-r--r-- | mvm/offloading.c | 3
-rw-r--r-- | mvm/ops.c | 300
-rw-r--r-- | mvm/phy-ctxt.c | 93
-rw-r--r-- | mvm/quota.c | 2
-rw-r--r-- | mvm/rfi.c | 61
-rw-r--r-- | mvm/rs-fw.c | 129
-rw-r--r-- | mvm/rx.c | 265
-rw-r--r-- | mvm/rxmq.c | 60
-rw-r--r-- | mvm/scan.c | 385
-rw-r--r-- | mvm/sta.c | 323
-rw-r--r-- | mvm/sta.h | 7
-rw-r--r-- | mvm/time-event.c | 56
-rw-r--r-- | mvm/tt.c | 11
-rw-r--r-- | mvm/tx.c | 128
-rw-r--r-- | mvm/utils.c | 91
-rw-r--r-- | pcie/drv.c | 388
-rw-r--r-- | pcie/internal.h | 46
-rw-r--r-- | pcie/rx.c | 119
-rw-r--r-- | pcie/trans-gen2.c | 13
-rw-r--r-- | pcie/trans.c | 185
-rw-r--r-- | pcie/tx.c | 14
-rw-r--r-- | queue/tx.c | 105
-rw-r--r-- | queue/tx.h | 21
84 files changed, 11248 insertions, 1632 deletions
diff --git a/cfg/22000.c b/cfg/22000.c
index 1572097bccf1..918dd0f6f8b5 100644
--- a/cfg/22000.c
+++ b/cfg/22000.c
@@ -7,9 +7,10 @@
 #include <linux/stringify.h>
 #include "iwl-config.h"
 #include "iwl-prph.h"
+#include "fw/api/txq.h"
 
 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 67
+#define IWL_22000_UCODE_API_MAX 70
 
 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN 39
@@ -39,6 +40,7 @@
 #define IWL_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
 #define IWL_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
 #define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
+#define IWL_SO_A_MR_A_FW_PRE "iwlwifi-so-a0-mr-a0-"
 #define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
 #define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
 #define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
@@ -54,7 +56,13 @@
 #define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
 #define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
 #define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
-#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm7-a0-"
+#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm-a0-"
+#define IWL_BZ_Z_GF_A_FW_PRE "iwlwifi-bz-z0-gf-a0-"
+#define IWL_BNJ_A_FM_A_FW_PRE "iwlwifi-BzBnj-a0-fm-a0-"
+#define IWL_BNJ_A_FM4_A_FW_PRE "iwlwifi-BzBnj-a0-fm4-a0-"
+#define IWL_BNJ_A_GF_A_FW_PRE "iwlwifi-BzBnj-a0-gf-a0-"
+#define IWL_BNJ_A_GF4_A_FW_PRE "iwlwifi-BzBnj-a0-gf4-a0-"
+#define IWL_BNJ_A_HR_B_FW_PRE "iwlwifi-BzBnj-a0-hr-b0-"
 
 #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
@@ -113,6 +121,16 @@
     IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
     IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_FM_A_MODULE_FIRMWARE(api) \
+    IWL_BNJ_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(api) \
+    IWL_BNJ_A_FM4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_GF_A_MODULE_FIRMWARE(api) \
+    IWL_BNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(api) \
+    IWL_BNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BNJ_A_HR_B_MODULE_FIRMWARE(api) \
+    IWL_BNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
 
 static const struct iwl_base_params iwl_22000_base_params = {
     .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -206,7 +224,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
     .trans.base_params = &iwl_ax210_base_params, \
     .min_txq_size = 128, \
     .gp2_reg_addr = 0xd02c68, \
-    .min_256_ba_txq_size = 1024, \
+    .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_HE, \
     .mon_dram_regs = { \
         .write_ptr = { \
             .addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -234,7 +252,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
     .dccm2_len = IWL_22000_DCCM2_LEN, \
     .smem_offset = IWL_22000_SMEM_OFFSET, \
     .smem_len = IWL_22000_SMEM_LEN, \
-    .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+    .features = IWL_TX_CSUM_NETIF_FLAGS_BZ | NETIF_F_RXCSUM, \
     .apmg_not_supported = true, \
     .trans.mq_rx_supported = true, \
     .vht_mu_mimo_supported = true, \
@@ -267,7 +285,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
     .trans.base_params = &iwl_ax210_base_params, \
     .min_txq_size = 128, \
     .gp2_reg_addr = 0xd02c68, \
-    .min_256_ba_txq_size = 1024, \
+    .min_ba_txq_size = IWL_DEFAULT_QUEUE_SIZE_EHT, \
     .mon_dram_regs = { \
         .write_ptr = { \
             .addr = DBGC_CUR_DBGBUF_STATUS, \
@@ -281,6 +299,12 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
             .addr = DBGC_CUR_DBGBUF_STATUS, \
             .mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
         }, \
+    }, \
+    .mon_dbgi_regs = { \
+        .write_ptr = { \
+            .addr = DBGI_SRAM_FIFO_POINTERS, \
+            .mask = DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK, \
+        }, \
     }
 
 const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
@@ -458,6 +482,7 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
 const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
 const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
 const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
+const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
 const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
 const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
@@ -626,7 +651,7 @@ const struct iwl_cfg iwl_ax200_cfg_cc = {
 };
 
 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
-    .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+    .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
     .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
     IWL_DEVICE_22500,
     /*
@@ -639,7 +664,7 @@
 };
 
 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
-    .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+    .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
     .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
     IWL_DEVICE_22500,
     /*
@@ -652,7 +677,7 @@
 };
 
 const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
-    .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+    .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201NGW)",
     .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
     IWL_DEVICE_22500,
     /*
@@ -665,7 +690,7 @@
 };
 
 const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
-    .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+    .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201D2W)",
     .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
     IWL_DEVICE_22500,
     /*
@@ -696,13 +721,6 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0 = {
     .num_rbds = IWL_NUM_RBDS_NON_HE,
 };
 
-const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
-    .name = "Intel(R) Wi-Fi 6 AX210 160MHz",
-    .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
-    IWL_DEVICE_AX210,
-    .num_rbds = IWL_NUM_RBDS_AX210_HE,
-};
-
 const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
     .name = iwl_ax211_name,
     .fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
@@ -805,6 +823,20 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
     .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_ma_a0_ms_a0 = {
+    .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+    .uhb_supported = false,
+    IWL_DEVICE_AX210,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_so_a0_ms_a0 = {
+    .fw_name_pre = IWL_SO_A_MR_A_FW_PRE,
+    .uhb_supported = false,
+    IWL_DEVICE_AX210,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
     .fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
     .uhb_supported = true,
@@ -819,6 +851,13 @@ const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
     .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_snj_a0_ms_a0 = {
+    .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
+    .uhb_supported = false,
+    IWL_DEVICE_AX210,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
 const struct iwl_cfg iwl_cfg_so_a0_hr_a0 = {
     .fw_name_pre = IWL_SO_A_HR_B_FW_PRE,
     IWL_DEVICE_AX210,
@@ -879,6 +918,47 @@ const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
     .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
+const struct iwl_cfg iwl_cfg_bz_z0_gf_a0 = {
+    .fw_name_pre = IWL_BZ_Z_GF_A_FW_PRE,
+    .uhb_supported = true,
+    IWL_DEVICE_BZ,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0 = {
+    .fw_name_pre = IWL_BNJ_A_FM_A_FW_PRE,
+    .uhb_supported = true,
+    IWL_DEVICE_BZ,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0 = {
+    .fw_name_pre = IWL_BNJ_A_FM4_A_FW_PRE,
+    .uhb_supported = true,
+    IWL_DEVICE_BZ,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0 = {
+    .fw_name_pre = IWL_BNJ_A_GF_A_FW_PRE,
+    .uhb_supported = true,
+    IWL_DEVICE_BZ,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0 = {
+    .fw_name_pre = IWL_BNJ_A_GF4_A_FW_PRE,
+    .uhb_supported = true,
+    IWL_DEVICE_BZ,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0 = {
+    .fw_name_pre = IWL_BNJ_A_HR_B_FW_PRE,
+    .uhb_supported = true,
+    IWL_DEVICE_BZ,
+    .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
 
 MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -907,3 +987,8 @@
 MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_FM4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/fw/acpi.c b/fw/acpi.c
new file mode 100644
index 000000000000..0e9e61508ae5
--- /dev/null
+++ b/fw/acpi.c
@@ -0,0 +1,971 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2017 Intel Deutschland GmbH
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+#include <linux/uuid.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "acpi.h"
+#include "fw/runtime.h"
+
+const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
+                                  0xA5, 0xB3, 0x1F, 0x73,
+                                  0x8E, 0x28, 0x5A, 0xDE);
+IWL_EXPORT_SYMBOL(iwl_guid);
+
+const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
+                                      0x81, 0x4F, 0x75, 0xE4,
+                                      0xDD, 0x26, 0xB5, 0xFD);
+IWL_EXPORT_SYMBOL(iwl_rfi_guid);
+
+static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
+                               acpi_handle *ret_handle)
+{
+    acpi_handle root_handle;
+    acpi_status status;
+
+    root_handle = ACPI_HANDLE(dev);
+    if (!root_handle) {
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "ACPI: Could not retrieve root port handle\n");
+        return -ENOENT;
+    }
+
+    status = acpi_get_handle(root_handle, method, ret_handle);
+    if (ACPI_FAILURE(status)) {
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "ACPI: %s method not found\n", method);
+        return -ENOENT;
+    }
+    return 0;
+}
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+    struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+    acpi_handle handle;
+    acpi_status status;
+    int ret;
+
+    ret = iwl_acpi_get_handle(dev, method, &handle);
+    if (ret)
+        return ERR_PTR(-ENOENT);
+
+    /* Call the method with no arguments */
+    status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+    if (ACPI_FAILURE(status)) {
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "ACPI: %s method invocation failed (status: 0x%x)\n",
+                            method, status);
+        return ERR_PTR(-ENOENT);
+    }
+    return buf.pointer;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
+
+/*
+ * Generic function for evaluating a method defined in the device specific
+ * method (DSM) interface. The returned acpi object must be freed by calling
+ * function.
+ */
+static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+                                     union acpi_object *args,
+                                     const guid_t *guid)
+{
+    union acpi_object *obj;
+
+    obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), guid, rev, func,
+                            args);
+    if (!obj) {
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "ACPI: DSM method invocation failed (rev: %d, func:%d)\n",
+                            rev, func);
+        return ERR_PTR(-ENOENT);
+    }
+    return obj;
+}
+
+/*
+ * Generic function to evaluate a DSM with no arguments
+ * and an integer return value,
+ * (as an integer object or inside a buffer object),
+ * verify and assign the value in the "value" parameter.
+ * return 0 in success and the appropriate errno otherwise.
+ */
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+                                    const guid_t *guid, u64 *value,
+                                    size_t expected_size)
+{
+    union acpi_object *obj;
+    int ret = 0;
+
+    obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid);
+    if (IS_ERR(obj)) {
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "Failed to get DSM object. func= %d\n",
+                            func);
+        return -ENOENT;
+    }
+
+    if (obj->type == ACPI_TYPE_INTEGER) {
+        *value = obj->integer.value;
+    } else if (obj->type == ACPI_TYPE_BUFFER) {
+        __le64 le_value = 0;
+
+        if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+            return -EINVAL;
+
+        /* if the buffer size doesn't match the expected size */
+        if (obj->buffer.length != expected_size)
+            IWL_DEBUG_DEV_RADIO(dev,
+                                "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+                                obj->buffer.length);
+
+        /* assuming LE from Intel BIOS spec */
+        memcpy(&le_value, obj->buffer.pointer,
+               min_t(size_t, expected_size, (size_t)obj->buffer.length));
+        *value = le64_to_cpu(le_value);
+    } else {
+        IWL_DEBUG_DEV_RADIO(dev,
+                            "ACPI: DSM method did not return a valid object, type=%d\n",
+                            obj->type);
+        ret = -EINVAL;
+        goto out;
+    }
+
+    IWL_DEBUG_DEV_RADIO(dev,
+                        "ACPI: DSM method evaluated: func=%d, ret=%d\n",
+                        func, ret);
+out:
+    ACPI_FREE(obj);
+    return ret;
+}
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value,
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
+                        const guid_t *guid, u8 *value)
+{
+    int ret;
+    u64 val;
+
+    ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+                                   guid, &val, sizeof(u8));
+
+    if (ret < 0)
+        return ret;
+
+    /* cast val (u64) to be u8 */
+    *value = (u8)val;
+    return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+
+/*
+ * Evaluate a DSM with no arguments and a u32 return value,
+ */
+int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
+                         const guid_t *guid, u32 *value)
+{
+    int ret;
+    u64 val;
+
+    ret = iwl_acpi_get_dsm_integer(dev, rev, func,
+                                   guid, &val, sizeof(u32));
+
+    if (ret < 0)
+        return ret;
+
+    /* cast val (u64) to be u32 */
+    *value = (u32)val;
+    return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
+
+union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
+                                               union acpi_object *data,
+                                               int min_data_size,
+                                               int max_data_size,
+                                               int *tbl_rev)
+{
+    int i;
+    union acpi_object *wifi_pkg;
+
+    /*
+     * We need at least one entry in the wifi package that
+     * describes the domain, and one more entry, otherwise there's
+     * no point in reading it.
+     */
+    if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size))
+        return ERR_PTR(-EINVAL);
+
+    /*
+     * We need at least two packages, one for the revision and one
+     * for the data itself. Also check that the revision is valid
+     * (i.e. it is an integer (each caller has to check by itself
+     * if the returned revision is supported)).
+     */
+    if (data->type != ACPI_TYPE_PACKAGE ||
+        data->package.count < 2 ||
+        data->package.elements[0].type != ACPI_TYPE_INTEGER) {
+        IWL_DEBUG_DEV_RADIO(dev, "Invalid packages structure\n");
+        return ERR_PTR(-EINVAL);
+    }
+
+    *tbl_rev = data->package.elements[0].integer.value;
+
+    /* loop through all the packages to find the one for WiFi */
+    for (i = 1; i < data->package.count; i++) {
+        union acpi_object *domain;
+
+        wifi_pkg = &data->package.elements[i];
+
+        /* skip entries that are not a package with the right size */
+        if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+            wifi_pkg->package.count < min_data_size ||
+            wifi_pkg->package.count > max_data_size)
+            continue;
+
+        domain = &wifi_pkg->package.elements[0];
+        if (domain->type == ACPI_TYPE_INTEGER &&
+            domain->integer.value == ACPI_WIFI_DOMAIN)
+            goto found;
+    }
+
+    return ERR_PTR(-ENOENT);
+
+found:
+    return wifi_pkg;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
+
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+                     union iwl_tas_config_cmd *cmd, int fw_ver)
+{
+    union acpi_object *wifi_pkg, *data;
+    int ret, tbl_rev, i, block_list_size, enabled;
+
+    data = iwl_acpi_get_object(fwrt->dev, ACPI_WTAS_METHOD);
+    if (IS_ERR(data))
+        return PTR_ERR(data);
+
+    /* try to read wtas table revision 1 or revision 0*/
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_WTAS_WIFI_DATA_SIZE,
+                                     &tbl_rev);
+    if (IS_ERR(wifi_pkg)) {
+        ret = PTR_ERR(wifi_pkg);
+        goto out_free;
+    }
+
+    if (tbl_rev == 1 && wifi_pkg->package.elements[1].type ==
+        ACPI_TYPE_INTEGER) {
+        u32 tas_selection =
+            (u32)wifi_pkg->package.elements[1].integer.value;
+        u16 override_iec =
+            (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
+        u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
+            ACPI_WTAS_ENABLE_IEC_POS;
+        u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
+
+
+        enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
+        if (fw_ver <= 3) {
+            cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
+            cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
+        } else {
+            cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
+            cmd->v4.override_tas_iec = (u8)override_iec;
+            cmd->v4.enable_tas_iec = (u8)enabled_iec;
+        }
+
+    } else if (tbl_rev == 0 &&
+        wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
+        enabled = !!wifi_pkg->package.elements[1].integer.value;
+    } else {
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    if (!enabled) {
+        IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+        ret = 0;
+        goto out_free;
+    }
+
+    IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
+    if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
+        wifi_pkg->package.elements[2].integer.value >
+        APCI_WTAS_BLACK_LIST_MAX) {
+        IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
+                        wifi_pkg->package.elements[2].integer.value);
+        ret = -EINVAL;
+        goto out_free;
+    }
+    block_list_size = wifi_pkg->package.elements[2].integer.value;
+    cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+
+    IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
+    if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
+        IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
+                        block_list_size);
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    for (i = 0; i < block_list_size; i++) {
+        u32 country;
+
+        if (wifi_pkg->package.elements[3 + i].type !=
+            ACPI_TYPE_INTEGER) {
+            IWL_DEBUG_RADIO(fwrt,
+                            "TAS invalid array elem %d\n", 3 + i);
+            ret = -EINVAL;
+            goto out_free;
+        }
+
+        country = wifi_pkg->package.elements[3 + i].integer.value;
+        cmd->v4.block_list_array[i] = cpu_to_le32(country);
+        IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
+    }
+
+    ret = 1;
+out_free:
+    kfree(data);
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
+
+int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+    union acpi_object *wifi_pkg, *data;
+    u32 mcc_val;
+    int ret, tbl_rev;
+
+    data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+    if (IS_ERR(data))
+        return PTR_ERR(data);
+
+    wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+                                     &tbl_rev);
+    if (IS_ERR(wifi_pkg)) {
+        ret = PTR_ERR(wifi_pkg);
+        goto out_free;
+    }
+
+    if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+        tbl_rev != 0) {
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    mcc_val = wifi_pkg->package.elements[1].integer.value;
+
+    mcc[0] = (mcc_val >> 8) & 0xff;
+    mcc[1] = mcc_val & 0xff;
+    mcc[2] = '\0';
+
+    ret = 0;
+out_free:
+    kfree(data);
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+    union acpi_object *data, *wifi_pkg;
+    u64 dflt_pwr_limit;
+    int tbl_rev;
+
+    data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
+    if (IS_ERR(data)) {
+        dflt_pwr_limit = 0;
+        goto out;
+    }
+
+    wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+                                     ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
+    if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
+        wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
+        dflt_pwr_limit = 0;
+        goto out_free;
+    }
+
+    dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+out_free:
+    kfree(data);
+out:
+    return dflt_pwr_limit;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
+
+int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+{
+    union acpi_object *wifi_pkg, *data;
+    int ret, tbl_rev;
+
+    data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+    if (IS_ERR(data))
+        return PTR_ERR(data);
+
+    wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+                                     &tbl_rev);
+    if (IS_ERR(wifi_pkg)) {
+        ret = PTR_ERR(wifi_pkg);
+        goto out_free;
+    }
+
+    if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+        tbl_rev != 0) {
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    *extl_clk = wifi_pkg->package.elements[1].integer.value;
+
+    ret = 0;
+
+out_free:
+    kfree(data);
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
+
+static int iwl_sar_set_profile(union acpi_object *table,
+                               struct iwl_sar_profile *profile,
+                               bool enabled, u8 num_chains, u8 num_sub_bands)
+{
+    int i, j, idx = 0;
+
+    /*
+     * The table from ACPI is flat, but we store it in a
+     * structured array.
+     */
+    for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
+        for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+            /* if we don't have the values, use the default */
+            if (i >= num_chains || j >= num_sub_bands) {
+                profile->chains[i].subbands[j] = 0;
+            } else {
+                if (table[idx].type != ACPI_TYPE_INTEGER ||
+                    table[idx].integer.value > U8_MAX)
+                    return -EINVAL;
+
+                profile->chains[i].subbands[j] =
+                    table[idx].integer.value;
+
+                idx++;
+            }
+        }
+    }
+
+    /* Only if all values were valid can the profile be enabled */
+    profile->enabled = enabled;
+
+    return 0;
+}
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+                              __le16 *per_chain, u32 n_subbands,
+                              int prof_a, int prof_b)
+{
+    int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
+    int i, j;
+
+    for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
+        struct iwl_sar_profile *prof;
+
+        /* don't allow SAR to be disabled (profile 0 means disable) */
+        if (profs[i] == 0)
+            return -EPERM;
+
+        /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+        if (profs[i] > ACPI_SAR_PROFILE_NUM)
+            return -EINVAL;
+
+        /* profiles go from 1 to 4, so decrement to access the array */
+        prof = &fwrt->sar_profiles[profs[i] - 1];
+
+        /* if the profile is disabled, do nothing */
+        if (!prof->enabled) {
+            IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+                            profs[i]);
+            /*
+             * if one of the profiles is disabled, we
+             * ignore all of them and return 1 to
+             * differentiate disabled from other failures.
+             */
+            return 1;
+        }
+
+        IWL_DEBUG_INFO(fwrt,
+                       "SAR EWRD: chain %d profile index %d\n",
+                       i, profs[i]);
+        IWL_DEBUG_RADIO(fwrt, "  Chain[%d]:\n", i);
+        for (j = 0; j < n_subbands; j++) {
+            per_chain[i * n_subbands + j] =
+                cpu_to_le16(prof->chains[i].subbands[j]);
+            IWL_DEBUG_RADIO(fwrt, "    Band[%d] = %d * .125dBm\n",
+                            j, prof->chains[i].subbands[j]);
+        }
+    }
+
+    return 0;
+}
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+                           __le16 *per_chain, u32 n_tables, u32 n_subbands,
+                           int prof_a, int prof_b)
+{
+    int i, ret = 0;
+
+    for (i = 0; i < n_tables; i++) {
+        ret = iwl_sar_fill_table(fwrt,
+                                 &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
+                                 n_subbands, prof_a, prof_b);
+        if (ret)
+            break;
+    }
+
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
+
+int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+    union acpi_object *wifi_pkg, *table, *data;
+    bool enabled;
+    int ret, tbl_rev;
+    u8 num_chains, num_sub_bands;
+
+    data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
+    if (IS_ERR(data))
+        return PTR_ERR(data);
+
+    /* start by trying to read revision 2 */
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_WRDS_WIFI_DATA_SIZE_REV2,
+                                     &tbl_rev);
+    if (!IS_ERR(wifi_pkg)) {
+        if (tbl_rev != 2) {
+            ret = PTR_ERR(wifi_pkg);
+            goto out_free;
+        }
+
+        num_chains = ACPI_SAR_NUM_CHAINS_REV2;
+        num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
+
+        goto read_table;
+    }
+
+    /* then try revision 1 */
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_WRDS_WIFI_DATA_SIZE_REV1,
+                                     &tbl_rev);
+    if (!IS_ERR(wifi_pkg)) {
+        if (tbl_rev != 1) {
+            ret = PTR_ERR(wifi_pkg);
+            goto out_free;
+        }
+
+        num_chains = ACPI_SAR_NUM_CHAINS_REV1;
+        num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
+
+        goto read_table;
+    }
+
+    /* then finally revision 0 */
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_WRDS_WIFI_DATA_SIZE_REV0,
+                                     &tbl_rev);
+    if (!IS_ERR(wifi_pkg)) {
+        if (tbl_rev != 0) {
+            ret = PTR_ERR(wifi_pkg);
+            goto out_free;
+        }
+
+        num_chains = ACPI_SAR_NUM_CHAINS_REV0;
+        num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
+
+        goto read_table;
+    }
+
+    ret = PTR_ERR(wifi_pkg);
+    goto out_free;
+
+read_table:
+    if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
+
+    enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+    /* position of the actual table */
+    table = &wifi_pkg->package.elements[2];
+
+    /* The profile from WRDS is officially profile 1, but goes
+     * into sar_profiles[0] (because we don't have a profile 0).
+     */
+    ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
+                              num_chains, num_sub_bands);
+out_free:
+    kfree(data);
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
+
+int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+    union acpi_object *wifi_pkg, *data;
+    bool enabled;
+    int i, n_profiles, tbl_rev, pos;
+    int ret = 0;
+    u8 num_chains, num_sub_bands;
+
+    data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
+    if (IS_ERR(data))
+        return PTR_ERR(data);
+
+    /* start by trying to read revision 2 */
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_EWRD_WIFI_DATA_SIZE_REV2,
+                                     &tbl_rev);
+    if (!IS_ERR(wifi_pkg)) {
+        if (tbl_rev != 2) {
+            ret = PTR_ERR(wifi_pkg);
+            goto out_free;
+        }
+
+        num_chains = ACPI_SAR_NUM_CHAINS_REV2;
+        num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
+
+        goto read_table;
+    }
+
+    /* then try revision 1 */
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_EWRD_WIFI_DATA_SIZE_REV1,
+                                     &tbl_rev);
+    if (!IS_ERR(wifi_pkg)) {
+        if (tbl_rev != 1) {
+            ret = PTR_ERR(wifi_pkg);
+            goto out_free;
+        }
+
+        num_chains = ACPI_SAR_NUM_CHAINS_REV1;
+        num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
+
+        goto read_table;
+    }
+
+    /* then finally revision 0 */
+    wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+                                     ACPI_EWRD_WIFI_DATA_SIZE_REV0,
+                                     &tbl_rev);
+    if (!IS_ERR(wifi_pkg)) {
+        if (tbl_rev != 0) {
+            ret = PTR_ERR(wifi_pkg);
+            goto out_free;
+        }
+
+        num_chains = ACPI_SAR_NUM_CHAINS_REV0;
+        num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
+
+        goto read_table;
+    }
+
+    ret = PTR_ERR(wifi_pkg);
+    goto out_free;
+
+read_table:
+    if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+        wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    enabled = !!(wifi_pkg->package.elements[1].integer.value);
+    n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+    /*
+     * Check the validity of n_profiles. The EWRD profiles start
+     * from index 1, so the maximum value allowed here is
+     * ACPI_SAR_PROFILES_NUM - 1.
+     */
+    if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+        ret = -EINVAL;
+        goto out_free;
+    }
+
+    /* the tables start at element 3 */
+    pos = 3;
+
+    for (i = 0; i < n_profiles; i++) {
+        /* The EWRD profiles officially go from 2 to 4, but we
+         * save them in sar_profiles[1-3] (because we don't
+         * have profile 0). So in the array we start from 1.
+         */
+        ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
+                                  &fwrt->sar_profiles[i + 1], enabled,
+                                  num_chains, num_sub_bands);
+        if (ret < 0)
+            break;
+
+        /* go to the next table */
+        pos += num_chains * num_sub_bands;
+    }
+
+out_free:
+    kfree(data);
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
+
+int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+    union acpi_object *wifi_pkg, *data;
+    int i, j, k, ret, tbl_rev;
+    u8 num_bands, num_profiles;
+    static const struct {
+        u8 revisions;
+        u8 bands;
+        u8 profiles;
+        u8 min_profiles;
+    } rev_data[] = {
+        {
+            .revisions = BIT(3),
+            .bands = ACPI_GEO_NUM_BANDS_REV2,
+            .profiles = ACPI_NUM_GEO_PROFILES_REV3,
+            .min_profiles = 3,
+        },
+        {
+            .revisions = BIT(2),
+            .bands = ACPI_GEO_NUM_BANDS_REV2,
+            .profiles = ACPI_NUM_GEO_PROFILES,
+        },
+        {
+            .revisions = BIT(0) | BIT(1),
+            .bands = ACPI_GEO_NUM_BANDS_REV0,
+            .profiles = ACPI_NUM_GEO_PROFILES,
+        },
+    };
+    int idx;
+    /* start from one to skip the domain */
+    int entry_idx = 1;
+
+    BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3);
+    BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES);
+
+    data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
+    if (IS_ERR(data))
+        return PTR_ERR(data);
+
+    /* read the highest revision we understand first */
+    for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) {
+        /* min_profiles != 0 requires num_profiles header */
+        u32 hdr_size = 1 + !!rev_data[idx].min_profiles;
+        u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE *
+                           rev_data[idx].bands;
+        u32 max_size = hdr_size + profile_size * rev_data[idx].profiles;
+        u32 min_size;
+
+        if (!rev_data[idx].min_profiles)
+            min_size = max_size;
+        else
+            min_size = hdr_size +
+                       profile_size * rev_data[idx].min_profiles;
+
+        wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data,
+                                               min_size, max_size,
+                                               &tbl_rev);
+        if (!IS_ERR(wifi_pkg)) {
+            if (!(BIT(tbl_rev) & rev_data[idx].revisions))
+                continue;
+
+            num_bands = rev_data[idx].bands;
+            num_profiles = rev_data[idx].profiles;
+
+            if (rev_data[idx].min_profiles) {
+                /* read header that says # of profiles */
+                union acpi_object *entry;
+
+                entry = &wifi_pkg->package.elements[entry_idx];
+                entry_idx++;
+                if (entry->type != ACPI_TYPE_INTEGER ||
+                    entry->integer.value > num_profiles) {
+                    ret = -EINVAL;
+                    goto out_free;
+                }
+                num_profiles = entry->integer.value;
+
+                /*
+                 * this also validates >= min_profiles since we
+                 * otherwise wouldn't have gotten the data when
+                 * looking up in ACPI
+                 */
+                if (wifi_pkg->package.count !=
+                    hdr_size + profile_size * num_profiles) {
+                    ret = -EINVAL;
+                    goto out_free;
+                }
+            }
+            goto read_table;
+        }
+    }
+
+    if (idx < ARRAY_SIZE(rev_data))
+        ret = PTR_ERR(wifi_pkg);
+    else
+        ret = -ENOENT;
+    goto out_free;
+
+read_table:
+    fwrt->geo_rev = tbl_rev;
+    for (i = 0; i < num_profiles; i++) {
+        for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+            union acpi_object *entry;
+
+            /*
+             * num_bands is either 2 or 3, if it's only 2 then
+             * fill the third band (6 GHz) with the values from
+             * 5 GHz (second band)
+             */
+            if (j >= num_bands) {
+                fwrt->geo_profiles[i].bands[j].max =
+                    fwrt->geo_profiles[i].bands[1].max;
+            } else {
+                entry = &wifi_pkg->package.elements[entry_idx];
+                entry_idx++;
+                if (entry->type != ACPI_TYPE_INTEGER ||
+                    entry->integer.value > U8_MAX) {
+                    ret = -EINVAL;
+                    goto out_free;
+                }
+
+                fwrt->geo_profiles[i].bands[j].max =
+                    entry->integer.value;
+            }
+
+            for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+                /* same here as above */
+                if (j >= num_bands) {
+                    fwrt->geo_profiles[i].bands[j].chains[k] =
+                        fwrt->geo_profiles[i].bands[1].chains[k];
+                } else {
+                    entry = &wifi_pkg->package.elements[entry_idx];
+                    entry_idx++;
+                    if (entry->type != ACPI_TYPE_INTEGER ||
+                        entry->integer.value > U8_MAX) {
+                        ret = -EINVAL;
+                        goto out_free;
+                    }
+
+                    fwrt->geo_profiles[i].bands[j].chains[k] =
+                        entry->integer.value;
+                }
+            }
+        }
+    }
+
+    fwrt->geo_num_profiles = num_profiles;
+    fwrt->geo_enabled = true;
+    ret = 0;
+out_free:
+    kfree(data);
+    return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+    /*
+     * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+     * earlier firmware versions. Unfortunately, we don't have a
+     * TLV API flag to rely on, so rely on the major version which
+     * is in the first byte of ucode_ver. This was implemented
+     * initially on version 38 and then backported to 17. It was
+     * also backported to 29, but only for 7265D devices. The
+     * intention was to have it in 36 as well, but not all 8000
+     * family got this feature enabled. The 8000 family is the
+     * only one using version 36, so skip this version entirely.
+     */
+    return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+           IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
+           (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+            ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+             CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                     struct iwl_per_chain_offset *table,
+                     u32 n_bands, u32 n_profiles)
+{
+    int i, j;
+
+    if (!iwl_sar_geo_support(fwrt))
+        return -EOPNOTSUPP;
+
+    for (i = 0; i < n_profiles; i++) {
+        for (j = 0; j < n_bands; j++) {
+            struct iwl_per_chain_offset *chain =
+                &table[i * n_bands + j];
+
+            chain->max_tx_power =
+                cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+            chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
+            chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
+            IWL_DEBUG_RADIO(fwrt,
+                            "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+                            i, j,
+                            fwrt->geo_profiles[i].bands[j].chains[0],
+                            fwrt->geo_profiles[i].bands[j].chains[1],
+                            fwrt->geo_profiles[i].bands[j].max);
+        }
+    }
+
+    return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
+
+__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+    int ret;
+    u8 value;
+    __le32 config_bitmap = 0;
+
+    /*
+     ** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
+     */
+    ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+                              DSM_FUNC_ENABLE_INDONESIA_5G2,
+                              &iwl_guid, &value);
+
+    if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
+        config_bitmap |=
+            cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+
+    /*
+     ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
+     */
+    ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
+                              DSM_FUNC_DISABLE_SRD,
+                              &iwl_guid, &value);
+    if (!ret) {
+        if (value == DSM_VALUE_SRD_PASSIVE)
+            config_bitmap |=
+                cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+        else if (value == DSM_VALUE_SRD_DISABLE)
+            config_bitmap |=
+                cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+    }
+
+    return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
diff --git a/fw/acpi.h b/fw/acpi.h
index 4aaa8a6b071b..466c95c21aa9 100644
--- a/fw/acpi.h
+++ b/fw/acpi.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
  */
 #ifndef __iwl_fw_acpi__
 #define __iwl_fw_acpi__
@@ -65,10 +65,21 @@
 #define ACPI_ECKV_WIFI_DATA_SIZE 2
 
 /*
- * 1 type, 1 enabled, 1 block list size, 16 block list array
+ * TAS size: 1 elelment for type,
+ *     1 element for enabled field,
+ *     1 element for block list size,
+ *     16 elements for block list array
  */
 #define APCI_WTAS_BLACK_LIST_MAX 16
 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
+#define ACPI_WTAS_ENABLED_MSK 0x1
+#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
+#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
+#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
+#define ACPI_WTAS_ENABLE_IEC_POS 0x2
+#define ACPI_WTAS_USA_UHB_MSK BIT(16)
+#define ACPI_WTAS_USA_UHB_POS 16
+
 #define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
                                       IWL_NUM_SUB_BANDS_V1) + 2)
@@ -105,6 +116,11 @@ struct iwl_geo_profile {
     struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
 };
 
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+    s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
+};
+
 enum iwl_dsm_funcs_rev_0 {
     DSM_FUNC_QUERY = 0,
     DSM_FUNC_DISABLE_SRD = 1,
@@ -112,7 +128,8 @@ enum iwl_dsm_funcs_rev_0 {
     DSM_FUNC_ENABLE_6E = 3,
     DSM_FUNC_11AX_ENABLEMENT = 6,
     DSM_FUNC_ENABLE_UNII4_CHAN = 7,
-    DSM_FUNC_ACTIVATE_CHANNEL = 8
+    DSM_FUNC_ACTIVATE_CHANNEL = 8,
+    DSM_FUNC_FORCE_DISABLE_CHANNELS = 9
 };
 
 enum iwl_dsm_values_srd {
@@ -198,8 +215,8 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
                      struct iwl_per_chain_offset *table,
                      u32 n_bands, u32 n_profiles);
 
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array,
-                     int *block_list_size);
+int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
+                     union iwl_tas_config_cmd *cmd, int fw_ver);
 
 __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
 
@@ -280,8 +297,7 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
 }
 
 static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
-                                   __le32 *block_list_array,
-                                   int *block_list_size)
+                                   union iwl_tas_config_cmd *cmd, int fw_ver)
 {
     return -ENOENT;
 }
diff --git a/fw/api/alive.h b/fw/api/alive.h
index c840a97e6a62..e00ab21e7358 100644
--- a/fw/api/alive.h
+++ b/fw/api/alive.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -97,6 +97,21 @@ struct iwl_alive_ntf_v5 {
     struct iwl_sku_id sku_id;
 } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
 
+struct iwl_imr_alive_info {
+    __le64 base_addr;
+    __le32 size;
+    __le32 enabled;
+} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */
+
+struct iwl_alive_ntf_v6 {
+    __le16 status;
+    __le16 flags;
+    struct iwl_lmac_alive lmac_data[2];
+    struct iwl_umac_alive umac_data;
+    struct iwl_sku_id sku_id;
+    struct iwl_imr_alive_info imr;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+
 /**
  * enum iwl_extended_cfg_flag - commands driver may send before
  *     finishing init flow
@@ -143,15 +158,6 @@ enum iwl_card_state_flags {
 };
 
 /**
- * struct iwl_radio_version_notif - information on the card state
- *     ( CARD_STATE_NOTIFICATION = 0xa1 )
- * @flags: &enum iwl_card_state_flags
- */
-struct iwl_card_state_notif {
-    __le32 flags;
-} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
-
 /**
  * enum iwl_error_recovery_flags - flags for error recovery cmd
  * @ERROR_RECOVERY_UPDATE_DB: update db from blob sent
  * @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery
diff --git a/fw/api/commands.h b/fw/api/commands.h
index ee6b5844a871..a91bd66ecb30 100644
--- a/fw/api/commands.h
+++ b/fw/api/commands.h
@@ -51,7 +51,7 @@ enum iwl_legacy_cmds {
     * @UCODE_ALIVE_NTFY:
     * Alive data from the firmware, as described in
     * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or
-    * &struct iwl_alive_ntf_v5.
+    * &struct iwl_alive_ntf_v5 or &struct iwl_alive_ntf_v6.
     */
     UCODE_ALIVE_NTFY = 0x1,
@@ -72,7 +72,8 @@ enum iwl_legacy_cmds {
 
     /**
     * @PHY_CONTEXT_CMD:
-    * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
+    * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd
+    * or &struct iwl_phy_context_cmd_v1.
     */
     PHY_CONTEXT_CMD = 0x8,
@@ -90,7 +91,8 @@ enum iwl_legacy_cmds {
 
     /**
     * @SCAN_CFG_CMD:
-    * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
+    * uses &struct iwl_scan_config_v1, &struct iwl_scan_config_v2
+    * or &struct iwl_scan_config
     */
     SCAN_CFG_CMD = 0xc,
@@ -321,14 +323,6 @@ enum iwl_legacy_cmds {
     REPLY_THERMAL_MNG_BACKOFF = 0x7e,
 
     /**
-    * @DC2DC_CONFIG_CMD:
-    * Set/Get DC2DC frequency tune
-    * Command is &struct iwl_dc2dc_config_cmd,
-    * response is &struct iwl_dc2dc_config_resp
-    */
-    DC2DC_CONFIG_CMD = 0x83,
-
-    /**
     * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
     */
     NVM_ACCESS_CMD = 0x88,
@@ -356,7 +350,7 @@ enum iwl_legacy_cmds {
     * &struct iwl_notif_statistics_v11,
     * &struct iwl_notif_statistics_v10,
     * &struct iwl_notif_statistics,
-    * &struct iwl_statistics_operational_ntfy
+    * &struct iwl_statistics_operational_ntfy_ver_14
     */
     STATISTICS_CMD = 0x9c,
@@ -365,6 +359,7 @@ enum iwl_legacy_cmds {
     * one of &struct iwl_notif_statistics_v10,
     * &struct iwl_notif_statistics_v11,
     * &struct iwl_notif_statistic,
+    * &struct iwl_statistics_operational_ntfy_ver_14
     * &struct iwl_statistics_operational_ntfy
     */
     STATISTICS_NOTIFICATION = 0x9d,
@@ -383,13 +378,6 @@ enum iwl_legacy_cmds {
     REDUCE_TX_POWER_CMD = 0x9f,
 
     /**
-    * @CARD_STATE_NOTIFICATION:
-    * Card state (RF/CT kill) notification,
-    * uses &struct iwl_card_state_notif
-    */
-    CARD_STATE_NOTIFICATION = 0xa1,
-
-    /**
     * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
     */
     MISSED_BEACONS_NOTIFICATION = 0xa2,
@@ -612,6 +600,16 @@ enum iwl_system_subcmd_ids {
     * @RFI_GET_FREQ_TABLE_CMD: &struct iwl_rfi_config_cmd
     */
     RFI_GET_FREQ_TABLE_CMD = 0xc,
+
+    /**
+    * @SYSTEM_FEATURES_CONTROL_CMD: &struct iwl_system_features_control_cmd
+    */
+    SYSTEM_FEATURES_CONTROL_CMD = 0xd,
+
+    /**
+    * @RFI_DEACTIVATE_NOTIF: &struct iwl_rfi_deactivate_notif
+    */
+    RFI_DEACTIVATE_NOTIF = 0xff,
 };
 
 #endif /* __iwl_fw_api_commands_h__ */
diff --git a/fw/api/config.h b/fw/api/config.h
index 1ab92f62c414..087354b3c308 100644
--- a/fw/api/config.h
+++ b/fw/api/config.h
@@ -114,37 +114,4 @@ enum iwl_dc2dc_config_id {
     DCDC_FREQ_TUNE_SET = 0x2,
 }; /* MARKER_ID_API_E_VER_1 */
 
-/**
- * struct iwl_dc2dc_config_cmd - configure dc2dc values
- *
- * (DC2DC_CONFIG_CMD = 0x83)
- *
- * Set/Get & configure dc2dc values.
- * The command always returns the current dc2dc values.
- *
- * @flags: set/get dc2dc
- * @enable_low_power_mode: not used.
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_cmd {
-    __le32 flags;
-    __le32 enable_low_power_mode; /* not used */
-    __le32 dc2dc_freq_tune0;
-    __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
-
-/**
- * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
- *
- * Current dc2dc values returned by the FW.
- *
- * @dc2dc_freq_tune0: frequency divider - digital domain
- * @dc2dc_freq_tune1: frequency divider - analog domain
- */
-struct iwl_dc2dc_config_resp {
-    __le32 dc2dc_freq_tune0;
-    __le32 dc2dc_freq_tune1;
-} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
-
 #endif /* __iwl_fw_api_config_h__ */
diff --git a/fw/api/d3.h b/fw/api/d3.h
index 1503119ea910..4cd9ab23954e 100644
--- a/fw/api/d3.h
+++ b/fw/api/d3.h
@@ -554,7 +554,7 @@ struct iwl_wowlan_gtk_status_v1 {
 } __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
 
 /**
- * struct iwl_wowlan_gtk_status - GTK status
+ * struct iwl_wowlan_gtk_status_v2 - GTK status
  * @key: GTK material
  * @key_len: GTK legth, if set to 0, the key is not available
  * @key_flags: information about the key:
@@ -565,7 +565,7 @@ struct iwl_wowlan_gtk_status_v1 {
  * @tkip_mic_key: TKIP RX MIC key
  * @rsc: TSC RSC counters
  */
-struct iwl_wowlan_gtk_status {
+struct iwl_wowlan_gtk_status_v2 {
     u8 key[WOWLAN_KEY_MAX_SIZE];
     u8 key_len;
     u8 key_flags;
@@ -574,6 +574,41 @@ struct iwl_wowlan_gtk_status_v1 {
     struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
 } __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
 
+/**
+ * struct iwl_wowlan_all_rsc_tsc_v5 - key counters
+ * @ucast_rsc: unicast RSC values
+ * @mcast_rsc: multicast RSC values (per key map value)
+ * @sta_id: station ID
+ * @mcast_key_id_map: map of key id to @mcast_rsc entry
+ */
+struct iwl_wowlan_all_rsc_tsc_v5 {
+    __le64 ucast_rsc[IWL_MAX_TID_COUNT];
+    __le64 mcast_rsc[2][IWL_MAX_TID_COUNT];
+    __le32 sta_id;
+    u8 mcast_key_id_map[4];
+} __packed; /* ALL_TSC_RSC_API_S_VER_5 */
+
+/**
+ * struct iwl_wowlan_gtk_status_v3 - GTK status
+ * @key: GTK material
+ * @key_len: GTK length, if set to 0, the key is not available
+ * @key_flags: information about the key:
+ *     bits[0:1]: key index assigned by the AP
+ *     bits[2:6]: GTK index of the key in the internal DB
+ *     bit[7]: Set iff this is the currently used GTK
+ * @reserved: padding
+ * @tkip_mic_key: TKIP RX MIC key
+ * @sc: RSC/TSC counters
+ */
+struct iwl_wowlan_gtk_status_v3 {
+    u8 key[WOWLAN_KEY_MAX_SIZE];
+    u8 key_len;
+    u8 key_flags;
+    u8 reserved[2];
+    u8 tkip_mic_key[IWL_MIC_KEY_SIZE];
+    struct iwl_wowlan_all_rsc_tsc_v5 sc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_3 */
+
 #define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
 
 /**
@@ -640,7 +675,7 @@ struct iwl_wowlan_status_v6 {
  * @wake_packet: wakeup packet
  */
 struct iwl_wowlan_status_v7 {
-    struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+    struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
     struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
     __le64 replay_ctr;
     __le16 pattern_number;
@@ -676,7 +711,7 @@ struct iwl_wowlan_status_v7 {
  * @wake_packet: wakeup packet
  */
 struct iwl_wowlan_status_v9 {
-    struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
+    struct iwl_wowlan_gtk_status_v2 gtk[WOWLAN_GTK_KEYS_NUM];
     struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
     __le64 replay_ctr;
    __le16 pattern_number;
@@ -693,6 +728,44 @@ struct iwl_wowlan_status_v9 {
     u8 wake_packet[]; /* can be truncated from _length to _bufsize */
 } __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
 
+/**
+ * struct iwl_wowlan_status_v12 - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next.
+ *     Reserved if the struct has version >= 10.
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @tid_tear_down: bit mask of tids whose BA sessions were closed
+ *     in suspend state
+ * @reserved: unused
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status_v12 {
+    struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
+    struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
+    __le64 replay_ctr;
+    __le16 pattern_number;
+    __le16 non_qos_seq_ctr;
+    __le16 qos_seq_ctr[8];
+    __le32 wakeup_reasons;
+    __le32 num_of_gtk_rekeys;
+    __le32 transmitted_ndps;
+    __le32 received_beacons;
+    __le32 wake_packet_length;
+    __le32 wake_packet_bufsize;
+    u8 tid_tear_down;
+    u8 reserved[3];
+    u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_12 */
+
 /* TODO: NetDetect API */
 
 #endif /* __iwl_fw_api_d3_h__ */
diff --git a/fw/api/datapath.h b/fw/api/datapath.h
index 985b0dc5b52a..43619acc29fd 100644
--- a/fw/api/datapath.h
+++ b/fw/api/datapath.h
@@ -32,12 +32,17 @@ enum iwl_data_path_subcmd_ids {
     STA_HE_CTXT_CMD = 0x7,
 
     /**
+     * @RLC_CONFIG_CMD: &struct iwl_rlc_config_cmd
+     */
+    RLC_CONFIG_CMD = 0x8,
+
+    /**
      * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
      */
     RFH_QUEUE_CONFIG_CMD = 0xD,
 
     /**
-     * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+     * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd_v4
      */
     TLC_MNG_CONFIG_CMD = 0xF,
 
@@ -53,6 +58,20 @@ enum iwl_data_path_subcmd_ids {
     CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
 
     /**
+     * @RX_BAID_ALLOCATION_CONFIG_CMD: Allocate/deallocate a BAID for an RX
+     *     blockack session, uses &struct iwl_rx_baid_cfg_cmd for the
+     *     command, and &struct iwl_rx_baid_cfg_resp as a response.
+     */
+    RX_BAID_ALLOCATION_CONFIG_CMD = 0x16,
+
+    /**
+     * @SCD_QUEUE_CONFIG_CMD: new scheduler queue allocation/config/removal
+     *     command, uses &struct iwl_scd_queue_cfg_cmd and the response
+     *     is (same as before) &struct iwl_tx_queue_cfg_rsp.
+     */
+    SCD_QUEUE_CONFIG_CMD = 0x17,
+
     /**
      * @MONITOR_NOTIF: Datapath monitoring notification, using
      *     &struct iwl_datapath_monitor_notif
@@ -195,4 +214,193 @@ struct iwl_thermal_dual_chain_request {
     __le32 event;
 } __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */
 
+enum iwl_rlc_chain_info {
+    IWL_RLC_CHAIN_INFO_DRIVER_FORCE = BIT(0),
+    IWL_RLC_CHAIN_INFO_VALID = 0x000e,
+    IWL_RLC_CHAIN_INFO_FORCE = 0x0070,
+    IWL_RLC_CHAIN_INFO_FORCE_MIMO = 0x0380,
+    IWL_RLC_CHAIN_INFO_COUNT = 0x0c00,
+    IWL_RLC_CHAIN_INFO_MIMO_COUNT = 0x3000,
+};
+
+/**
+ * struct iwl_rlc_properties - RLC properties
+ * @rx_chain_info: RX chain info, &enum iwl_rlc_chain_info
+ * @reserved: reserved
+ */
+struct iwl_rlc_properties {
+    __le32 rx_chain_info;
+    __le32 reserved;
+} __packed; /* RLC_PROPERTIES_S_VER_1 */
+
+enum iwl_sad_mode {
+    IWL_SAD_MODE_ENABLED = BIT(0),
+    IWL_SAD_MODE_DEFAULT_ANT_MSK = 0x6,
+    IWL_SAD_MODE_DEFAULT_ANT_FW = 0x0,
+    IWL_SAD_MODE_DEFAULT_ANT_A = 0x2,
+    IWL_SAD_MODE_DEFAULT_ANT_B = 0x4,
+};
+
+/**
+ * struct iwl_sad_properties - SAD properties
+ * @chain_a_sad_mode: chain A SAD mode, &enum iwl_sad_mode
+ * @chain_b_sad_mode: chain B SAD mode, &enum iwl_sad_mode
+ * @mac_id: MAC index
+ * @reserved: reserved
+ */
+struct iwl_sad_properties {
+    __le32 chain_a_sad_mode;
+    __le32 chain_b_sad_mode;
+    __le32 mac_id;
+    __le32 reserved;
+} __packed;
+
+/**
+ * struct iwl_rlc_config_cmd - RLC configuration
+ * @phy_id: PHY index
+ * @rlc: RLC properties, &struct iwl_rlc_properties
+ * @sad: SAD (single antenna diversity) options, &struct iwl_sad_properties
+ * @flags: flags, &enum iwl_rlc_flags
+ * @reserved: reserved
+ */
+struct iwl_rlc_config_cmd {
+    __le32 phy_id;
+    struct iwl_rlc_properties rlc;
+    struct iwl_sad_properties sad;
+    u8 flags;
+    u8 reserved[3];
+} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+
+#define IWL_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWL_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * enum iwl_rx_baid_action - BAID allocation/config action
+ * @IWL_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWL_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWL_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+enum iwl_rx_baid_action {
+    IWL_RX_BAID_ACTION_ADD,
+    IWL_RX_BAID_ACTION_MODIFY,
+    IWL_RX_BAID_ACTION_REMOVE,
+}; /* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwl_rx_baid_cfg_cmd_alloc {
+    __le32 sta_id_mask;
+    u8 tid;
+    u8 reserved[3];
+    __le16 ssn;
+    __le16 win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwl_rx_baid_cfg_cmd_modify {
+    __le32 old_sta_id_mask;
+    __le32 new_sta_id_mask;
+    __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove_v1 {
+    __le32 baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwl_rx_baid_cfg_cmd_remove {
+    __le32 sta_id_mask;
+    __le32 tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, from &enum iwl_rx_baid_action
+ */
+struct iwl_rx_baid_cfg_cmd {
+    __le32 action;
+    union {
+        struct iwl_rx_baid_cfg_cmd_alloc alloc;
+        struct iwl_rx_baid_cfg_cmd_modify modify;
+        struct iwl_rx_baid_cfg_cmd_remove_v1 remove_v1;
+        struct iwl_rx_baid_cfg_cmd_remove remove;
+    }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwl_rx_baid_cfg_resp {
+    __le32 baid;
+}; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
+
+/**
+ * enum iwl_scd_queue_cfg_operation - scheduler queue operation
+ * @IWL_SCD_QUEUE_ADD: allocate a new queue
+ * @IWL_SCD_QUEUE_REMOVE: remove a queue
+ * @IWL_SCD_QUEUE_MODIFY: modify a queue
+ */
+enum iwl_scd_queue_cfg_operation {
+    IWL_SCD_QUEUE_ADD = 0,
+    IWL_SCD_QUEUE_REMOVE = 1,
+    IWL_SCD_QUEUE_MODIFY = 2,
+};
+
+/**
+ * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from &enum iwl_tx_queue_cfg_actions, except
+ *     %TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.queue: queue ID for removal
+ * @u.modify.sta_mask: new station mask for modify
+ * @u.modify.queue: queue ID to modify
+ */
+struct iwl_scd_queue_cfg_cmd {
+    __le32 operation;
+    union {
+        struct {
+            __le32 sta_mask;
+            u8 tid;
+            u8 reserved[3];
+            __le32 flags;
+            __le32 cb_size;
+            __le64 bc_dram_addr;
+            __le64 tfdq_dram_addr;
+        } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+        struct {
+            __le32 queue;
+        } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+        struct {
+            __le32 sta_mask;
+            __le32 queue;
+        } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+    } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
+
 #endif /* __iwl_fw_api_datapath_h__ */
diff --git a/fw/api/dbg-tlv.h b/fw/api/dbg-tlv.h
index 3988f5fea33a..52bf96585fc6 100644
--- a/fw/api/dbg-tlv.h
+++ b/fw/api/dbg-tlv.h
@@ -1,18 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
 */
 #ifndef __iwl_fw_dbg_tlv_h__
 #define __iwl_fw_dbg_tlv_h__
 
 #include <linux/bitops.h>
 
-#define IWL_FW_INI_HW_SMEM_REGION_ID 15
 #define IWL_FW_INI_MAX_REGION_ID 64
 #define IWL_FW_INI_MAX_NAME 32
 #define IWL_FW_INI_MAX_CFG_NAME 64
 #define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
-#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF
+#define IWL_FW_INI_REGION_ID_MASK GENMASK(15, 0)
+#define IWL_FW_INI_REGION_DUMP_POLICY_MASK GENMASK(31, 16)
 
 /**
  * struct iwl_fw_ini_hcmd
@@ -124,6 +124,9 @@ struct iwl_fw_ini_region_internal_buffer {
  * @hdr: debug header
  * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
  * @type: region type. One of &enum iwl_fw_ini_region_type
+ * @sub_type: region sub type
+ * @sub_type_ver: region sub type version
+ * @reserved: not in use
  * @name: region name
 * @dev_addr: device address configuration. Used by
Used by * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC, @@ -146,7 +149,10 @@ struct iwl_fw_ini_region_internal_buffer { struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_header hdr; __le32 id; - __le32 type; + u8 type; + u8 sub_type; + u8 sub_type_ver; + u8 reserved; u8 name[IWL_FW_INI_MAX_NAME]; union { struct iwl_fw_ini_region_dev_addr dev_addr; @@ -244,11 +250,10 @@ struct iwl_fw_ini_hcmd_tlv { } __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */ /** -* struct iwl_fw_ini_conf_tlv - preset configuration TLV +* struct iwl_fw_ini_addr_val - Address and value to set it to * * @address: the base address * @value: value to set at address - */ struct iwl_fw_ini_addr_val { __le32 address; @@ -306,6 +311,7 @@ enum iwl_fw_ini_config_set_type { * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration + * @IWL_FW_INI_ALLOCATION_ID_DBGC4: allocation meant for DBGC4 configuration * @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids */ enum iwl_fw_ini_allocation_id { @@ -313,6 +319,7 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_DBGC1, IWL_FW_INI_ALLOCATION_ID_DBGC2, IWL_FW_INI_ALLOCATION_ID_DBGC3, + IWL_FW_INI_ALLOCATION_ID_DBGC4, IWL_FW_INI_ALLOCATION_NUM, }; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ @@ -379,6 +386,17 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_NUM }; /* FW_TLV_DEBUG_REGION_TYPE_API_E */ +enum iwl_fw_ini_region_device_memory_subtype { + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM = 1, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE = 5, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE = 7, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE = 10, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE = 14, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE = 16, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE = 18, + IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE = 20, +}; /* FW_TLV_DEBUG_REGION_DEVICE_MEMORY_SUBTYPE_API_E */ + /** * enum iwl_fw_ini_time_point * @@ -457,6 +475,7 @@ enum iwl_fw_ini_time_point { * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data. 
* Append otherwise + * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send a command once the dump was collected */ enum iwl_fw_ini_trigger_apply_policy { IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0), @@ -464,5 +483,46 @@ enum iwl_fw_ini_trigger_apply_policy { IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8), IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9), IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10), + IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16), +}; + +/** + * enum iwl_fw_ini_trigger_reset_fw_policy - Determines how to handle reset + * + * @IWL_FW_INI_RESET_FW_MODE_NOTHING: do not stop or reload the FW (default) + * @IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY: stop the FW without reloading it + * @IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW: stop the FW and reload it + */ +enum iwl_fw_ini_trigger_reset_fw_policy { + IWL_FW_INI_RESET_FW_MODE_NOTHING = 0, + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY, + IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW +}; + +/** + * enum iwl_fw_ini_dump_policy - Determines how to handle dump based on enabled flags + * + * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit on the dump size + * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump, limited to 600KB of region data + * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump, limited to 5MB of region data + */ +enum iwl_fw_ini_dump_policy { + IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0), + IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1), + IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2), + +}; + +/** + * enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW. + * + * @IWL_FW_INI_DUMP_BRIEF: only dump the most important regions + * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions + * @IWL_FW_INI_DUMP_VERBOSE: dump all regions + */ +enum iwl_fw_ini_dump_type { + IWL_FW_INI_DUMP_BRIEF, + IWL_FW_INI_DUMP_MEDIUM, + IWL_FW_INI_DUMP_VERBOSE, }; #endif diff --git a/fw/api/debug.h b/fw/api/debug.h index 029ae64bf2b2..6255257ddebe 100644 --- a/fw/api/debug.h +++ b/fw/api/debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -43,6 +43,12 @@ enum iwl_debug_cmds { */ BUFFER_ALLOCATION = 0x8, /** + * @FW_DUMP_COMPLETE_CMD: + * sends a command to the FW once dump collection is completed + * &struct iwl_dbg_dump_complete_cmd + */ + FW_DUMP_COMPLETE_CMD = 0xB, + /** * @MFU_ASSERT_DUMP_NTF: * &struct iwl_mfu_assert_dump_notif */ @@ -404,4 +410,15 @@ struct iwl_dbg_host_event_cfg_cmd { __le32 enabled_severities; } __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */ +/** + * struct iwl_dbg_dump_complete_cmd - dump complete cmd + * + * @tp: timepoint whose dump has completed + * @tp_data: timepoint data + */ +struct iwl_dbg_dump_complete_cmd { + __le32 tp; + __le32 tp_data; +} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/fw/api/mac-cfg.h b/fw/api/mac-cfg.h index d088c820b1a9..712532f17630 100644 --- a/fw/api/mac-cfg.h +++ b/fw/api/mac-cfg.h @@ -27,6 +27,10 @@ enum iwl_mac_conf_subcmd_ids { * @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd */ SESSION_PROTECTION_CMD = 0x5, + /** + * @CANCEL_CHANNEL_SWITCH_CMD: &struct iwl_cancel_channel_switch_cmd + */ + CANCEL_CHANNEL_SWITCH_CMD = 0x6, /** * @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif @@ -42,6 +46,11 @@ enum 
iwl_mac_conf_subcmd_ids { * @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif */ CHANNEL_SWITCH_START_NOTIF = 0xFF, + + /** + * @CHANNEL_SWITCH_ERROR_NOTIF: &struct iwl_channel_switch_error_notif + */ + CHANNEL_SWITCH_ERROR_NOTIF = 0xF9, }; #define IWL_P2P_NOA_DESC_COUNT (2) @@ -110,6 +119,31 @@ struct iwl_channel_switch_start_notif { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ +#define CS_ERR_COUNT_ERROR BIT(0) +#define CS_ERR_LONG_DELAY_AFTER_CS BIT(1) +#define CS_ERR_LONG_TX_BLOCK BIT(2) +#define CS_ERR_TX_BLOCK_TIMER_EXPIRED BIT(3) + +/** + * struct iwl_channel_switch_error_notif - Channel switch error notification + * + * @mac_id: the mac for which the ucode sends the notification + * @csa_err_mask: mask of channel switch errors that can occur + */ +struct iwl_channel_switch_error_notif { + __le32 mac_id; + __le32 csa_err_mask; +} __packed; /* CHANNEL_SWITCH_ERROR_NTFY_API_S_VER_1 */ + +/** + * struct iwl_cancel_channel_switch_cmd - Cancel Channel Switch command + * + * @mac_id: the mac that should cancel the channel switch + */ +struct iwl_cancel_channel_switch_cmd { + __le32 mac_id; +} __packed; /* MAC_CANCEL_CHANNEL_SWITCH_S_VER_1 */ + /** * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command * diff --git a/fw/api/mac.h b/fw/api/mac.h index 11f0bd283e49..9b7caf968346 100644 --- a/fw/api/mac.h +++ b/fw/api/mac.h @@ -413,10 +413,11 @@ enum iwl_he_pkt_ext_constellations { }; #define MAX_HE_SUPP_NSS 2 -#define MAX_HE_CHANNEL_BW_INDX 4 +#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4 +#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5 /** - * struct iwl_he_pkt_ext - QAM thresholds + * struct iwl_he_pkt_ext_v1 - QAM thresholds * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS * The IE is organized in the following way: * Support for Nss x BW (or RU) matrix: @@ -435,9 +436,34 @@ enum iwl_he_pkt_ext_constellations { * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x * (0-low_th, 1-high_th) */ -struct iwl_he_pkt_ext { - u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2]; -} __packed; /* PKT_EXT_DOT11AX_API_S */ +struct iwl_he_pkt_ext_v1 { + u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2]; +} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */ + +/** + * struct iwl_he_pkt_ext_v2 - QAM thresholds + * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS + * The IE is organized in the following way: + * Support for Nss x BW (or RU) matrix: + * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) + * Each entry contains 2 QAM thresholds for 8us and 16us: + * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE + * i.e. 
QAM_th1 < QAM_th2 such that if TX uses QAM_tx: + * QAM_tx < QAM_th1 --> PPE=0us + * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us + * QAM_th2 <= QAM_tx --> PPE=16us + * @pkt_ext_qam_th: QAM thresholds + * For each Nss/Bw define 2 QAM thresholds (0..5) + * For rates below the low_th, no need for PPE + * For rates between low_th and high_th, need 8us PPE + * For rates equal to or higher than the high_th, need 16us PPE + * Nss (0-siso, 1-mimo2) x + * BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz, 4-320MHz) x + * (0-low_th, 1-high_th) + */ +struct iwl_he_pkt_ext_v2 { + u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2]; +} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */ /** * enum iwl_he_sta_ctxt_flags - HE STA context flags @@ -464,6 +490,11 @@ struct iwl_he_pkt_ext { * @STA_CTXT_HE_RU_2MHZ_BLOCK: indicates that 26-tone RU OFDMA transmission are * not allowed (as there are OBSS that might classify such transmissions as * radar pulses). + * @STA_CTXT_HE_NDP_FEEDBACK_ENABLED: mark support for NDP feedback and change + * of threshold + * @STA_CTXT_EHT_PUNCTURE_MASK_VALID: indicates the puncture_mask field is valid + * @STA_CTXT_EHT_LONG_PPE_ENABLED: indicates the PPE requirement should be + * extended to 20us for BW > 160MHz or for MCS w/ 4096-QAM. */ enum iwl_he_sta_ctxt_flags { STA_CTXT_HE_REF_BSSID_VALID = BIT(4), @@ -477,6 +508,9 @@ enum iwl_he_sta_ctxt_flags { STA_CTXT_HE_MU_EDCA_CW = BIT(12), STA_CTXT_HE_NIC_NOT_ACK_ENABLED = BIT(13), STA_CTXT_HE_RU_2MHZ_BLOCK = BIT(14), + STA_CTXT_HE_NDP_FEEDBACK_ENABLED = BIT(15), + STA_CTXT_EHT_PUNCTURE_MASK_VALID = BIT(16), + STA_CTXT_EHT_LONG_PPE_ENABLED = BIT(17), }; /** @@ -551,7 +585,7 @@ struct iwl_he_sta_context_cmd_v1 { u8 frag_min_size; /* The below fields are set via PPE thresholds element */ - struct iwl_he_pkt_ext pkt_ext; + struct iwl_he_pkt_ext_v1 pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; @@ -568,7 +602,7 @@ struct iwl_he_sta_context_cmd_v1 { } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */ /** - * struct iwl_he_sta_context_cmd - configure FW to work with HE AP + * struct iwl_he_sta_context_cmd_v2 - configure FW to work with HE AP * @sta_id: STA id * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit @@ -599,7 +633,7 @@ struct iwl_he_sta_context_cmd_v1 { * @bssid_count: actual number of VAPs in the MultiBSS Set * @reserved4: alignment */ -struct iwl_he_sta_context_cmd { +struct iwl_he_sta_context_cmd_v2 { u8 sta_id; u8 tid_limit; u8 reserved1; @@ -619,7 +653,7 @@ struct iwl_he_sta_context_cmd { u8 frag_min_size; /* The below fields are set via PPE thresholds element */ - struct iwl_he_pkt_ext pkt_ext; + struct iwl_he_pkt_ext_v1 pkt_ext; /* The below fields are set via HE-Operation IE */ u8 bss_color; @@ -643,6 +677,81 @@ struct iwl_he_sta_context_cmd { } __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */ /** + * struct iwl_he_sta_context_cmd_v3 - configure FW to work with HE AP + * @sta_id: STA id + * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg + * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit + * @reserved1: reserved byte for future use + * @reserved2: reserved byte for future use + * @flags: see %iwl_11ax_sta_ctxt_flags + * @ref_bssid_addr: reference BSSID used by the AP + * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes + * @htc_flags: which features are supported in HTC + * @frag_flags: frag support in A-MSDU + * @frag_level: frag support level + * @frag_max_num: max num of "open" MSDUs in 
the receiver (in power of 2) + * @frag_min_size: min frag size (except last frag) + * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa + * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame + * @htc_trig_based_pkt_ext: default PE in 4us units + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 + * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 + * @puncture_mask: puncture mask for EHT + * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + * @max_bssid_indicator: indicator of the max bssid supported on the associated + * bss + * @bssid_index: index of the associated VAP + * @ema_ap: AP supports enhanced Multi BSSID advertisement + * @profile_periodicity: number of Beacon periods that are needed to receive the + * complete VAPs info + * @bssid_count: actual number of VAPs in the MultiBSS Set + * @reserved4: alignment + */ +struct iwl_he_sta_context_cmd_v3 { + u8 sta_id; + u8 tid_limit; + u8 reserved1; + u8 reserved2; + __le32 flags; + + /* The below fields are set via Multiple BSSID IE */ + u8 ref_bssid_addr[6]; + __le16 reserved0; + + /* The below fields are set via HE-capabilities IE */ + __le32 htc_flags; + + u8 frag_flags; + u8 frag_level; + u8 frag_max_num; + u8 frag_min_size; + + /* The below fields are set via PPE thresholds element */ + struct iwl_he_pkt_ext_v2 pkt_ext; + + /* The below fields are set via HE-Operation IE */ + u8 bss_color; + u8 htc_trig_based_pkt_ext; + __le16 frame_time_rts_th; + + /* Random access parameter set (i.e. RAPS) */ + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + __le16 puncture_mask; + + /* The below fields are set via MU EDCA parameter set element */ + struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; + + u8 max_bssid_indicator; + u8 bssid_index; + u8 ema_ap; + u8 profile_periodicity; + u8 bssid_count; + u8 reserved4[3]; +} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */ + +/** * struct iwl_he_monitor_cmd - configure air sniffer for HE * @bssid: the BSSID to sniff for * @reserved1: reserved for dword alignment diff --git a/fw/api/nvm-reg.h b/fw/api/nvm-reg.h index 3551a3f1c1aa..91bfde6d5367 100644 --- a/fw/api/nvm-reg.h +++ b/fw/api/nvm-reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -34,6 +34,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids { TAS_CONFIG = 0x3, /** + * @SAR_OFFSET_MAPPING_TABLE_CMD: &iwl_sar_offset_mapping_cmd + */ + SAR_OFFSET_MAPPING_TABLE_CMD = 0x4, + + /** * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy */ PNVM_INIT_COMPLETE_NTFY = 0xFE, @@ -388,18 +393,57 @@ enum iwl_mcc_source { MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, }; -#define IWL_TAS_BLACK_LIST_MAX 16 +#define IWL_TAS_BLOCK_LIST_MAX 16 /** - * struct iwl_tas_config_cmd - configures the TAS + * struct iwl_tas_config_cmd_v2 - configures the TAS * @block_list_size: size of relevant field in block_list_array - * @block_list_array: block list countries (without TAS) + * @block_list_array: list of countries where TAS must be disabled */ -struct iwl_tas_config_cmd { +struct iwl_tas_config_cmd_v2 { __le32 block_list_size; - __le32 block_list_array[IWL_TAS_BLACK_LIST_MAX]; + __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */ /** + 
* struct iwl_tas_config_cmd_v3 - configures the TAS + * @block_list_size: size of relevant field in block_list_array + * @block_list_array: list of countries where TAS must be disabled + * @override_tas_iec: indicates whether to override default value of IEC regulatory + * @enable_tas_iec: in case override_tas_iec is set - + * indicates whether IEC regulatory is enabled or disabled + */ +struct iwl_tas_config_cmd_v3 { + __le32 block_list_size; + __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; + __le16 override_tas_iec; + __le16 enable_tas_iec; +} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */ + +/** + * struct iwl_tas_config_cmd_v4 - configures the TAS + * @block_list_size: size of relevant field in block_list_array + * @block_list_array: list of countries where TAS must be disabled + * @override_tas_iec: indicates whether to override default value of IEC regulatory + * @enable_tas_iec: in case override_tas_iec is set - + * indicates whether IEC regulatory is enabled or disabled + * @usa_tas_uhb_allowed: if set, allow TAS UHB in the USA + * @reserved: reserved + */ +struct iwl_tas_config_cmd_v4 { + __le32 block_list_size; + __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX]; + u8 override_tas_iec; + u8 enable_tas_iec; + u8 usa_tas_uhb_allowed; + u8 reserved; +} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */ + +union iwl_tas_config_cmd { + struct iwl_tas_config_cmd_v2 v2; + struct iwl_tas_config_cmd_v3 v3; + struct iwl_tas_config_cmd_v4 v4; +}; +/** * enum iwl_lari_configs - bit masks for the various LARI config operations * @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine * @LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK: ETSI 5.8GHz SRD passive scan @@ -495,6 +539,32 @@ struct iwl_lari_config_change_cmd_v5 { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */ /** + * struct iwl_lari_config_change_cmd_v6 - change LARI configuration + * @config_bitmap: Bitmap of the config commands. Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. + * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * @chan_state_active_bitmap: Bitmap for overriding channel state to active. + * Each bit represents a country or region to activate, according to the BIOS + * definitions. + * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. 
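/*
 * [Editor's example -- not part of the upstream patch] The union just above
 * lets one buffer back all three TAS_CONFIG layouts; the driver then sends
 * only as many bytes as the command version advertised by the firmware
 * needs. A sketch of that size selection (the version lookup itself is
 * assumed to happen elsewhere):
 */
static size_t example_tas_cmd_size(u8 cmd_ver)
{
	switch (cmd_ver) {
	case 4:
		return sizeof(struct iwl_tas_config_cmd_v4);
	case 3:
		return sizeof(struct iwl_tas_config_cmd_v3);
	default:
		return sizeof(struct iwl_tas_config_cmd_v2);
	}
}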
+ Each bit represents a set of channels in a specific band that should be disabled + */ +struct iwl_lari_config_change_cmd_v6 { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; +} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_6 */ + +/** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete * @status: PNVM image loading status */ diff --git a/fw/api/phy-ctxt.h b/fw/api/phy-ctxt.h index 68b788b92b7a..e66f77924f83 100644 --- a/fw/api/phy-ctxt.h +++ b/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -150,11 +150,12 @@ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_3 */ + /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */ struct iwl_fw_channel_info ci; __le32 lmac_id; - __le32 rxchain_info; + __le32 rxchain_info; /* reserved in _VER_4 */ __le32 dsp_cfg_flags; __le32 reserved; -} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */ +} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */ + #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/fw/api/phy.h b/fw/api/phy.h index c04f2521fcb3..b1b9c29859c1 100644 --- a/fw/api/phy.h +++ b/fw/api/phy.h @@ -166,14 +166,24 @@ struct iwl_dts_measurement_resp { /** * struct ct_kill_notif - CT-kill entry notification + * This structure represents both versions of this notification. * * @temperature: the current temperature in celsius - * @reserved: reserved + * @dts: only in v2: bitmap of the DTSs that triggered the CT Kill: + * bit 0: ToP master + * bit 1: PA chain A master + * bit 2: PA chain B master + * bit 3: ToP slave + * bit 4: PA chain A slave + * bit 5: PA chain B slave + * bits 6,7: reserved (set to 0) + * @scheme: only for v2: scheme that triggered the CT Kill (0-SW, 1-HW) */ struct ct_kill_notif { __le16 temperature; - __le16 reserved; -} __packed; /* GRP_PHY_CT_KILL_NTF */ + u8 dts; + u8 scheme; +} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */ /** * enum ctdp_cmd_operation - CTDP command operations diff --git a/fw/api/power.h b/fw/api/power.h index 4d671c878bb7..81318208f2f6 100644 --- a/fw/api/power.h +++ b/fw/api/power.h @@ -419,7 +419,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 { * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v2 { __le32 ops; @@ -431,7 +431,7 @@ * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. 
+ * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v3 { __le32 ops; @@ -443,7 +443,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 { * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v4 { __le32 ops; @@ -455,7 +455,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 { * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. - * @table_revision: BIOS table revision. + * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) */ struct iwl_geo_tx_power_profiles_cmd_v5 { __le32 ops; @@ -503,6 +503,20 @@ union iwl_ppag_table_cmd { } v2; } __packed; +#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26 +#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13 + +/** + * struct iwl_sar_offset_mapping_cmd - struct for SAR_OFFSET_MAPPING_TABLE_CMD + * @offset_map: mapping an MCC to a geo SAR group + * @reserved: reserved + */ +struct iwl_sar_offset_mapping_cmd { + u8 offset_map[MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE] + [MCC_TO_SAR_OFFSET_TABLE_COL_SIZE]; + u16 reserved; +} __packed; /* SAR_OFFSET_MAPPING_TABLE_CMD_API_S */ + /** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) diff --git a/fw/api/rfi.h b/fw/api/rfi.h index c678b9aa9b55..1a84a4081e7c 100644 --- a/fw/api/rfi.h +++ b/fw/api/rfi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2020 Intel Corporation + * Copyright (C) 2020-2021 Intel Corporation */ #ifndef __iwl_fw_api_rfi_h__ #define __iwl_fw_api_rfi_h__ @@ -57,4 +57,12 @@ struct iwl_rfi_freq_table_resp_cmd { __le32 status; } __packed; /* RFI_CONFIG_CMD_API_S_VER_1 */ +/** + * struct iwl_rfi_deactivate_notif - notification that the FW disabled RFIm + * + * @reason: used only for a log message + */ +struct iwl_rfi_deactivate_notif { + __le32 reason; +} __packed; /* RFI_DEACTIVATE_NTF_S_VER_1 */ #endif /* __iwl_fw_api_rfi_h__ */ diff --git a/fw/api/rs.h b/fw/api/rs.h index a09081d7ed45..2198ca5269e1 100644 --- a/fw/api/rs.h +++ b/fw/api/rs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_rs_h__ @@ -116,13 +116,24 @@ enum IWL_TLC_MNG_NSS { IWL_TLC_NSS_MAX }; -enum IWL_TLC_HT_BW_RATES { - IWL_TLC_HT_BW_NONE_160, - IWL_TLC_HT_BW_160, +/** + * enum IWL_TLC_MCS_PER_BW - mcs index per BW + * @IWL_TLC_MCS_PER_BW_80: mcs for bw - 20MHz, 40MHz, 80MHz + * @IWL_TLC_MCS_PER_BW_160: mcs for bw - 160MHz + * @IWL_TLC_MCS_PER_BW_320: mcs for bw - 320MHz + * @IWL_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3 + * @IWL_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4 + */ +enum IWL_TLC_MCS_PER_BW { + IWL_TLC_MCS_PER_BW_80, + IWL_TLC_MCS_PER_BW_160, + IWL_TLC_MCS_PER_BW_320, + IWL_TLC_MCS_PER_BW_NUM_V3 = IWL_TLC_MCS_PER_BW_160 + 1, + IWL_TLC_MCS_PER_BW_NUM_V4 = IWL_TLC_MCS_PER_BW_320 + 1, }; /** - * struct tlc_config_cmd - TLC configuration + * struct iwl_tlc_config_cmd_v3 - TLC configuration * @sta_id: station id * 
@reserved1: reserved * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw @@ -131,8 +142,8 @@ enum IWL_TLC_HT_BW_RATES { * @amsdu: TX amsdu is supported * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags * @non_ht_rates: bitmap of supported legacy rates - * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width> - * pair (0 - 80mhz width and below, 1 - 160mhz). + * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per &enum IWL_TLC_MCS_PER_BW + * <nss, channel-width> pair (0 - 80mhz width and below, 1 - 160mhz). * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width * use BIT(@enum iwl_tlc_mng_cfg_cw) @@ -140,7 +151,7 @@ enum IWL_TLC_HT_BW_RATES { * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), * set zero for no limit. */ -struct iwl_tlc_config_cmd { +struct iwl_tlc_config_cmd_v3 { u8 sta_id; u8 reserved1[3]; u8 max_ch_width; @@ -149,7 +160,7 @@ struct iwl_tlc_config_cmd { u8 amsdu; __le16 flags; __le16 non_ht_rates; - __le16 ht_rates[IWL_TLC_NSS_MAX][2]; + __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V3]; __le16 max_mpdu_len; u8 sgi_ch_width_supp; u8 reserved2; @@ -157,6 +168,37 @@ struct iwl_tlc_config_cmd { } __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */ /** + * struct iwl_tlc_config_cmd_v4 - TLC configuration + * @sta_id: station id + * @reserved1: reserved + * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw + * @mode: &enum iwl_tlc_mng_cfg_mode + * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains + * @sgi_ch_width_supp: bitmap of SGI support per channel width + * use BIT(&enum iwl_tlc_mng_cfg_cw) + * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags + * @non_ht_rates: bitmap of supported legacy rates + * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width> + * pair (0 - 80mhz width and below, 1 - 160mhz, 2 - 320mhz). + * @max_mpdu_len: max MPDU length, in bytes + * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI), + * set zero for no limit. 
+ */ +struct iwl_tlc_config_cmd_v4 { + u8 sta_id; + u8 reserved1[3]; + u8 max_ch_width; + u8 mode; + u8 chains; + u8 sgi_ch_width_supp; + __le16 flags; + __le16 non_ht_rates; + __le16 ht_rates[IWL_TLC_NSS_MAX][IWL_TLC_MCS_PER_BW_NUM_V4]; + __le16 max_mpdu_len; + __le16 max_tx_op; +} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */ + +/** + * enum iwl_tlc_update_flags - updated fields + * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update + * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update diff --git a/fw/api/scan.h b/fw/api/scan.h index 8b200379f7c2..5413087ae909 100644 --- a/fw/api/scan.h +++ b/fw/api/scan.h @@ -82,6 +82,16 @@ enum iwl_scan_offload_band_selection { IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, }; +enum iwl_scan_offload_auth_alg { + IWL_AUTH_ALGO_UNSUPPORTED = 0x00, + IWL_AUTH_ALGO_NONE = 0x01, + IWL_AUTH_ALGO_PSK = 0x02, + IWL_AUTH_ALGO_8021X = 0x04, + IWL_AUTH_ALGO_SAE = 0x08, + IWL_AUTH_ALGO_8021X_SHA384 = 0x10, + IWL_AUTH_ALGO_OWE = 0x20, +}; + /** * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S * @ssid_index: index to ssid list in fixed part @@ -201,7 +211,7 @@ struct iwl_scan_channel_cfg_lmac { __le32 iter_interval; } __packed; -/* +/** * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 * @offset: offset in the data block * @len: length of the segment @@ -211,7 +221,8 @@ struct iwl_scan_probe_segment { __le16 len; } __packed; -/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 +/** + * struct iwl_scan_probe_req_v1 - PROBE_REQUEST_FRAME_API_S_VER_2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe @@ -224,7 +235,8 @@ struct iwl_scan_probe_req_v1 { u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; } __packed; -/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 +/** + * struct iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_v2 * @mac_header: first (and common) part of the probe * @band_data: band specific data * @common_data: last (and common) part of the probe @@ -247,7 +259,8 @@ enum iwl_scan_channel_flags { IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER = BIT(6), }; -/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S +/** + * struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S * @flags: enum iwl_scan_channel_flags * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is * involved. 
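/*
 * [Editor's example -- not part of the upstream patch] Filling the widened
 * ht_rates table of struct iwl_tlc_config_cmd_v4 above: v4 only adds the
 * 320 MHz column, so a v3-style fill loop changes in its inner bound alone.
 * The MCS-mask values used here are placeholders.
 */
static void example_tlc_v4_rates(struct iwl_tlc_config_cmd_v4 *cmd)
{
	int nss;

	for (nss = 0; nss < IWL_TLC_NSS_MAX; nss++) {
		cmd->ht_rates[nss][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(0x03ff);
		cmd->ht_rates[nss][IWL_TLC_MCS_PER_BW_160] = cpu_to_le16(0x03ff);
		/* new in v4: */
		cmd->ht_rates[nss][IWL_TLC_MCS_PER_BW_320] = cpu_to_le16(0x0fff);
	}
}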
@@ -492,7 +505,7 @@ struct iwl_scan_dwell { } __packed; /** - * struct iwl_scan_config_v1 + * struct iwl_scan_config_v1 - scan configuration command * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions @@ -524,6 +537,21 @@ struct iwl_scan_config_v1 { #define SCAN_LB_LMAC_IDX 0 #define SCAN_HB_LMAC_IDX 1 +/** + * struct iwl_scan_config_v2 - scan configuration command + * @flags: enum scan_config_flags + * @tx_chains: valid_tx antenna - ANT_* definitions + * @rx_chains: valid_rx antenna - ANT_* definitions + * @legacy_rates: default legacy rates - enum scan_config_rates + * @out_of_channel_time: default max out of serving channel time + * @suspend_time: default max suspend time + * @dwell: dwells for the scan + * @mac_addr: default mac address to be used in probes + * @bcast_sta_id: the index of the station in the fw + * @channel_flags: default channel flags - enum iwl_channel_flags + * scan_config_channel_flag + * @channel_array: default supported channels + */ struct iwl_scan_config_v2 { __le32 flags; __le32 tx_chains; @@ -539,7 +567,7 @@ } __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */ /** - * struct iwl_scan_config + * struct iwl_scan_config - scan configuration command * @enable_cam_mode: whether to enable CAM mode. * @enable_promiscouos_mode: whether to enable promiscuous mode * @bcast_sta_id: the index of the station in the fw. Deprecated starting with @@ -640,6 +668,10 @@ enum iwl_umac_scan_general_flags2 { * @IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN: in case * &IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN is enabled and scan is * activated over 6GHz PSC channels, filter in beacons and probe responses. + * @IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE: if set, send probe requests at a minimum + * rate of 5.5 Mbps, filter in broadcast probe responses and set the max + * channel time indication field in the FILS request parameters element + * (if included by the driver in the probe request IEs). */ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC = BIT(0), @@ -657,6 +689,20 @@ enum iwl_umac_scan_general_flags_v2 { IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN = BIT(12), IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN = BIT(13), IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN_FILTER_IN = BIT(14), + IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE = BIT(15), +}; + +/** + * enum iwl_umac_scan_general_params_flags2 - UMAC scan general flags2 + * + * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB: scan event scheduling + * should be aware of a P2P GO operation on the 2GHz band. + * @IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB: scan event scheduling + * should be aware of a P2P GO operation on the 5GHz or 6GHz band. 
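/*
 * [Editor's example -- not part of the upstream patch] The new flags2 byte
 * of the v11 general params carries the two P2P GO bits documented above
 * and defined just below; a sketch of how a driver might compute it:
 */
static u8 example_scan_flags2(bool p2p_go_on_2ghz, bool p2p_go_on_5_6ghz)
{
	u8 flags2 = 0;

	if (p2p_go_on_2ghz)
		flags2 |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
	if (p2p_go_on_5_6ghz)
		flags2 |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
	return flags2;
}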
+ */ +enum iwl_umac_scan_general_params_flags2 { + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB = BIT(0), + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB = BIT(1), }; /** @@ -941,8 +987,8 @@ struct iwl_scan_channel_params_v6 { } __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */ /** - * struct iwl_scan_general_params_v10 - * @flags: &enum iwl_umac_scan_flags + * struct iwl_scan_general_params_v11 + * @flags: &enum iwl_umac_scan_general_flags_v2 * @reserved: reserved for future * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @active_dwell: dwell time for active scan per LMAC @@ -952,7 +998,8 @@ struct iwl_scan_channel_params_v6 { * for 5GHz channels * @adwell_default_social_chn: adaptive dwell default number of * APs per social channel - * @reserved1: reserved for future + * @flags2: for version 11 see &enum iwl_umac_scan_general_params_flags2. + * Otherwise reserved. * @adwell_max_budget: the maximal number of TUs that adaptive dwell * can add to the total scan time * @max_out_of_time: max out of serving channel time, per LMAC @@ -963,7 +1010,7 @@ struct iwl_scan_channel_params_v6 { * @num_of_fragments: number of fragments needed for full fragmented * scan coverage. */ -struct iwl_scan_general_params_v10 { +struct iwl_scan_general_params_v11 { __le16 flags; u8 reserved; u8 scan_start_mac_id; @@ -971,14 +1018,14 @@ struct iwl_scan_general_params_v10 { u8 adwell_default_2g; u8 adwell_default_5g; u8 adwell_default_social_chn; - u8 reserved1; + u8 flags2; __le16 adwell_max_budget; __le32 max_out_of_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; u8 passive_dwell[SCAN_TWO_LMACS]; u8 num_of_fragments[SCAN_TWO_LMACS]; -} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */ +} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_11 and *_VER_10 */ /** * struct iwl_scan_periodic_parms_v1 @@ -994,31 +1041,31 @@ struct iwl_scan_periodic_parms_v1 { /** * struct iwl_scan_req_params_v12 - * @general_params: &struct iwl_scan_general_params_v10 + * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v4 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v3 */ struct iwl_scan_req_params_v12 { - struct iwl_scan_general_params_v10 general_params; + struct iwl_scan_general_params_v11 general_params; struct iwl_scan_channel_params_v4 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v3 probe_params; } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */ /** - * struct iwl_scan_req_params_v14 - * @general_params: &struct iwl_scan_general_params_v10 + * struct iwl_scan_req_params_v15 + * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v6 * @periodic_params: &struct iwl_scan_periodic_parms_v1 * @probe_params: &struct iwl_scan_probe_params_v4 */ -struct iwl_scan_req_params_v14 { - struct iwl_scan_general_params_v10 general_params; +struct iwl_scan_req_params_v15 { + struct iwl_scan_general_params_v11 general_params; struct iwl_scan_channel_params_v6 channel_params; struct iwl_scan_periodic_parms_v1 periodic_params; struct iwl_scan_probe_params_v4 probe_params; -} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */ +} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_15 and *_VER_14 */ /** * struct iwl_scan_req_umac_v12 @@ -1033,16 +1080,16 @@ struct iwl_scan_req_umac_v12 { } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */ /** - * struct iwl_scan_req_umac_v14 + * 
struct iwl_scan_req_umac_v15 * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters */ -struct iwl_scan_req_umac_v14 { +struct iwl_scan_req_umac_v15 { __le32 uid; __le32 ooc_priority; - struct iwl_scan_req_params_v14 scan_params; -} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */ + struct iwl_scan_req_params_v15 scan_params; +} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_15 and *_VER_14 */ /** * struct iwl_umac_scan_abort diff --git a/fw/api/stats.h b/fw/api/stats.h index 18cca15caa3a..898e62326e6c 100644 --- a/fw/api/stats.h +++ b/fw/api/stats.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020 - 2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -432,6 +432,7 @@ enum iwl_fw_statistics_type { FW_STATISTICS_HE, }; /* FW_STATISTICS_TYPE_API_E_VER_1 */ +#define IWL_STATISTICS_TYPE_MSK 0x7f /** * struct iwl_statistics_ntfy_hdr * @@ -446,10 +447,97 @@ struct iwl_statistics_ntfy_hdr { }; /* STATISTICS_NTFY_HDR_API_S_VER_1 */ /** + * struct iwl_statistics_ntfy_per_mac + * + * @beacon_filter_average_energy: Average energy [-dBm] of the 2 + * antennas. + * @air_time: air time + * @beacon_counter: all beacons (both filtered and not filtered) + * @beacon_average_energy: all beacons (both filtered and not + * filtered) + * @beacon_rssi_a: beacon RSSI on antenna A + * @beacon_rssi_b: beacon RSSI on antenna B + * @rx_bytes: RX byte count + */ +struct iwl_statistics_ntfy_per_mac { + __le32 beacon_filter_average_energy; + __le32 air_time; + __le32 beacon_counter; + __le32 beacon_average_energy; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 rx_bytes; +} __packed; /* STATISTICS_NTFY_PER_MAC_API_S_VER_1 */ + +#define IWL_STATS_MAX_BW_INDEX 5 +/** + * struct iwl_statistics_ntfy_per_phy + * @channel_load: channel load + * @channel_load_by_us: device contribution to MCLM + * @channel_load_not_by_us: other devices' contribution to MCLM + * @clt: CLT HW timer (TIM_CH_LOAD2) + * @act: active accumulator SW + * @elp: elapsed time accumulator SW + * @rx_detected_per_ch_width: number of deferred TX per channel width, + * 0 - 20, 1/2/3 - 40/80/160 + * @success_per_ch_width: number of frames that got ACK/BACK/CTS + * per channel BW. note, BACK counted as 1 + * @fail_per_ch_width: number of frames that didn't get ACK/BACK/CTS + * per channel BW. note BACK counted as 1 + * @last_tx_ch_width_indx: last txed frame channel width index + */ +struct iwl_statistics_ntfy_per_phy { + __le32 channel_load; + __le32 channel_load_by_us; + __le32 channel_load_not_by_us; + __le32 clt; + __le32 act; + __le32 elp; + __le32 rx_detected_per_ch_width[IWL_STATS_MAX_BW_INDEX]; + __le32 success_per_ch_width[IWL_STATS_MAX_BW_INDEX]; + __le32 fail_per_ch_width[IWL_STATS_MAX_BW_INDEX]; + __le32 last_tx_ch_width_indx; +} __packed; /* STATISTICS_NTFY_PER_PHY_API_S_VER_1 */ + +/** + * struct iwl_statistics_ntfy_per_sta + * + * @average_energy: in fact it is minus the energy.. 
+ */ +struct iwl_statistics_ntfy_per_sta { + __le32 average_energy; +} __packed; /* STATISTICS_NTFY_PER_STA_API_S_VER_1 */ + +#define IWL_STATS_MAX_PHY_OPERTINAL 3 +/** * struct iwl_statistics_operational_ntfy * * @hdr: general statistics header * @flags: bitmap of possible notification structures + * @per_mac_stats: per mac statistics, &struct iwl_statistics_ntfy_per_mac + * @per_phy_stats: per phy statistics, &struct iwl_statistics_ntfy_per_phy + * @per_sta_stats: per sta statistics, &struct iwl_statistics_ntfy_per_sta + * @rx_time: rx time + * @tx_time: usec the radio is transmitting. + * @on_time_rf: The total time in usec the RF is awake. + * @on_time_scan: usec the radio is awake due to scan. + */ +struct iwl_statistics_operational_ntfy { + struct iwl_statistics_ntfy_hdr hdr; + __le32 flags; + struct iwl_statistics_ntfy_per_mac per_mac_stats[MAC_INDEX_AUX]; + struct iwl_statistics_ntfy_per_phy per_phy_stats[IWL_STATS_MAX_PHY_OPERTINAL]; + struct iwl_statistics_ntfy_per_sta per_sta_stats[IWL_MVM_STATION_COUNT_MAX]; + __le64 rx_time; + __le64 tx_time; + __le64 on_time_rf; + __le64 on_time_scan; +} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_15 */ + +/** + * struct iwl_statistics_operational_ntfy_ver_14 + * + * @hdr: general statistics header + * @flags: bitmap of possible notification structures * @mac_id: mac on which the beacon was received * @beacon_filter_average_energy: Average energy [-dBm] of the 2 * antennas. @@ -469,7 +557,7 @@ struct iwl_statistics_ntfy_hdr { * @average_energy: in fact it is minus the energy.. * @reserved: reserved */ -struct iwl_statistics_operational_ntfy { +struct iwl_statistics_operational_ntfy_ver_14 { struct iwl_statistics_ntfy_hdr hdr; __le32 flags; __le32 mac_id; diff --git a/fw/api/soc.h b/fw/api/system.h index c5df1171462b..acf5d4b9a214 100644 --- a/fw/api/soc.h +++ b/fw/api/system.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2012-2014, 2019-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ -#ifndef __iwl_fw_api_soc_h__ -#define __iwl_fw_api_soc_h__ +#ifndef __iwl_fw_api_system_h__ +#define __iwl_fw_api_system_h__ #define SOC_CONFIG_CMD_FLAGS_DISCRETE BIT(0) #define SOC_CONFIG_CMD_FLAGS_LOW_LATENCY BIT(1) @@ -32,4 +32,12 @@ struct iwl_soc_configuration_cmd { * SOC_CONFIGURATION_CMD_S_VER_2 */ -#endif /* __iwl_fw_api_soc_h__ */ +/** + * struct iwl_system_features_control_cmd - system features control command + * @features: bitmap of features to disable + */ +struct iwl_system_features_control_cmd { + __le32 features[4]; +} __packed; /* SYSTEM_FEATURES_CONTROL_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_system_h__ */ diff --git a/fw/api/tx.h b/fw/api/tx.h index 9b3bce83efb6..ecc6706f66ed 100644 --- a/fw/api/tx.h +++ b/fw/api/tx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_tx_h__ @@ -177,6 +177,17 @@ enum iwl_tx_offload_assist_flags_pos { #define IWL_TX_CMD_OFFLD_MH_MASK 0x1f #define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f +enum iwl_tx_offload_assist_bz { + IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS = 0x000003ff, + IWL_TX_CMD_OFFLD_BZ_START_OFFS = 0x001ff800, + IWL_TX_CMD_OFFLD_BZ_MH_LEN = 0x07c00000, + IWL_TX_CMD_OFFLD_BZ_MH_PAD = 0x08000000, + 
IWL_TX_CMD_OFFLD_BZ_AMSDU = 0x10000000, + IWL_TX_CMD_OFFLD_BZ_ZERO2ONES = 0x20000000, + IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM = 0x40000000, + IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM = 0x80000000, +}; + /* TODO: complete documentation for try_cnt and btkill_cnt */ /** * struct iwl_tx_cmd - TX command struct to FW @@ -243,8 +254,10 @@ struct iwl_tx_cmd { u8 tid_tspec; __le16 pm_frame_timeout; __le16 reserved4; - u8 payload[0]; - struct ieee80211_hdr hdr[0]; + union { + DECLARE_FLEX_ARRAY(u8, payload); + DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr); + }; } __packed; /* TX_CMD_API_S_VER_6 */ struct iwl_dram_sec_info { @@ -283,8 +296,7 @@ struct iwl_tx_cmd_gen2 { * @dram_info: FW internal DRAM storage * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is * cleared. Combination of RATE_MCS_* - * @ttl: time to live - packet lifetime limit. The FW should drop if - * passed. + * @reserved: reserved * @hdr: 802.11 header */ struct iwl_tx_cmd_gen3 { @@ -293,7 +305,7 @@ struct iwl_tx_cmd_gen3 { __le32 offload_assist; struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; - __le64 ttl; + u8 reserved[8]; struct ieee80211_hdr hdr[]; } __packed; /* TX_CMD_API_S_VER_8, TX_CMD_API_S_VER_10 */ @@ -720,8 +732,10 @@ struct iwl_mvm_compressed_ba_notif { __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; - struct iwl_mvm_compressed_ba_ratid ra_tid[0]; - struct iwl_mvm_compressed_ba_tfd tfd[]; + union { + DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid); + DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd); + }; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4, COMPRESSED_BA_RES_API_S_VER_5 */ diff --git a/fw/api/txq.h b/fw/api/txq.h index 8b3a00df41da..e018946310d1 100644 --- a/fw/api/txq.h +++ b/fw/api/txq.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2019-2020 Intel Corporation + * Copyright (C) 2005-2014, 2019-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -76,6 +76,8 @@ enum iwl_tx_queue_cfg_actions { TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), }; +#define IWL_DEFAULT_QUEUE_SIZE_EHT (1024 * 4) +#define IWL_DEFAULT_QUEUE_SIZE_HE 1024 #define IWL_DEFAULT_QUEUE_SIZE 256 #define IWL_MGMT_QUEUE_SIZE 16 #define IWL_CMD_QUEUE_SIZE 32 @@ -12,7 +12,7 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" - +#include "iwl-fh.h" /** * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump * @@ -303,9 +303,6 @@ static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt, iwl_trans_release_nic_access(fwrt->trans); } -#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ -#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ - struct iwl_prph_range { u32 start, end; }; @@ -880,7 +877,7 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, dump_info->hw_type = cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = - cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + cpu_to_le32(fwrt->trans->hw_rev_step); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->name, @@ -1027,7 +1024,7 @@ struct iwl_dump_ini_region_data { static int iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = 
range_ptr; @@ -1052,7 +1049,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1102,7 +1099,7 @@ iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1121,7 +1118,7 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_trans *trans = fwrt->trans; struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; @@ -1153,7 +1150,7 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1165,8 +1162,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data, le32_to_cpu(reg->dev_addr.size)); - if ((le32_to_cpu(reg->id) & IWL_FW_INI_REGION_V2_MASK) == - IWL_FW_INI_HW_SMEM_REGION_ID && + if (reg->sub_type == IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_HW_SMEM && fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf) fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx, range->data, @@ -1176,7 +1172,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, } static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct page *page = fwrt->fw_paging_db[idx].fw_paging_block; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1196,7 +1192,7 @@ static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range; u32 page_size; @@ -1205,7 +1201,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, idx++; if (!fwrt->trans->trans_cfg->gen2) - return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx); + return _iwl_dump_ini_paging_iter(fwrt, range_ptr, range_len, idx); range = range_ptr; page_size = fwrt->trans->init_dram.paging[idx].size; @@ -1221,7 +1217,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1240,7 +1236,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime *fwrt, struct 
iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1308,7 +1304,7 @@ static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1443,7 +1439,7 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1510,7 +1506,7 @@ out: static int iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_region_err_table *err_table = ®->err_table; @@ -1529,7 +1525,7 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_region_special_device_memory *special_mem = @@ -1550,7 +1546,7 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1562,10 +1558,8 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, return -EBUSY; range->range_data_size = reg->dev_addr.size; - iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG, - DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK); for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) { - prph_data = iwl_read_prph(fwrt->trans, (i % 2) ? + prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ? 
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB : DBGI_SRAM_TARGET_ACCESS_RDATA_LSB); if (prph_data == 0x5a5a5a5a) { @@ -1580,7 +1574,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt, static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) + void *range_ptr, u32 range_len, int idx) { struct iwl_fw_ini_error_dump_range *range = range_ptr; struct iwl_rx_packet *pkt = reg_data->dump_data->fw_pkt; @@ -1599,10 +1593,37 @@ static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int iwl_dump_ini_imr_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, u32 range_len, int idx) +{ + /* read the IMR memory and DMA it to SRAM */ + struct iwl_fw_ini_error_dump_range *range = range_ptr; + u64 imr_curr_addr = fwrt->trans->dbg.imr_data.imr_curr_addr; + u32 imr_rem_bytes = fwrt->trans->dbg.imr_data.imr2sram_remainbyte; + u32 sram_addr = fwrt->trans->dbg.imr_data.sram_addr; + u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; + u32 size_to_dump = (imr_rem_bytes > sram_size) ? sram_size : imr_rem_bytes; + + range->range_data_size = cpu_to_le32(size_to_dump); + if (iwl_trans_write_imr_mem(fwrt->trans, sram_addr, + imr_curr_addr, size_to_dump)) { + IWL_ERR(fwrt, "WRT_DEBUG: IMR Memory transfer failed\n"); + return -1; + } + + fwrt->trans->dbg.imr_data.imr_curr_addr = imr_curr_addr + size_to_dump; + fwrt->trans->dbg.imr_data.imr2sram_remainbyte -= size_to_dump; + + iwl_trans_read_mem_bytes(fwrt->trans, sram_addr, range->data, + size_to_dump); + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + static void * iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *data) + void *data, u32 data_len) { struct iwl_fw_ini_error_dump *dump = data; @@ -1678,7 +1699,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, static void * iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *data) + void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; @@ -1689,7 +1710,7 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, static void * iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *data) + void *data, u32 data_len) { struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; @@ -1698,9 +1719,20 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt, } static void * +iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *data, u32 data_len) +{ + struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data; + + return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump, + &fwrt->trans->cfg->mon_dbgi_regs); +} + +static void * iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *data) + void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_err_table_dump *dump = data; @@ -1714,7 +1746,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt, static void * iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, - void *data) + void *data, u32 data_len) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct 
iwl_fw_ini_special_device_memory *dump = data; @@ -1726,6 +1758,18 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt, return dump->data; } +static void * +iwl_dump_ini_imr_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *data, u32 data_len) +{ + struct iwl_fw_ini_error_dump *dump = data; + + dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER); + + return dump->data; +} + static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -1785,6 +1829,26 @@ static u32 iwl_dump_ini_single_range(struct iwl_fw_runtime *fwrt, return 1; } +static u32 iwl_dump_ini_imr_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + /* range is the total number of pages that need to be copied from + * IMR memory to SRAM and later from SRAM to DRAM + */ + u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; + u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; + u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; + + if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { + IWL_DEBUG_INFO(fwrt, + "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", + imr_enable, imr_size, sram_size); + return 0; + } + + return (imr_size % sram_size) ? (imr_size / sram_size + 1) : (imr_size / sram_size); +} + static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -1862,6 +1926,20 @@ iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt, return size; } +static u32 iwl_dump_ini_mon_dbgi_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + u32 size = le32_to_cpu(reg->dev_addr.size); + u32 ranges = iwl_dump_ini_mem_ranges(fwrt, reg_data); + + if (!size || !ranges) + return 0; + + return sizeof(struct iwl_fw_ini_monitor_dump) + ranges * + (size + sizeof(struct iwl_fw_ini_error_dump_range)); +} + static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data) { @@ -1949,6 +2027,33 @@ iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt, return size; } +static u32 +iwl_dump_ini_imr_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data) +{ + u32 size = 0; + u32 ranges = 0; + u32 imr_enable = fwrt->trans->dbg.imr_data.imr_enable; + u32 imr_size = fwrt->trans->dbg.imr_data.imr_size; + u32 sram_size = fwrt->trans->dbg.imr_data.sram_size; + + if (imr_enable == 0 || imr_size == 0 || sram_size == 0) { + IWL_DEBUG_INFO(fwrt, + "WRT: Invalid imr data enable: %d, imr_size: %d, sram_size: %d\n", + imr_enable, imr_size, sram_size); + return size; + } + size = imr_size; + ranges = iwl_dump_ini_imr_ranges(fwrt, reg_data); + if (!size && !ranges) { + IWL_ERR(fwrt, "WRT: imr_size :=%d, ranges :=%d\n", size, ranges); + return 0; + } + size += sizeof(struct iwl_fw_ini_error_dump) + + ranges * sizeof(struct iwl_fw_ini_error_dump_range); + return size; +} + /** * struct iwl_dump_ini_mem_ops - ini memory dump operations * @get_num_of_ranges: returns the number of memory ranges in the region. 
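/*
 * [Editor's example -- not part of the upstream patch] The range count
 * computed by iwl_dump_ini_imr_ranges() above is a plain ceiling division:
 * the IMR region is drained through the SRAM bounce buffer one sram_size
 * chunk at a time, so e.g. imr_size = 10 MB with sram_size = 4 MB yields
 * 3 ranges. Equivalent, using the kernel's DIV_ROUND_UP():
 */
static u32 example_imr_ranges(u32 imr_size, u32 sram_size)
{
	if (!imr_size || !sram_size)
		return 0;
	return DIV_ROUND_UP(imr_size, sram_size);
}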
/**
 * struct iwl_dump_ini_mem_ops - ini memory dump operations
 * @get_num_of_ranges: returns the number of memory ranges in the region.
@@ -1965,10 +2070,10 @@ struct iwl_dump_ini_mem_ops {
			  struct iwl_dump_ini_region_data *reg_data);
	void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt,
			      struct iwl_dump_ini_region_data *reg_data,
-			      void *data);
+			      void *data, u32 data_len);
	int (*fill_range)(struct iwl_fw_runtime *fwrt,
			  struct iwl_dump_ini_region_data *reg_data,
-			  void *range, int idx);
+			  void *range, u32 range_len, int idx);
 };

 /**
@@ -1988,26 +2093,56 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
 {
	struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
	struct iwl_fw_ini_dump_entry *entry;
-	struct iwl_fw_error_dump_data *tlv;
+	struct iwl_fw_ini_error_dump_data *tlv;
	struct iwl_fw_ini_error_dump_header *header;
-	u32 type = le32_to_cpu(reg->type), id = le32_to_cpu(reg->id);
+	u32 type = reg->type;
+	u32 id = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
	u32 num_of_ranges, i, size;
-	void *range;
-
-	/*
-	 * The higher part of the ID in version 2 is irrelevant for
-	 * us, so mask it out.
-	 */
-	if (le32_to_cpu(reg->hdr.version) == 2)
-		id &= IWL_FW_INI_REGION_V2_MASK;
+	u8 *range;
+	u32 free_size;
+	u64 header_size;
+	u32 dump_policy = IWL_FW_INI_DUMP_VERBOSE;
+
+	IWL_DEBUG_FW(fwrt, "WRT: Collecting region: dump type=%d, id=%d, type=%d\n",
+		     dump_policy, id, type);
+
+	if (le32_to_cpu(reg->hdr.version) >= 2) {
+		u32 dp = le32_get_bits(reg->id,
+				       IWL_FW_INI_REGION_DUMP_POLICY_MASK);
+
+		if (dump_policy == IWL_FW_INI_DUMP_VERBOSE &&
+		    !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT)) {
+			IWL_DEBUG_FW(fwrt,
+				     "WRT: no dump - type %d and policy mismatch=%d\n",
+				     dump_policy, dp);
+			return 0;
+		} else if (dump_policy == IWL_FW_INI_DUMP_MEDIUM &&
+			   !(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB)) {
+			IWL_DEBUG_FW(fwrt,
+				     "WRT: no dump - type %d and policy mismatch=%d\n",
+				     dump_policy, dp);
+			return 0;
+		} else if (dump_policy == IWL_FW_INI_DUMP_BRIEF &&
+			   !(dp & IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB)) {
+			IWL_DEBUG_FW(fwrt,
+				     "WRT: no dump - type %d and policy mismatch=%d\n",
+				     dump_policy, dp);
+			return 0;
+		}
+	}

	if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
-	    !ops->fill_range)
+	    !ops->fill_range) {
+		IWL_DEBUG_FW(fwrt, "WRT: no ops for collecting data\n");
		return 0;
+	}

	size = ops->get_size(fwrt, reg_data);
-	if (!size)
+
+	if (size < sizeof(*header)) {
+		IWL_DEBUG_FW(fwrt, "WRT: size didn't include space for header\n");
		return 0;
+	}

	entry = vzalloc(sizeof(*entry) + sizeof(*tlv) + size);
	if (!entry)
@@ -2017,11 +2152,11 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,

	tlv = (void *)entry->data;
	tlv->type = reg->type;
+	tlv->sub_type = reg->sub_type;
+	tlv->sub_type_ver = reg->sub_type_ver;
+	tlv->reserved = reg->reserved;
	tlv->len = cpu_to_le32(size);

-	IWL_DEBUG_FW(fwrt, "WRT: Collecting region: id=%d, type=%d\n", id,
-		     type);
-
	num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);

	header = (void *)tlv->data;
@@ -2030,7 +2165,8 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
	header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
	memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);

-	range = ops->fill_mem_hdr(fwrt, reg_data, header);
+	free_size = size;
+	range = ops->fill_mem_hdr(fwrt, reg_data, header, free_size);
	if (!range) {
		IWL_ERR(fwrt,
			"WRT: Failed to fill region header: id=%d, type=%d\n",
@@ -2038,8 +2174,21 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
		goto out_err;
	}
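From region TLV version 2 on, the high bits of reg->id advertise which dump verbosity levels the region participates in; iwl_dump_ini_mem() above skips the region when the flag matching the current level (here hardwired to IWL_FW_INI_DUMP_VERBOSE) is clear. Illustrative reading of the field (the exact bit assignments of the three policy flags are an assumption, not stated in this diff):

	u32 dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK);
	/* dp is a bitmap: NO_LIMIT gates VERBOSE dumps, MAX_LIMIT_5MB gates
	 * MEDIUM, MAX_LIMIT_600KB gates BRIEF; a region advertising none of
	 * them is never collected by this path */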
+	header_size = range - (u8 *)header;
+
+	if (WARN(header_size > free_size,
+		 "header size %llu > free_size %d",
+		 header_size, free_size)) {
+		IWL_ERR(fwrt,
+			"WRT: fill_mem_hdr used more than given free_size\n");
+		goto out_err;
+	}
+
+	free_size -= header_size;
+
	for (i = 0; i < num_of_ranges; i++) {
-		int range_size = ops->fill_range(fwrt, reg_data, range, i);
+		int range_size = ops->fill_range(fwrt, reg_data, range,
+						 free_size, i);

		if (range_size < 0) {
			IWL_ERR(fwrt,
@@ -2047,6 +2196,15 @@
				id, type);
			goto out_err;
		}
+
+		if (WARN(range_size > free_size, "range_size %d > free_size %d",
+			 range_size, free_size)) {
+			IWL_ERR(fwrt,
+				"WRT: fill_raged used more than given free_size\n");
+			goto out_err;
+		}
+
+		free_size -= range_size;
		range = range + range_size;
	}
@@ -2099,7 +2257,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
	dump->ver_type = cpu_to_le32(fwrt->dump.fw_ver.type);
	dump->ver_subtype = cpu_to_le32(fwrt->dump.fw_ver.subtype);

-	dump->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+	dump->hw_step = cpu_to_le32(fwrt->trans->hw_rev_step);

	/*
	 * Several HWs all have type == 0x42, so we'll override this value
@@ -2107,7 +2265,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
	 */
	hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
	if (hw_type == IWL_AX210_HW_TYPE) {
-		u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2);
+		u32 prph_val = iwl_read_umac_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
		u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
		u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
		u32 masked_bits = is_jacket | (is_cdb << 1);
@@ -2237,7 +2395,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_csr_iter,
	},
-	[IWL_FW_INI_REGION_DRAM_IMR] = {},
+	[IWL_FW_INI_REGION_DRAM_IMR] = {
+		.get_num_of_ranges = iwl_dump_ini_imr_ranges,
+		.get_size = iwl_dump_ini_imr_get_size,
+		.fill_mem_hdr = iwl_dump_ini_imr_fill_header,
+		.fill_range = iwl_dump_ini_imr_iter,
+	},
	[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
@@ -2252,8 +2415,8 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
	},
	[IWL_FW_INI_REGION_DBGI_SRAM] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
-		.get_size = iwl_dump_ini_mem_get_size,
-		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
+		.get_size = iwl_dump_ini_mon_dbgi_get_size,
+		.fill_mem_hdr = iwl_dump_ini_mon_dbgi_fill_header,
		.fill_range = iwl_dump_ini_dbgi_sram_iter,
	},
 };
@@ -2291,7 +2454,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
		}
		reg = (void *)reg_data.reg_tlv->data;
-		reg_type = le32_to_cpu(reg->type);
+		reg_type = reg->type;

		if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
			continue;
@@ -2441,7 +2604,7 @@ static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
 static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
				  struct iwl_fwrt_dump_data *dump_data)
 {
-	struct list_head dump_list = LIST_HEAD_INIT(dump_list);
+	LIST_HEAD(dump_list);
	struct scatterlist *sg_dump_data;
	u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);
@@ -2586,7 +2749,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
		delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
	}

-	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+	desc = kzalloc(struct_size(desc, trig_desc.data, len), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;
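The descriptor allocation above moves from open-coded sizeof(*desc) + len to struct_size(), which computes header-plus-flexible-array sizes with overflow saturation. A generic sketch of the idiom (hypothetical struct; struct_size() comes from linux/overflow.h):

	struct demo {
		u32 hdr;
		u8 data[];	/* flexible array member */
	};

	/* struct_size(d, data, len) == sizeof(struct demo) + len, but
	 * saturates to SIZE_MAX on overflow so kzalloc() fails cleanly
	 * instead of allocating a silently truncated buffer */
	struct demo *d = kzalloc(struct_size(d, data, len), GFP_ATOMIC);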
@@ -2682,6 +2845,28 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
 }
 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);

+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+				    u32 timepoint,
+				    u32 timepoint_data)
+{
+	struct iwl_dbg_dump_complete_cmd hcmd_data;
+	struct iwl_host_cmd hcmd = {
+		.id = WIDE_ID(DEBUG_GROUP, FW_DUMP_COMPLETE_CMD),
+		.data[0] = &hcmd_data,
+		.len[0] = sizeof(hcmd_data),
+	};
+
+	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+		return;
+
+	if (fw_has_capa(&fwrt->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT)) {
+		hcmd_data.tp = cpu_to_le32(timepoint);
+		hcmd_data.tp_data = cpu_to_le32(timepoint_data);
+		iwl_trans_send_cmd(fwrt->trans, &hcmd);
+	}
+}
+
 /* this function assumes dump_start was called beforehand and dump_end will be
  * called afterwards
  */
@@ -2690,7 +2875,8 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
	struct iwl_fw_dbg_params params = {0};
	struct iwl_fwrt_dump_data *dump_data =
		&fwrt->dump.wks[wk_idx].dump_data;
-
+	u32 policy;
+	u32 time_point;
	if (!test_bit(wk_idx, &fwrt->dump.active_wks))
		return;
@@ -2716,6 +2902,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)

	iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);

+	policy = le32_to_cpu(dump_data->trig->apply_policy);
+	time_point = le32_to_cpu(dump_data->trig->time_point);
+
+	if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+		IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+		iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+	}
+	if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
+		iwl_force_nmi(fwrt->trans);
+
 out:
	if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
		iwl_fw_error_dump_data_free(dump_data);
@@ -2789,9 +2985,8 @@ void iwl_fw_error_dump_wk(struct work_struct *work)

	/* assumes the op mode mutex is locked in dump_start since
	 * iwl_fw_dbg_collect_sync can't run in parallel
	 */
-	if (fwrt->ops && fwrt->ops->dump_start &&
-	    fwrt->ops->dump_start(fwrt->ops_ctx))
-		return;
+	if (fwrt->ops && fwrt->ops->dump_start)
+		fwrt->ops->dump_start(fwrt->ops_ctx);

	iwl_fw_dbg_collect_sync(fwrt, wks->idx);

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -324,4 +324,7 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
 }

 void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
+void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
+				    u32 timepoint,
+				    u32 timepoint_data);
 #endif /* __iwl_fw_dbg_h__ */
diff --git a/fw/debugfs.c b/fw/debugfs.c
new file mode 100644
index 000000000000..43e997283db0
--- /dev/null
+++ b/fw/debugfs.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
+ */
+#include "api/commands.h"
+#include "debugfs.h"
+#include "dbg.h"
+#include <linux/seq_file.h>
+
+#define FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+struct dbgfs_##name##_data { \
+	argtype *arg; \
+	bool read_done; \
+	ssize_t rlen; \
+	char rbuf[buflen]; \
+}; \
+static int _iwl_dbgfs_##name##_open(struct inode *inode, \
+				    struct file *file) \
+{ \
+	struct dbgfs_##name##_data *data; \
+ \
+	data = kzalloc(sizeof(*data), GFP_KERNEL); \
+	if (!data) \
+		return -ENOMEM; \
+ \
+	data->read_done = false; \
+	data->arg = inode->i_private; \
+	file->private_data = data; \
+ \
+	return 0; \
+}
+
+#define FWRT_DEBUGFS_READ_WRAPPER(name) \
+static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \
+					char __user *user_buf, \
+					size_t count, loff_t *ppos) \
+{ \
+	struct dbgfs_##name##_data *data = file->private_data; \
+ \
+	if (!data->read_done) { \
+		data->read_done = true; \
+		data->rlen = iwl_dbgfs_##name##_read(data->arg, \
+						     sizeof(data->rbuf),\
+						     data->rbuf); \
+	} \
+ \
+	if (data->rlen < 0) \
+		return data->rlen; \
+	return simple_read_from_buffer(user_buf, count, ppos, \
+				       data->rbuf, data->rlen); \
+}
+
+static int _iwl_dbgfs_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+#define _FWRT_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+	.read = _iwl_dbgfs_##name##_read, \
+	.open = _iwl_dbgfs_##name##_open, \
+	.llseek = generic_file_llseek, \
+	.release = _iwl_dbgfs_release, \
+}
+
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
+					 const char __user *user_buf, \
+					 size_t count, loff_t *ppos) \
+{ \
+	argtype *arg = \
+		((struct dbgfs_##name##_data *)file->private_data)->arg;\
+	char buf[buflen] = {}; \
+	size_t buf_size = min(count, sizeof(buf) - 1); \
+ \
+	if (copy_from_user(buf, user_buf, buf_size)) \
+		return -EFAULT; \
+ \
+	return iwl_dbgfs_##name##_write(arg, buf, buf_size); \
+}
+
+#define _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_READ_WRAPPER(name) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+	.write = _iwl_dbgfs_##name##_write, \
+	.read = _iwl_dbgfs_##name##_read, \
+	.open = _iwl_dbgfs_##name##_open, \
+	.llseek = generic_file_llseek, \
+	.release = _iwl_dbgfs_release, \
+}
+
+#define _FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype) \
+FWRT_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype) \
+static const struct file_operations iwl_dbgfs_##name##_ops = { \
+	.write = _iwl_dbgfs_##name##_write, \
+	.open = _iwl_dbgfs_##name##_open, \
+	.llseek = generic_file_llseek, \
+	.release = _iwl_dbgfs_release, \
+}
+
+#define FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz) \
+	_FWRT_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+	_FWRT_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+	_FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime)
+
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+	debugfs_create_file(alias, mode, parent, fwrt, \
+			    &iwl_dbgfs_##name##_ops); \
+	} while (0)
+#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
+	FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
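For reference, these wrapper macros stamp out one private-data struct, the open/read/write thunks and a file_operations per debugfs file; roughly what FWRT_DEBUGFS_READ_FILE_OPS(foo, 64) expands to for a hypothetical file "foo" (the real callers only supply the iwl_dbgfs_foo_read() body):

	struct dbgfs_foo_data {
		struct iwl_fw_runtime *arg;
		bool read_done;
		ssize_t rlen;
		char rbuf[64];
	};
	static int _iwl_dbgfs_foo_open(struct inode *inode, struct file *file);
	static ssize_t _iwl_dbgfs_foo_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos);
	static const struct file_operations iwl_dbgfs_foo_ops = {
		.read = _iwl_dbgfs_foo_read,
		.open = _iwl_dbgfs_foo_open,
		.llseek = generic_file_llseek,
		.release = _iwl_dbgfs_release,
	};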
+static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
+{
+	struct iwl_mvm_marker marker = {
+		.dw_len = sizeof(struct iwl_mvm_marker) / 4,
+		.marker_id = MARKER_ID_SYNC_CLOCK,
+
+		/* the real timestamp is taken from the ftrace clock
+		 * this is for finding the match between fw and kernel logs
+		 */
+		.timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+	};
+
+	struct iwl_host_cmd hcmd = {
+		.id = MARKER_CMD,
+		.flags = CMD_ASYNC,
+		.data[0] = &marker,
+		.len[0] = sizeof(marker),
+	};
+
+	return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt,
+					      char *buf, size_t count)
+{
+	struct iwl_dbg_host_event_cfg_cmd event_cfg;
+	struct iwl_host_cmd hcmd = {
+		.id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
+		.flags = CMD_ASYNC,
+		.data[0] = &event_cfg,
+		.len[0] = sizeof(event_cfg),
+	};
+	u32 enabled_severities;
+	int ret = kstrtou32(buf, 10, &enabled_severities);
+
+	if (ret < 0)
+		return ret;
+
+	event_cfg.enabled_severities = cpu_to_le32(enabled_severities);
+
+	ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+	IWL_INFO(fwrt,
+		 "sent host event cfg with enabled_severities: %u, ret: %d\n",
+		 enabled_severities, ret);
+
+	return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(enabled_severities, 16);
+
+static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
+{
+	int ret;
+	struct iwl_fw_runtime *fwrt =
+		container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+	unsigned long delay = fwrt->timestamp.delay;
+
+	ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+	if (!ret && delay)
+		schedule_delayed_work(&fwrt->timestamp.wk,
+				      round_jiffies_relative(delay));
+	else
+		IWL_INFO(fwrt,
+			 "stopping timestamp_marker, ret: %d, delay: %u\n",
+			 ret, jiffies_to_msecs(delay) / 1000);
+}
+
+void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
+{
+	IWL_INFO(fwrt,
+		 "starting timestamp_marker trigger with delay: %us\n",
+		 delay);
+
+	iwl_fw_cancel_timestamp(fwrt);
+
+	fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+
+	schedule_delayed_work(&fwrt->timestamp.wk,
+			      round_jiffies_relative(fwrt->timestamp.delay));
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
+						char *buf, size_t count)
+{
+	int ret;
+	u32 delay;
+
+	ret = kstrtou32(buf, 10, &delay);
+	if (ret < 0)
+		return ret;
+
+	iwl_fw_trigger_timestamp(fwrt, delay);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
+					       size_t size, char *buf)
+{
+	u32 delay_secs = jiffies_to_msecs(fwrt->timestamp.delay) / 1000;
+
+	return scnprintf(buf, size, "%d\n", delay_secs);
+}
+
+FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
+struct hcmd_write_data {
+	__be32 cmd_id;
+	__be32 flags;
+	__be16 length;
+	u8 data[];
+} __packed;
+
+static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
+					 size_t count)
+{
+	size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
+	size_t data_size = (count - 1) / 2;
+	int ret;
+	struct hcmd_write_data *data;
+	struct iwl_host_cmd hcmd = {
+		.len = { 0, },
+		.data = { NULL, },
+	};
+
+	if (fwrt->ops && fwrt->ops->fw_running &&
+	    !fwrt->ops->fw_running(fwrt->ops_ctx))
+		return -EIO;
+
+	if (count < header_size + 1 || count > 1024 * 4)
+		return -EINVAL;
+
+	data = kmalloc(data_size, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	ret = hex2bin((u8 *)data, buf, data_size);
+	if (ret)
+		goto out;
+
+	hcmd.id = be32_to_cpu(data->cmd_id);
+	hcmd.flags = be32_to_cpu(data->flags);
+	hcmd.len[0] = be16_to_cpu(data->length);
+	hcmd.data[0] = data->data;
+
+	if (count != header_size + hcmd.len[0] * 2 + 1) {
+		IWL_ERR(fwrt,
+			"host command data size does not match header length\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (fwrt->ops && fwrt->ops->send_hcmd)
+		ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
+	else
+		ret = -EPERM;
+
+	if (ret < 0)
+		goto out;
+
+	if (hcmd.flags & CMD_WANT_SKB)
+		iwl_free_resp(&hcmd);
+out:
+	kfree(data);
+	return ret ?: count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
+
+static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
+					    size_t size, char *buf)
+{
+	return scnprintf(buf, size, "0x%08x\n",
+			 fwrt->trans->dbg.domains_bitmap);
+}
+
+FWRT_DEBUGFS_READ_FILE_OPS(fw_dbg_domain, 20);
+
+struct iwl_dbgfs_fw_info_priv {
+	struct iwl_fw_runtime *fwrt;
+};
+
+struct iwl_dbgfs_fw_info_state {
+	loff_t pos;
+};
+
+static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
+					void *v, loff_t *pos)
+{
+	struct iwl_dbgfs_fw_info_state *state = v;
+	struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+	const struct iwl_fw *fw = priv->fwrt->fw;
+
+	*pos = ++state->pos;
+	if (*pos >= fw->ucode_capa.n_cmd_versions)
+		return NULL;
+
+	return state;
+}
+
+static void iwl_dbgfs_fw_info_seq_stop(struct seq_file *seq,
+				       void *v)
+{
+	kfree(v);
+}
+
+static void *iwl_dbgfs_fw_info_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+	const struct iwl_fw *fw = priv->fwrt->fw;
+	struct iwl_dbgfs_fw_info_state *state;
+
+	if (*pos >= fw->ucode_capa.n_cmd_versions)
+		return NULL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+	state->pos = *pos;
+	return state;
+};
+
+static int iwl_dbgfs_fw_info_seq_show(struct seq_file *seq, void *v)
+{
+	struct iwl_dbgfs_fw_info_state *state = v;
+	struct iwl_dbgfs_fw_info_priv *priv = seq->private;
+	const struct iwl_fw *fw = priv->fwrt->fw;
+	const struct iwl_fw_cmd_version *ver;
+	u32 cmd_id;
+
+	if (!state->pos)
+		seq_puts(seq, "fw_api_ver:\n");
+
+	ver = &fw->ucode_capa.cmd_versions[state->pos];
+
+	cmd_id = WIDE_ID(ver->group, ver->cmd);
+
+	seq_printf(seq, "  0x%04x:\n", cmd_id);
+	seq_printf(seq, "    name: %s\n",
+		   iwl_get_cmd_string(priv->fwrt->trans, cmd_id));
+	seq_printf(seq, "    cmd_ver: %d\n", ver->cmd_ver);
+	seq_printf(seq, "    notif_ver: %d\n", ver->notif_ver);
+	return 0;
+}
+
+static const struct seq_operations iwl_dbgfs_info_seq_ops = {
+	.start = iwl_dbgfs_fw_info_seq_start,
+	.next = iwl_dbgfs_fw_info_seq_next,
+	.stop = iwl_dbgfs_fw_info_seq_stop,
+	.show = iwl_dbgfs_fw_info_seq_show,
+};
+
+static int iwl_dbgfs_fw_info_open(struct inode *inode, struct file *filp)
+{
+	struct iwl_dbgfs_fw_info_priv *priv;
+
+	priv = __seq_open_private(filp, &iwl_dbgfs_info_seq_ops,
+				  sizeof(*priv));
+
+	if (!priv)
+		return -ENOMEM;
+
+	priv->fwrt = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations iwl_dbgfs_fw_info_ops = {
+	.owner = THIS_MODULE,
+	.open = iwl_dbgfs_fw_info_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release_private,
+};
+
+void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+			     struct dentry *dbgfs_dir)
+{
+	INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+	FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+	FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200);
+	FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
+	FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200);
+	FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400);
+}
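The send_hcmd file above takes the whole command as one hex string: 8 characters of big-endian cmd_id, 8 of flags, 4 of payload length, then 2*len characters of payload. That makes header_size = (2*sizeof(u32) + sizeof(u16)) * 2 = 20, and a valid write is exactly 20 + 2*len + 1 bytes including the trailing newline; a payload-less command is therefore 21 bytes, e.g. "00000077" + "00000000" + "0000" followed by '\n' (the command id 0x77 here is purely for illustration).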
diff --git a/fw/dump.c b/fw/dump.c
index 016b3a4c5f51..b90f1e9ce691 100644
--- a/fw/dump.c
+++ b/fw/dump.c
@@ -12,6 +12,7 @@
 #include "iwl-io.h"
 #include "iwl-prph.h"
 #include "iwl-csr.h"
+#include "pnvm.h"

 /*
  * Note: This structure is read from the device with IO accesses,
@@ -19,53 +20,6 @@
  * read with u32-sized accesses, any members with a different size
  * need to be ordered correctly though!
  */
-struct iwl_error_event_table_v1 {
-	u32 valid;		/* (nonzero) valid, (0) log is empty */
-	u32 error_id;		/* type of error */
-	u32 pc;			/* program counter */
-	u32 blink1;		/* branch link */
-	u32 blink2;		/* branch link */
-	u32 ilink1;		/* interrupt link */
-	u32 ilink2;		/* interrupt link */
-	u32 data1;		/* error-specific data */
-	u32 data2;		/* error-specific data */
-	u32 data3;		/* error-specific data */
-	u32 bcon_time;		/* beacon timer */
-	u32 tsf_low;		/* network timestamp function timer */
-	u32 tsf_hi;		/* network timestamp function timer */
-	u32 gp1;		/* GP1 timer register */
-	u32 gp2;		/* GP2 timer register */
-	u32 gp3;		/* GP3 timer register */
-	u32 ucode_ver;		/* uCode version */
-	u32 hw_ver;		/* HW Silicon version */
-	u32 brd_ver;		/* HW board version */
-	u32 log_pc;		/* log program counter */
-	u32 frame_ptr;		/* frame pointer */
-	u32 stack_ptr;		/* stack pointer */
-	u32 hcmd;		/* last host command header */
-	u32 isr0;		/* isr status register LMPM_NIC_ISR0: rxtx_flag */
-	u32 isr1;		/* isr status register LMPM_NIC_ISR1: host_flag */
-	u32 isr2;		/* isr status register LMPM_NIC_ISR2: enc_flag */
-	u32 isr3;		/* isr status register LMPM_NIC_ISR3: time_flag */
-	u32 isr4;		/* isr status register LMPM_NIC_ISR4: wico interrupt */
-	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
-	u32 wait_event;		/* wait event() caller address */
-	u32 l2p_control;	/* L2pControlField */
-	u32 l2p_duration;	/* L2pDurationField */
-	u32 l2p_mhvalid;	/* L2pMhValidBits */
-	u32 l2p_addr_match;	/* L2pAddrMatchStat */
-	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on (LMPM_PMG_SEL) */
-	u32 u_timestamp;	/* indicate when the date and time of the compilation */
-	u32 flow_handler;	/* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
-
 struct iwl_error_event_table {
	u32 valid;		/* (nonzero) valid, (0) log is empty */
	u32 error_id;		/* type of error */
@@ -147,6 +101,7 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
	struct iwl_trans *trans = fwrt->trans;
	struct iwl_umac_error_event_table table = {};
	u32 base = fwrt->trans->dbg.umac_error_event_table;
+	char pnvm_name[MAX_PNVM_NAME];

	if (!base &&
	    !(fwrt->trans->dbg.error_event_table_tlv_status &
@@ -164,6 +119,13 @@ static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
			fwrt->trans->status, table.valid);
	}

+	if ((table.error_id & ~FW_SYSASSERT_CPU_MASK) ==
+	    FW_SYSASSERT_PNVM_MISSING) {
+		iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));
+		IWL_ERR(fwrt, "PNVM data is missing, please install %s\n",
+			pnvm_name);
+	}
+
	IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
		iwl_fw_lookup_assert_desc(table.error_id));
	IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
@@ -212,7 +174,9 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
		IWL_ERR(trans, "HW error, resetting before reading\n");

		/* reset the device */
-		iwl_trans_sw_reset(trans);
+		err = iwl_trans_sw_reset(trans, true);
+		if (err)
+			return;

		err = iwl_finish_nic_init(trans);
		if (err)
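The PNVM check above works because the firmware encodes the asserting CPU in the top nibble of error_id; masking with ~FW_SYSASSERT_CPU_MASK (that is, & ~0xf0000000) leaves only the assert code, so for example 0x0010070d and 0x2010070d (the same assert raised by a different CPU) both match FW_SYSASSERT_PNVM_MISSING. The same mask lets the widened u32 entries in the advanced_lookup table (later in this diff) resolve full assert codes.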
@@ -295,21 +259,21 @@ struct iwl_tcm_error_event_table {
	u32 reserved[4];
 } __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */

-static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
+static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
 {
	struct iwl_trans *trans = fwrt->trans;
	struct iwl_tcm_error_event_table table = {};
-	u32 base = fwrt->trans->dbg.tcm_error_event_table;
+	u32 base = fwrt->trans->dbg.tcm_error_event_table[idx];
	int i;
+	u32 flag = idx ? IWL_ERROR_EVENT_TABLE_TCM2 :
+			 IWL_ERROR_EVENT_TABLE_TCM1;

-	if (!base ||
-	    !(fwrt->trans->dbg.error_event_table_tlv_status &
-	      IWL_ERROR_EVENT_TABLE_TCM))
+	if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
		return;

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

-	IWL_ERR(fwrt, "TCM status:\n");
+	IWL_ERR(fwrt, "TCM%d status:\n", idx + 1);
	IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
	IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
	IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
@@ -328,13 +292,72 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
	for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
		IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
			table.sw_status[i], i);
+}

-	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
-		u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH);
+/*
+ * RCM error struct.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_rcm_error_event_table {
+	u32 valid;
+	u32 error_id;
+	u32 blink2;
+	u32 ilink1;
+	u32 ilink2;
+	u32 data1, data2, data3;
+	u32 logpc;
+	u32 frame_pointer;
+	u32 stack_pointer;
+	u32 msgid;
+	u32 isr;
+	u32 frame_hw_status;
+	u32 mbx_lmac_to_rcm_req;
+	u32 mbx_rcm_to_lmac_req;
+	u32 mh_ctl;
+	u32 mh_addr1_lo;
+	u32 mh_info;
+	u32 mh_err;
+	u32 reserved[3];
+} __packed; /* RCM_LOG_ERROR_TABLE_API_S_VER_1 */
+
+static void iwl_fwrt_dump_rcm_error_log(struct iwl_fw_runtime *fwrt, int idx)
+{
+	struct iwl_trans *trans = fwrt->trans;
+	struct iwl_rcm_error_event_table table = {};
+	u32 base = fwrt->trans->dbg.rcm_error_event_table[idx];
+	u32 flag = idx ? IWL_ERROR_EVENT_TABLE_RCM2 :
+			 IWL_ERROR_EVENT_TABLE_RCM1;

-		IWL_ERR(fwrt, "Function Scratch status:\n");
-		IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
-	}
+	if (!base || !(fwrt->trans->dbg.error_event_table_tlv_status & flag))
+		return;
+
+	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+	IWL_ERR(fwrt, "RCM%d status:\n", idx + 1);
+	IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
+	IWL_ERR(fwrt, "0x%08X | rcm branchlink2\n", table.blink2);
+	IWL_ERR(fwrt, "0x%08X | rcm interruptlink1\n", table.ilink1);
+	IWL_ERR(fwrt, "0x%08X | rcm interruptlink2\n", table.ilink2);
+	IWL_ERR(fwrt, "0x%08X | rcm data1\n", table.data1);
+	IWL_ERR(fwrt, "0x%08X | rcm data2\n", table.data2);
+	IWL_ERR(fwrt, "0x%08X | rcm data3\n", table.data3);
+	IWL_ERR(fwrt, "0x%08X | rcm log PC\n", table.logpc);
+	IWL_ERR(fwrt, "0x%08X | rcm frame pointer\n", table.frame_pointer);
+	IWL_ERR(fwrt, "0x%08X | rcm stack pointer\n", table.stack_pointer);
+	IWL_ERR(fwrt, "0x%08X | rcm msg ID\n", table.msgid);
+	IWL_ERR(fwrt, "0x%08X | rcm ISR status\n", table.isr);
+	IWL_ERR(fwrt, "0x%08X | frame HW status\n", table.frame_hw_status);
+	IWL_ERR(fwrt, "0x%08X | LMAC-to-RCM request mbox\n",
+		table.mbx_lmac_to_rcm_req);
+	IWL_ERR(fwrt, "0x%08X | RCM-to-LMAC request mbox\n",
+		table.mbx_rcm_to_lmac_req);
+	IWL_ERR(fwrt, "0x%08X | MAC header control\n", table.mh_ctl);
+	IWL_ERR(fwrt, "0x%08X | MAC header addr1 low\n", table.mh_addr1_lo);
+	IWL_ERR(fwrt, "0x%08X | MAC header info\n", table.mh_info);
+	IWL_ERR(fwrt, "0x%08X | MAC header error\n", table.mh_err);
 }

 static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
@@ -418,8 +441,18 @@ void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
	if (fwrt->trans->dbg.lmac_error_event_table[1])
		iwl_fwrt_dump_lmac_error_log(fwrt, 1);
	iwl_fwrt_dump_umac_error_log(fwrt);
-	iwl_fwrt_dump_tcm_error_log(fwrt);
+	iwl_fwrt_dump_tcm_error_log(fwrt, 0);
+	iwl_fwrt_dump_rcm_error_log(fwrt, 0);
+	iwl_fwrt_dump_tcm_error_log(fwrt, 1);
+	iwl_fwrt_dump_rcm_error_log(fwrt, 1);
	iwl_fwrt_dump_iml_error_log(fwrt);
	iwl_fwrt_dump_fseq_regs(fwrt);
+
+	if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+		u32 scratch = iwl_read32(fwrt->trans, CSR_FUNC_SCRATCH);
+
+		IWL_ERR(fwrt, "Function Scratch status:\n");
+		IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
+	}
 }
 IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
diff --git a/fw/error-dump.h b/fw/error-dump.h
index 9036b32ec765..079fa0023bd8 100644
--- a/fw/error-dump.h
+++ b/fw/error-dump.h
@@ -232,6 +232,24 @@ struct iwl_fw_error_dump_mem {
 #define IWL_INI_DUMP_INFO_TYPE BIT(31)

 /**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: &enum iwl_fw_ini_region_type
+ * @sub_type: sub type id
+ * @sub_type_ver: sub type version
+ * @reserved: not in use
+ * @len: the length starting from %data
+ * @data: the data itself
+ */
+struct iwl_fw_ini_error_dump_data {
+	u8 type;
+	u8 sub_type;
+	u8 sub_type_ver;
+	u8 reserved;
+	__le32 len;
+	__u8 data[];
+} __packed;
+
+/**
  * struct iwl_fw_ini_dump_entry
  * @list: list of dump entries
  * @size: size of the data
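Note on layout: the new iwl_fw_ini_error_dump_data header packs type/sub_type/sub_type_ver/reserved into the four bytes that the pre-existing iwl_fw_error_dump_data spent on a single __le32 type. Since the dump format is little-endian, the new u8 type coincides with the low byte of the old field, which is why iwl_dump_ini_mem() can assign tlv->type = reg->type byte-for-byte (an inference from the field order above, not something the commit states).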
diff --git a/fw/file.h b/fw/file.h
index 3d572f5024bb..fa2a73ae4183 100644
--- a/fw/file.h
+++ b/fw/file.h
@@ -98,7 +98,6 @@ enum iwl_ucode_tlv_type {
	IWL_UCODE_TLV_PNVM_VERSION		= 62,
	IWL_UCODE_TLV_PNVM_SKU			= 64,
-	IWL_UCODE_TLV_TCM_DEBUG_ADDRS		= 65,
	IWL_UCODE_TLV_SEC_TABLE_ADDR		= 66,
	IWL_UCODE_TLV_D3_KEK_KCK_ADDR		= 67,
@@ -120,7 +119,7 @@ enum iwl_ucode_tlv_type {
 struct iwl_ucode_tlv {
	__le32 type;		/* see above */
	__le32 length;		/* not including type/length fields */
-	u8 data[0];
+	u8 data[];
 };

 #define IWL_TLV_UCODE_MAGIC	0x0a4c5749
@@ -313,7 +312,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
 * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
- * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
 * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
 * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
@@ -371,6 +369,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 *	reset flow
 * @IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN: Support for passive scan on 6GHz PSC
 *	channels even when these are not enabled.
+ * @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
+ *	complete to FW.
 *
 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
 */
@@ -389,7 +389,6 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= (__force iwl_ucode_tlv_capa_t)13,
	IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG		= (__force iwl_ucode_tlv_capa_t)17,
	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)18,
-	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)19,
	IWL_UCODE_TLV_CAPA_CSUM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)21,
	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= (__force iwl_ucode_tlv_capa_t)22,
	IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD		= (__force iwl_ucode_tlv_capa_t)26,
@@ -422,6 +421,7 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_BROADCAST_TWT		= (__force iwl_ucode_tlv_capa_t)60,
	IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO		= (__force iwl_ucode_tlv_capa_t)61,
	IWL_UCODE_TLV_CAPA_RFIM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)62,
+	IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT		= (__force iwl_ucode_tlv_capa_t)63,

	/* set 2 */
	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
@@ -456,6 +456,7 @@ enum iwl_ucode_tlv_capa {
	IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT		= (__force iwl_ucode_tlv_capa_t)100,
	IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)104,
+	IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)105,

 #ifdef __CHECKER__
	/* sparse says it cannot increment the previous enum member */
@@ -1,14 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright(c) 2019 - 2020 Intel Corporation
+ * Copyright(c) 2019 - 2021 Intel Corporation
 */
-
+#include <fw/api/commands.h>
 #include "img.h"

-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def)
 {
	const struct iwl_fw_cmd_version *entry;
	unsigned int i;
+	/* prior to LONG_GROUP, we never used this CMD version API */
+	u8 grp = iwl_cmd_groupid(cmd_id) ?: LONG_GROUP;
+	u8 cmd = iwl_cmd_opcode(cmd_id);

	if (!fw->ucode_capa.cmd_versions ||
	    !fw->ucode_capa.n_cmd_versions)
@@ -49,10 +52,9 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
 }
 EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);

-#define FW_SYSASSERT_CPU_MASK 0xf0000000
 static const struct {
	const char *name;
-	u8 num;
+	u32 num;
 } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
@@ -73,6 +75,7 @@ static const struct {
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "PNVM_MISSING", FW_SYSASSERT_PNVM_MISSING },
	{ "ADVANCED_SYSASSERT", 0 },
 };
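Call sites migrate from a (group, opcode) pair to a single u32 command id; since iwl_cmd_groupid() yields 0 for a bare opcode, the ?: LONG_GROUP default keeps legacy commands working. A sketch of the before/after shape (mirroring the init.c change later in this diff):

	/* before */
	ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC,
				    IWL_FW_CMD_VER_UNKNOWN);
	/* after: a bare opcode implies LONG_GROUP ... */
	ver = iwl_fw_lookup_cmd_ver(fw, SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN);
	/* ... and grouped commands pass WIDE_ID() */
	ver = iwl_fw_lookup_cmd_ver(fw,
				    WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
				    IWL_FW_CMD_VER_UNKNOWN);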
@@ -275,8 +275,12 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
	return &fw->img[ucode_type];
 }

-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u32 cmd_id, u8 def);

 u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
 const char *iwl_fw_lookup_assert_desc(u32 num);
+
+#define FW_SYSASSERT_CPU_MASK		0xf0000000
+#define FW_SYSASSERT_PNVM_MISSING	0x0010070d
+
 #endif /* __iwl_fw_img_h__ */
diff --git a/fw/init.c b/fw/init.c
index 566957ac4539..135bd48bfe9f 100644
--- a/fw/init.c
+++ b/fw/init.c
@@ -8,7 +8,7 @@
 #include "dbg.h"
 #include "debugfs.h"

-#include "fw/api/soc.h"
+#include "fw/api/system.h"
 #include "fw/api/commands.h"
 #include "fw/api/rx.h"
 #include "fw/api/datapath.h"
@@ -58,7 +58,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
 {
	struct iwl_soc_configuration_cmd cmd = {};
	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(SOC_CONFIGURATION_CMD, SYSTEM_GROUP, 0),
+		.id = WIDE_ID(SYSTEM_GROUP, SOC_CONFIGURATION_CMD),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};
@@ -87,8 +87,7 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
		cmd.flags |= le32_encode_bits(fwrt->trans->trans_cfg->ltr_delay,
					      SOC_FLAGS_LTR_APPLY_DELAY_MASK);

-	if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
-				  SCAN_REQ_UMAC,
+	if (iwl_fw_lookup_cmd_ver(fwrt->fw, SCAN_REQ_UMAC,
				  IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
	    fwrt->trans->trans_cfg->low_latency_xtal)
		cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
diff --git a/fw/paging.c b/fw/paging.c
index 58ca3849d1f3..945bc4160cc9 100644
--- a/fw/paging.c
+++ b/fw/paging.c
@@ -197,7 +197,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
		}
		memcpy(page_address(block->fw_paging_block),
-		       image->sec[sec_idx].data + offset, len);
+		       (const u8 *)image->sec[sec_idx].data + offset, len);
		block->fw_offs = image->sec[sec_idx].offset + offset;
		dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys,
@@ -243,7 +243,7 @@ static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
		.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
	};
	struct iwl_host_cmd hcmd = {
-		.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
		.len = { sizeof(paging_cmd), },
		.data = { &paging_cmd, },
	};
diff --git a/fw/runtime.h b/fw/runtime.h
index 69799f1ed2c4..afc822cab674 100644
--- a/fw/runtime.h
+++ b/fw/runtime.h
@@ -16,7 +16,7 @@
 #include "fw/acpi.h"

 struct iwl_fw_runtime_ops {
-	int (*dump_start)(void *ctx);
+	void (*dump_start)(void *ctx);
	void (*dump_end)(void *ctx);
	bool (*fw_running)(void *ctx);
	int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
@@ -156,8 +156,13 @@ struct iwl_fw_runtime {
	u8 sar_chain_b_profile;
	struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
	u32 geo_rev;
-	union iwl_ppag_table_cmd ppag_table;
+	u32 geo_num_profiles;
+	bool geo_enabled;
+	struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+	u32 ppag_flags;
	u32 ppag_ver;
+	struct iwl_sar_offset_mapping_cmd sgom_table;
+	bool sgom_enabled;
 #endif
 };
diff --git a/fw/smem.c b/fw/smem.c
index f2f1789f470d..3f1272014daf 100644
--- a/fw/smem.c
+++ b/fw/smem.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -89,7 +89,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
-		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+		cmd.id = WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD);
	else
		cmd.id = SHARED_MEM_CFG;
diff --git a/fw/uefi.c b/fw/uefi.c
index c875bf35533c..23b1d689ba7b 100644
--- a/fw/uefi.c
+++ b/fw/uefi.c
@@ -11,6 +11,7 @@
 #include "fw/uefi.h"
 #include "fw/api/alive.h"
 #include <linux/efi.h>
+#include "fw/runtime.h"

 #define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b,	\
				  0xb2, 0xec, 0xf5, 0xa3,	\
@@ -68,7 +69,7 @@ out:
 static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
					   const u8 *data, size_t len)
 {
-	struct iwl_ucode_tlv *tlv;
+	const struct iwl_ucode_tlv *tlv;
	u8 *reduce_power_data = NULL, *tmp;
	u32 size = 0;

@@ -78,7 +79,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
		u32 tlv_len, tlv_type;

		len -= sizeof(*tlv);
-		tlv = (void *)data;
+		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);

@@ -86,6 +87,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
+			kfree(reduce_power_data);
			reduce_power_data = ERR_PTR(-EINVAL);
			goto out;
		}
@@ -105,6 +107,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
				IWL_DEBUG_FW(trans,
					     "Couldn't allocate (more) reduce_power_data\n");
+				kfree(reduce_power_data);
				reduce_power_data = ERR_PTR(-ENOMEM);
				goto out;
			}
@@ -134,6 +137,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
 done:
	if (!size) {
		IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+		/* Better safe than sorry, but 'reduce_power_data' should
+		 * always be NULL if !size.
+		 */
+		kfree(reduce_power_data);
		reduce_power_data = ERR_PTR(-ENOENT);
		goto out;
	}
@@ -147,7 +154,7 @@ out:
 static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
					 const u8 *data, size_t len)
 {
-	struct iwl_ucode_tlv *tlv;
+	const struct iwl_ucode_tlv *tlv;
	void *sec_data;

	IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");
@@ -156,7 +163,7 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
		u32 tlv_len, tlv_type;

		len -= sizeof(*tlv);
-		tlv = (void *)data;
+		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);
		tlv_type = le32_to_cpu(tlv->type);

@@ -168,8 +175,8 @@ static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
		}

		if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
-			struct iwl_sku_id *sku_id =
-				(void *)(data + sizeof(*tlv));
+			const struct iwl_sku_id *sku_id =
+				(const void *)(data + sizeof(*tlv));

			IWL_DEBUG_FW(trans,
				     "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
@@ -260,3 +267,90 @@ out:

	return data;
 }
+
+#ifdef CONFIG_ACPI
+static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
+			       struct iwl_fw_runtime *fwrt)
+{
+	int i, j;
+
+	if (sgom_data->revision != 1)
+		return -EINVAL;
+
+	memcpy(fwrt->sgom_table.offset_map, sgom_data->offset_map,
+	       sizeof(fwrt->sgom_table.offset_map));
+
+	for (i = 0; i < MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE; i++) {
+		for (j = 0; j < MCC_TO_SAR_OFFSET_TABLE_COL_SIZE; j++) {
+			/* since each byte is composed of to values, */
+			/* one for each letter, */
+			/* extract and check each of them separately */
+			u8 value = fwrt->sgom_table.offset_map[i][j];
+			u8 low = value & 0xF;
+			u8 high = (value & 0xF0) >> 4;
+
+			if (high > fwrt->geo_num_profiles)
+				high = 0;
+			if (low > fwrt->geo_num_profiles)
+				low = 0;
+			fwrt->sgom_table.offset_map[i][j] = (high << 4) | low;
+		}
+	}
+
+	fwrt->sgom_enabled = true;
+	return 0;
+}
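Each offset_map byte packs two 4-bit geo-profile indices (one per country-code letter); the parser clamps any index above the number of ACPI geo profiles to 0. A stand-alone model of the clamping (hypothetical helper, plain C):

	static u8 sgom_clamp_byte(u8 value, u32 num_profiles)
	{
		u8 low = value & 0xF;
		u8 high = (value & 0xF0) >> 4;

		if (high > num_profiles)
			high = 0;
		if (low > num_profiles)
			low = 0;
		return (high << 4) | low;
	}
	/* e.g. sgom_clamp_byte(0x53, 3) == 0x03: profile 5 is out of range */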
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
+			     struct iwl_fw_runtime *fwrt)
+{
+	struct efivar_entry *sgom_efivar;
+	struct uefi_cnv_wlan_sgom_data *data;
+	unsigned long package_size;
+	int err, ret;
+
+	if (!fwrt->geo_enabled)
+		return;
+
+	sgom_efivar = kzalloc(sizeof(*sgom_efivar), GFP_KERNEL);
+	if (!sgom_efivar)
+		return;
+
+	memcpy(&sgom_efivar->var.VariableName, IWL_UEFI_SGOM_NAME,
+	       sizeof(IWL_UEFI_SGOM_NAME));
+	sgom_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;
+
+	/* TODO: we hardcode a maximum length here, because reading
+	 * from the UEFI is not working.  To implement this properly,
+	 * we have to call efivar_entry_size().
+	 */
+	package_size = IWL_HARDCODED_SGOM_SIZE;
+
+	data = kmalloc(package_size, GFP_KERNEL);
+	if (!data) {
+		data = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	err = efivar_entry_get(sgom_efivar, NULL, &package_size, data);
+	if (err) {
+		IWL_DEBUG_FW(trans,
+			     "SGOM UEFI variable not found %d\n", err);
+		goto out_free;
+	}
+
+	IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
+		     package_size);
+
+	ret = iwl_uefi_sgom_parse(data, fwrt);
+	if (ret < 0)
+		IWL_DEBUG_FW(trans, "Cannot read SGOM tables. rev is invalid\n");
+
+out_free:
+	kfree(data);
+
+out:
+	kfree(sgom_efivar);
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_sgom_table);
+#endif /* CONFIG_ACPI */
diff --git a/fw/uefi.h b/fw/uefi.h
index d552c656ac9f..09d2a971b3a0 100644
--- a/fw/uefi.h
+++ b/fw/uefi.h
@@ -7,6 +7,7 @@

 #define IWL_UEFI_OEM_PNVM_NAME		L"UefiCnvWlanOemSignedPnvm"
 #define IWL_UEFI_REDUCED_POWER_NAME	L"UefiCnvWlanReducedPower"
+#define IWL_UEFI_SGOM_NAME		L"UefiCnvWlanSarGeoOffsetMapping"

 /*
  * TODO: we have these hardcoded values that the caller must pass,
@@ -16,6 +17,7 @@
  */
 #define IWL_HARDCODED_PNVM_SIZE		4096
 #define IWL_HARDCODED_REDUCE_POWER_SIZE	32768
+#define IWL_HARDCODED_SGOM_SIZE		339

 struct pnvm_sku_package {
	u8 rev;
@@ -25,6 +27,16 @@ struct pnvm_sku_package {
	u8 data[];
 } __packed;

+struct uefi_cnv_wlan_sgom_data {
+	u8 revision;
+	u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1];
+} __packed;
+
 /*
  * This is known to be broken on v4.19 and to work on v5.4.  Until we
  * figure out why this is the case and how to make it work, simply
  * disable the feature in old kernels.
  */
 #ifdef CONFIG_EFI
 void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
 void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
@@ -42,4 +54,12 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
 }
 #endif /* CONFIG_EFI */

+#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+#else
+static inline
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
+{
+}
+#endif
 #endif /* __iwl_fw_uefi__ */
diff --git a/iwl-config.h b/iwl-config.h
index 665167a223f6..b7e430ad5e2a 100644
--- a/iwl-config.h
+++ b/iwl-config.h
@@ -84,6 +84,10 @@ enum iwl_nvm_type {
 #define	IWL_DEFAULT_MAX_TX_POWER 22
 #define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
				 NETIF_F_TSO | NETIF_F_TSO6)
+#define IWL_TX_CSUM_NETIF_FLAGS_BZ (NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6)
+#define IWL_CSUM_NETIF_FLAGS_MASK (IWL_TX_CSUM_NETIF_FLAGS |	\
+				   IWL_TX_CSUM_NETIF_FLAGS_BZ |	\
+				   NETIF_F_RXCSUM)

 /* Antenna presence definitions */
 #define	ANT_NONE	0x0
@@ -339,8 +343,8 @@ struct iwl_fw_mon_regs {
 * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
 * @min_txq_size: minimum number of slots required in a TX queue
 * @uhb_supported: ultra high band channels supported
- * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
- *	supports 256 BA aggregation
+ * @min_ba_txq_size: minimum number of slots required in a TX queue which
+ *	based on hardware support (HE - 256, EHT - 1K).
 * @num_rbds: number of receive buffer descriptors to use
 *	(only used for multi-queue capable devices)
 * @mac_addr_csr_base: CSR base register for MAC address access, if not set
@@ -401,9 +405,10 @@ struct iwl_cfg {
	u32 d3_debug_data_length;
	u32 min_txq_size;
	u32 gp2_reg_addr;
-	u32 min_256_ba_txq_size;
+	u32 min_ba_txq_size;
	const struct iwl_fw_mon_regs mon_dram_regs;
	const struct iwl_fw_mon_regs mon_smem_regs;
+	const struct iwl_fw_mon_regs mon_dbgi_regs;
 };

 #define IWL_CFG_ANY (~0)
@@ -429,6 +434,7 @@ struct iwl_cfg {
 #define IWL_CFG_RF_TYPE_HR1		0x10C
 #define IWL_CFG_RF_TYPE_GF		0x10D
 #define IWL_CFG_RF_TYPE_MR		0x110
+#define IWL_CFG_RF_TYPE_MS		0x111
 #define IWL_CFG_RF_TYPE_FM		0x112

 #define IWL_CFG_RF_ID_TH		0x1
@@ -448,6 +454,9 @@ struct iwl_cfg {
 #define IWL_CFG_NO_CDB			0x0
 #define IWL_CFG_CDB			0x1

+#define IWL_CFG_NO_JACKET		0x0
+#define IWL_CFG_IS_JACKET		0x1
+
 #define IWL_SUBDEVICE_RF_ID(subdevice)	((u16)((subdevice) & 0x00F0) >> 4)
 #define IWL_SUBDEVICE_NO_160(subdevice)	((u16)((subdevice) & 0x0200) >> 9)
 #define IWL_SUBDEVICE_CORES(subdevice)	((u16)((subdevice) & 0x1C00) >> 10)
@@ -462,6 +471,7 @@ struct iwl_dev_info {
	u8 no_160;
	u8 cores;
	u8 cdb;
+	u8 jacket;
	const struct iwl_cfg *cfg;
	const char *name;
 };
@@ -501,6 +511,7 @@ extern const char iwl9560_killer_1550i_name[];
 extern const char iwl9560_killer_1550s_name[];
 extern const char iwl_ax200_name[];
 extern const char iwl_ax203_name[];
+extern const char iwl_ax204_name[];
 extern const char iwl_ax201_name[];
 extern const char iwl_ax101_name[];
 extern const char iwl_ax200_killer_1650w_name[];
@@ -610,7 +621,6 @@ extern const struct iwl_cfg killer1650x_2ax_cfg;
 extern const struct iwl_cfg killer1650w_2ax_cfg;
 extern const struct iwl_cfg iwl_qnj_b0_hr_b0_cfg;
 extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_b0;
-extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
 extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
 extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long;
 extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
@@ -624,9 +634,12 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_ms_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
 extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_ms_a0;
 extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
+extern const struct iwl_cfg iwl_cfg_so_a0_ms_a0;
 extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
 extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
@@ -634,6 +647,12 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
 extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
 extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
 extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bz_z0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_fm4_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_gf4_a0;
+extern const struct iwl_cfg iwl_cfg_bnj_a0_hr_b0;
 #endif /* CONFIG_IWLMVM */

 #endif /* __IWL_CONFIG_H__ */
diff --git a/iwl-csr.h b/iwl-csr.h
index ff79a2ecb242..c0a18e820b51 100644
--- a/iwl-csr.h
+++ b/iwl-csr.h
@@ -105,9 +105,14 @@
 /* GIO Chicken Bits (PCI Express bus link power management) */
 #define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)

-/* Doorbell NMI (since Bz) */
+#define CSR_IPC_SLEEP_CONTROL		(CSR_BASE + 0x114)
+#define CSR_IPC_SLEEP_CONTROL_SUSPEND	0x3
+#define CSR_IPC_SLEEP_CONTROL_RESUME	0
+
+/* Doorbell - since Bz
+ * connected to UREG_DOORBELL_TO_ISR6 (lower 16 bits only)
+ */
 #define CSR_DOORBELL_VECTOR		(CSR_BASE + 0x130)
-#define CSR_DOORBELL_VECTOR_NMI		BIT(1)

 /* host chicken bits */
 #define CSR_HOST_CHICKEN	(CSR_BASE + 0x204)
@@ -143,8 +148,7 @@
 #define CSR_FUNC_SCRATCH_INIT_VALUE		(0x01010101)

 /* Bits for CSR_HW_IF_CONFIG_REG */
-#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH	(0x00000003)
-#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP	(0x0000000C)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH	(0x0000000F)
 #define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM	(0x00000080)
 #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER	(0x000000C0)
 #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI		(0x00000100)
@@ -287,8 +291,7 @@
 #define CSR_GP_CNTRL_REG_FLAG_SW_RESET		BIT(31)

 /* HW REV */
-#define CSR_HW_REV_DASH(_val)          (((_val) & 0x0000003) >> 0)
-#define CSR_HW_REV_STEP(_val)          (((_val) & 0x000000C) >> 2)
+#define CSR_HW_REV_STEP_DASH(_val)     ((_val) & CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH)
 #define CSR_HW_REV_TYPE(_val)          (((_val) & 0x000FFF0) >> 4)

 /* HW RFID */
@@ -306,6 +309,7 @@ enum {
	SILICON_A_STEP = 0,
	SILICON_B_STEP,
	SILICON_C_STEP,
+	SILICON_Z_STEP = 0xf,
 };

@@ -328,10 +332,10 @@ enum {
 #define CSR_HW_REV_TYPE_7265D		(0x0000210)
 #define CSR_HW_REV_TYPE_NONE		(0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ		(0x0000360)
-#define CSR_HW_REV_TYPE_QNJ_B0		(0x0000364)
-#define CSR_HW_REV_TYPE_QU_B0		(0x0000334)
-#define CSR_HW_REV_TYPE_QU_C0		(0x0000338)
-#define CSR_HW_REV_TYPE_QUZ		(0x0000354)
+#define CSR_HW_REV_TYPE_QNJ_B0		(0x0000361)
+#define CSR_HW_REV_TYPE_QU_B0		(0x0000331)
+#define CSR_HW_REV_TYPE_QU_C0		(0x0000332)
+#define CSR_HW_REV_TYPE_QUZ		(0x0000351)
 #define CSR_HW_REV_TYPE_HR_CDB		(0x0000340)
 #define CSR_HW_REV_TYPE_SO		(0x0000370)
 #define CSR_HW_REV_TYPE_TY		(0x0000420)
@@ -529,6 +533,9 @@ enum {
 * 11-8:  queue selector
 */
 #define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
+/* This register is common for Tx and Rx, Rx queues start from 512 */
+#define HBUS_TARG_WRPTR_Q_SHIFT (16)
+#define HBUS_TARG_WRPTR_RX_Q(q) (((q) + 512) << HBUS_TARG_WRPTR_Q_SHIFT)

 /**********************************************************
 * CSR values
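The separate 2-bit DASH (bits 1:0) and STEP (bits 3:2) accessors are folded into the single 4-bit CSR_HW_REV_STEP_DASH() field, which is why the QNJ/QU/QUZ rev-type constants above change their low nibble. The removed arithmetic, for reference:

	dash = (hw_rev & 0x3) >> 0;	/* old CSR_HW_REV_DASH() */
	step = (hw_rev & 0xC) >> 2;	/* old CSR_HW_REV_STEP() */
	step_dash = hw_rev & 0xF;	/* new CSR_HW_REV_STEP_DASH() */
	/* e.g. a low nibble of 0x4 decodes as dash 0, step 1 (SILICON_B_STEP) */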
diff --git a/iwl-dbg-tlv.c b/iwl-dbg-tlv.c
index 7ab98b419cc1..866a33f49915 100644
--- a/iwl-dbg-tlv.c
+++ b/iwl-dbg-tlv.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
 */
 #include <linux/firmware.h>
 #include "iwl-drv.h"
@@ -59,7 +59,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
-	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 2,},
+	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 3,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_CONF_SET]	= {.min_ver = 1, .max_ver = 1,},
 };
@@ -74,7 +74,8 @@ static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
	if (!node)
		return -ENOMEM;

-	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
+	memcpy(&node->tlv, tlv, sizeof(node->tlv));
+	memcpy(node->tlv.data, tlv->data, len);
	list_add_tail(&node->list, list);

	return 0;
@@ -177,15 +178,15 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
	const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
-	u32 type = le32_to_cpu(reg->type);
+	u8 type = reg->type;
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/*
-	 * The higher part of the ID in version 2 is irrelevant for
-	 * us, so mask it out.
+	 * The higher part of the ID from version 2 is debug policy.
+	 * The id will be only lsb 16 bits, so mask it out.
	 */
-	if (le32_to_cpu(reg->hdr.version) == 2)
-		id &= IWL_FW_INI_REGION_V2_MASK;
+	if (le32_to_cpu(reg->hdr.version) >= 2)
+		id &= IWL_FW_INI_REGION_ID_MASK;

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;
@@ -211,6 +212,14 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
		return -EOPNOTSUPP;
	}

+	if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
+		trans->dbg.imr_data.sram_addr =
+			le32_to_cpu(reg->internal_buffer.base_addr);
+		trans->dbg.imr_data.sram_size =
+			le32_to_cpu(reg->internal_buffer.size);
+	}
+
+
	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
@@ -233,6 +242,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
	const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
	struct iwl_fw_ini_trigger_tlv *dup_trig;
	u32 tp = le32_to_cpu(trig->time_point);
+	u32 rf = le32_to_cpu(trig->reset_fw);
	struct iwl_ucode_tlv *dup = NULL;
	int ret;

@@ -247,6 +257,10 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
		return -EINVAL;
	}

+	IWL_DEBUG_FW(trans,
+		     "WRT: time point %u for trigger TLV with reset_fw %u\n",
+		     tp, rf);
+	trans->dbg.last_tp_resetfw = 0xFF;
	if (!le32_to_cpu(trig->occurrences)) {
		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
				GFP_KERNEL);
@@ -266,7 +280,7 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
 static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
 {
-	struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
+	const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
	u32 tp = le32_to_cpu(conf_set->time_point);
	u32 type = le32_to_cpu(conf_set->set_type);
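From version 2 of the region TLV onwards, reg->id is thus a packed field: the region id proper in the low bits and the dump-policy bitmap above it (a 16/16 split is inferred from the "lsb 16 bits" comment above, not spelled out in this diff). Both consumers mask accordingly:

	u32 id     = le32_get_bits(reg->id, IWL_FW_INI_REGION_ID_MASK);
	u32 policy = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK);
	/* e.g. reg->id == cpu_to_le32(0x00010005) -> region 5, policy 0x1 */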
@@ -300,14 +314,21 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
 void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
		       bool ext)
 {
-	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
-	u32 type = le32_to_cpu(tlv->type);
-	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
-	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
+	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
+	u32 type;
+	u32 tlv_idx;
+	u32 domain;
	int ret;

+	if (le32_to_cpu(tlv->length) < sizeof(*hdr))
+		return;
+
+	type = le32_to_cpu(tlv->type);
+	tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
+	domain = le32_to_cpu(hdr->domain);
+
	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
@@ -448,7 +469,7 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
-		tlv = (void *)data;
+		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);

@@ -473,7 +494,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
	int res;

	if (!iwlwifi_mod_params.enable_ini ||
-	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
+	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
		return;

	res = firmware_request_nowarn(&fw, yoyo_bin, dev);
@@ -565,8 +586,7 @@ static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
-	if (!fw_has_capa(&fwrt->fw->ucode_capa,
-			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
+	if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
@@ -750,33 +770,40 @@ static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,

 static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
 {
-	int ret, i, dram_alloc = 0;
-	struct iwl_dram_info dram_info;
+	int ret, i;
+	bool dram_alloc = false;
	struct iwl_dram_data *frags =
		&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
+	struct iwl_dram_info *dram_info;
+
+	if (!frags || !frags->block)
+		return;
+
+	dram_info = frags->block;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

-	dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
-	dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
+	dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
+	dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);

	for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	     i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
-		ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
+		ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
		if (!ret)
-			dram_alloc++;
+			dram_alloc = true;
		else
			IWL_WARN(fwrt,
				 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
				 i, ret);
	}
-	if (dram_alloc) {
-		memcpy(frags->block, &dram_info, sizeof(dram_info));
-		IWL_DEBUG_FW(fwrt, "block data after %016x\n",
-			     *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
-	}
+
+	if (dram_alloc)
+		IWL_DEBUG_FW(fwrt, "block data after %08x\n",
+			     dram_info->first_word);
+	else
+		memset(frags->block, 0, sizeof(*dram_info));
 }

 static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
@@ -799,11 +826,11 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
 }

 static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
-				     struct list_head *config_list)
+				     struct list_head *conf_list)
 {
	struct iwl_dbg_tlv_node *node;

-	list_for_each_entry(node, config_list, list) {
+	list_for_each_entry(node, conf_list, list) {
		struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
		u32 count, address, value;
		u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
*fwrt, case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: { struct iwl_dbgc1_info dram_info = {}; struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0]; - __le64 dram_base_addr = cpu_to_le64(frags->physical); - __le32 dram_size = cpu_to_le32(frags->size); - u64 dram_addr = le64_to_cpu(dram_base_addr); + __le64 dram_base_addr; + __le32 dram_size; + u64 dram_addr; u32 ret; + if (!frags) + break; + + dram_base_addr = cpu_to_le64(frags->physical); + dram_size = cpu_to_le32(frags->size); + dram_addr = le64_to_cpu(dram_base_addr); + IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n", dram_base_addr, dram_size); IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n", @@ -1159,6 +1193,8 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig, data); int ret, i; + u32 tp = le32_to_cpu(dump_data.trig->time_point); + if (!num_data) { ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync); @@ -1177,8 +1213,42 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, break; } } - } + fwrt->trans->dbg.restart_required = FALSE; + IWL_DEBUG_INFO(fwrt, "WRT: tp %d, reset_fw %d\n", + tp, dump_data.trig->reset_fw); + IWL_DEBUG_INFO(fwrt, "WRT: restart_required %d, last_tp_resetfw %d\n", + fwrt->trans->dbg.restart_required, + fwrt->trans->dbg.last_tp_resetfw); + + if (fwrt->trans->trans_cfg->device_family == + IWL_DEVICE_FAMILY_9000) { + fwrt->trans->dbg.restart_required = TRUE; + } else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT && + fwrt->trans->dbg.last_tp_resetfw == + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { + fwrt->trans->dbg.restart_required = FALSE; + fwrt->trans->dbg.last_tp_resetfw = 0xFF; + IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); + } else if (le32_to_cpu(dump_data.trig->reset_fw) == + IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { + IWL_DEBUG_INFO(fwrt, "WRT: stop and reload firmware\n"); + fwrt->trans->dbg.restart_required = TRUE; + } else if (le32_to_cpu(dump_data.trig->reset_fw) == + IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { + IWL_DEBUG_INFO(fwrt, "WRT: stop only and no reload firmware\n"); + fwrt->trans->dbg.restart_required = FALSE; + fwrt->trans->dbg.last_tp_resetfw = + le32_to_cpu(dump_data.trig->reset_fw); + } else if (le32_to_cpu(dump_data.trig->reset_fw) == + IWL_FW_INI_RESET_FW_MODE_NOTHING) { + IWL_DEBUG_INFO(fwrt, + "WRT: nothing need to be done after debug collection\n"); + } else { + IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", + le32_to_cpu(dump_data.trig->reset_fw)); + } + } return 0; } @@ -1244,7 +1314,7 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt) } reg = (void *)(*active_reg)->data; - reg_type = le32_to_cpu(reg->type); + reg_type = reg->type; if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER || !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc)) diff --git a/iwl-drv.c b/iwl-drv.c index 36196e07b1a0..ab52136cffb4 100644 --- a/iwl-drv.c +++ b/iwl-drv.c @@ -130,6 +130,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); + + /* clear the data for the aborted load case */ + memset(&drv->fw, 0, sizeof(drv->fw)); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, @@ -163,8 +166,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) char tag[8]; if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 && - (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP && - CSR_HW_REV_STEP(drv->trans->hw_rev) != 
SILICON_C_STEP)) { + (drv->trans->hw_rev_step != SILICON_B_STEP && + drv->trans->hw_rev_step != SILICON_C_STEP)) { IWL_ERR(drv, "Only HW steps B and C are currently supported (0x%0x)\n", drv->trans->hw_rev); @@ -240,14 +243,14 @@ struct iwl_firmware_pieces { /* FW debug data parsed for driver usage */ bool dbg_dest_tlv_init; - u8 *dbg_dest_ver; + const u8 *dbg_dest_ver; union { - struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; - struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; + const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv; + const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1; }; - struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; + const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX]; - struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; + const struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; size_t n_mem_tlv; @@ -324,8 +327,9 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces, static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) { int i, j; - struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data; - struct iwl_fw_cipher_scheme *fwcs; + const struct iwl_fw_cscheme_list *l = + (const struct iwl_fw_cscheme_list *)data; + const struct iwl_fw_cipher_scheme *fwcs; if (len < sizeof(*l) || len < sizeof(l->size) + l->size * sizeof(l->cs[0])) @@ -353,13 +357,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, { struct fw_img_parsing *img; struct fw_sec *sec; - struct fw_sec_parsing *sec_parse; + const struct fw_sec_parsing *sec_parse; size_t alloc_size; if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX)) return -1; - sec_parse = (struct fw_sec_parsing *)data; + sec_parse = (const struct fw_sec_parsing *)data; img = &pieces->img[type]; @@ -382,8 +386,8 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces, static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) { - struct iwl_tlv_calib_data *def_calib = - (struct iwl_tlv_calib_data *)data; + const struct iwl_tlv_calib_data *def_calib = + (const struct iwl_tlv_calib_data *)data; u32 ucode_type = le32_to_cpu(def_calib->ucode_type); if (ucode_type >= IWL_UCODE_TYPE_MAX) { IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n", @@ -401,7 +405,7 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { - const struct iwl_ucode_api *ucode_api = (void *)data; + const struct iwl_ucode_api *ucode_api = (const void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); u32 api_flags = le32_to_cpu(ucode_api->api_flags); int i; @@ -422,7 +426,7 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, struct iwl_ucode_capabilities *capa) { - const struct iwl_ucode_capa *ucode_capa = (void *)data; + const struct iwl_ucode_capa *ucode_capa = (const void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); u32 api_flags = le32_to_cpu(ucode_capa->api_capa); int i; @@ -454,7 +458,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces) { - struct iwl_ucode_header *ucode = (void *)ucode_raw->data; + const struct iwl_ucode_header *ucode = (const void 
*)ucode_raw->data; u32 api_ver, hdr_size, build; char buildstr[25]; const u8 *src; @@ -586,13 +590,73 @@ static void iwl_drv_set_dump_exclude(struct iwl_drv *drv, excl->size = le32_to_cpu(fw->size); } +static void iwl_parse_dbg_tlv_assert_tables(struct iwl_drv *drv, + const struct iwl_ucode_tlv *tlv) +{ + const struct iwl_fw_ini_region_tlv *region; + u32 length = le32_to_cpu(tlv->length); + u32 addr; + + if (length < offsetof(typeof(*region), special_mem) + + sizeof(region->special_mem)) + return; + + region = (const void *)tlv->data; + addr = le32_to_cpu(region->special_mem.base_addr); + addr += le32_to_cpu(region->special_mem.offset); + addr &= ~FW_ADDR_CACHE_CONTROL; + + if (region->type != IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY) + return; + + switch (region->sub_type) { + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_UMAC_ERROR_TABLE: + drv->trans->dbg.umac_error_event_table = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_UMAC; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_1_ERROR_TABLE: + drv->trans->dbg.lmac_error_event_table[0] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_LMAC1; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_LMAC_2_ERROR_TABLE: + drv->trans->dbg.lmac_error_event_table[1] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_LMAC2; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_1_ERROR_TABLE: + drv->trans->dbg.tcm_error_event_table[0] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_TCM1; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_TCM_2_ERROR_TABLE: + drv->trans->dbg.tcm_error_event_table[1] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_TCM2; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_1_ERROR_TABLE: + drv->trans->dbg.rcm_error_event_table[0] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_RCM1; + break; + case IWL_FW_INI_REGION_DEVICE_MEMORY_SUBTYPE_RCM_2_ERROR_TABLE: + drv->trans->dbg.rcm_error_event_table[1] = addr; + drv->trans->dbg.error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_RCM2; + break; + default: + break; + } +} + static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, struct iwl_ucode_capabilities *capa, bool *usniffer_images) { - struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data; + const struct iwl_tlv_ucode_header *ucode = (const void *)ucode_raw->data; const struct iwl_ucode_tlv *tlv; size_t len = ucode_raw->size; const u8 *data; @@ -641,8 +705,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); - tlv = (void *)data; + tlv = (const void *)data; tlv_len = le32_to_cpu(tlv->length); tlv_type = le32_to_cpu(tlv->type); tlv_data = tlv->data; @@ -699,7 +763,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->max_probe_length = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_PAN: if (tlv_len) @@ -720,7 +784,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, * will not work with the new firmware, or * it'll not take advantage of new features. 
*/ - capa->flags = le32_to_cpup((__le32 *)tlv_data); + capa->flags = le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_API_CHANGES_SET: if (tlv_len != sizeof(struct iwl_ucode_api)) @@ -736,37 +800,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_ptr = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_evtlog_size = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_INIT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->init_errlog_ptr = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_ptr = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_evtlog_size = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_RUNT_ERRLOG_PTR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; pieces->inst_errlog_ptr = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_ENHANCE_SENS_TBL: if (tlv_len) @@ -795,7 +859,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->standard_phy_calibration_size = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, @@ -821,7 +885,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_PHY_SKU: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; - drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data); + drv->fw.phy_config = le32_to_cpup((const __le32 *)tlv_data); drv->fw.valid_tx_ant = (drv->fw.phy_config & FW_PHY_CFG_TX_CHAIN) >> FW_PHY_CFG_TX_CHAIN_POS; @@ -848,7 +912,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) goto invalid_tlv_len; num_of_cpus = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); if (num_of_cpus == 2) { drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus = @@ -870,10 +934,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(u32)) goto invalid_tlv_len; capa->n_scan_channels = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_FW_VERSION: { - __le32 *ptr = (void *)tlv_data; + const __le32 *ptr = (const void *)tlv_data; u32 major, minor; u8 local_comp; @@ -897,15 +961,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } case IWL_UCODE_TLV_FW_DBG_DEST: { - struct iwl_fw_dbg_dest_tlv *dest = NULL; - struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; + const struct iwl_fw_dbg_dest_tlv *dest = NULL; + const struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL; u8 mon_mode; - pieces->dbg_dest_ver = (u8 *)tlv_data; + pieces->dbg_dest_ver = (const u8 *)tlv_data; if (*pieces->dbg_dest_ver == 1) { - dest = (void *)tlv_data; + dest = (const void *)tlv_data; } else if (*pieces->dbg_dest_ver == 0) { - dest_v1 = (void *)tlv_data; + dest_v1 = (const void *)tlv_data; } else { IWL_ERR(drv, "The version is %d, and it is invalid\n", @@ -946,7 +1010,8 @@ static 
int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } case IWL_UCODE_TLV_FW_DBG_CONF: { - struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data; + const struct iwl_fw_dbg_conf_tlv *conf = + (const void *)tlv_data; if (!pieces->dbg_dest_tlv_init) { IWL_ERR(drv, @@ -980,8 +1045,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } case IWL_UCODE_TLV_FW_DBG_TRIGGER: { - struct iwl_fw_dbg_trigger_tlv *trigger = - (void *)tlv_data; + const struct iwl_fw_dbg_trigger_tlv *trigger = + (const void *)tlv_data; u32 trigger_id = le32_to_cpu(trigger->id); if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) { @@ -1012,7 +1077,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, } drv->fw.dbg.dump_mask = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; } case IWL_UCODE_TLV_SEC_RT_USNIFFER: @@ -1024,7 +1089,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_PAGING: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; - paging_mem_size = le32_to_cpup((__le32 *)tlv_data); + paging_mem_size = le32_to_cpup((const __le32 *)tlv_data); IWL_DEBUG_FW(drv, "Paging: paging enabled (size = %u bytes)\n", @@ -1054,8 +1119,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, /* ignored */ break; case IWL_UCODE_TLV_FW_MEM_SEG: { - struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = - (void *)tlv_data; + const struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = + (const void *)tlv_data; size_t size; struct iwl_fw_dbg_mem_seg_tlv *n; @@ -1083,10 +1148,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } case IWL_UCODE_TLV_FW_RECOVERY_INFO: { - struct { + const struct { __le32 buf_addr; __le32 buf_size; - } *recov_info = (void *)tlv_data; + } *recov_info = (const void *)tlv_data; if (tlv_len != sizeof(*recov_info)) goto invalid_tlv_len; @@ -1097,10 +1162,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, } break; case IWL_UCODE_TLV_FW_FSEQ_VERSION: { - struct { + const struct { u8 version[32]; u8 sha1[20]; - } *fseq_ver = (void *)tlv_data; + } *fseq_ver = (const void *)tlv_data; if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; @@ -1111,19 +1176,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_FW_NUM_STATIONS: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; - if (le32_to_cpup((__le32 *)tlv_data) > + if (le32_to_cpup((const __le32 *)tlv_data) > IWL_MVM_STATION_COUNT_MAX) { IWL_ERR(drv, "%d is an invalid number of station\n", - le32_to_cpup((__le32 *)tlv_data)); + le32_to_cpup((const __le32 *)tlv_data)); goto tlv_error; } capa->num_stations = - le32_to_cpup((__le32 *)tlv_data); + le32_to_cpup((const __le32 *)tlv_data); break; case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: { - struct iwl_umac_debug_addrs *dbg_ptrs = - (void *)tlv_data; + const struct iwl_umac_debug_addrs *dbg_ptrs = + (const void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; @@ -1138,8 +1203,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; } case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: { - struct iwl_lmac_debug_addrs *dbg_ptrs = - (void *)tlv_data; + const struct iwl_lmac_debug_addrs *dbg_ptrs = + (const void *)tlv_data; if (tlv_len != sizeof(*dbg_ptrs)) goto invalid_tlv_len; @@ -1153,21 +1218,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_ERROR_EVENT_TABLE_LMAC1; break; } - case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: { - struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data; - - if (tlv_len != sizeof(*ptr)) - goto invalid_tlv_len; - drv->trans->dbg.tcm_error_event_table = - 
le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL; - drv->trans->dbg.error_event_table_tlv_status |= - IWL_ERROR_EVENT_TABLE_TCM; - break; - } + case IWL_UCODE_TLV_TYPE_REGIONS: + iwl_parse_dbg_tlv_assert_tables(drv, tlv); + fallthrough; case IWL_UCODE_TLV_TYPE_DEBUG_INFO: case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: - case IWL_UCODE_TLV_TYPE_REGIONS: case IWL_UCODE_TLV_TYPE_TRIGGERS: case IWL_UCODE_TLV_TYPE_CONF_SET: if (iwlwifi_mod_params.enable_ini) @@ -1223,7 +1279,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (len) { IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len); - iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len); + iwl_print_hex_dump(drv, IWL_DL_FW, data, len); return -EINVAL; } @@ -1313,23 +1369,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) const struct iwl_op_mode_ops *ops = op->ops; struct dentry *dbgfs_dir = NULL; struct iwl_op_mode *op_mode = NULL; + int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY; + + for (retry = 0; retry <= max_retry; retry++) { #ifdef CONFIG_IWLWIFI_DEBUGFS - drv->dbgfs_op_mode = debugfs_create_dir(op->name, - drv->dbgfs_drv); - dbgfs_dir = drv->dbgfs_op_mode; + drv->dbgfs_op_mode = debugfs_create_dir(op->name, + drv->dbgfs_drv); + dbgfs_dir = drv->dbgfs_op_mode; #endif - op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir); + op_mode = ops->start(drv->trans, drv->trans->cfg, + &drv->fw, dbgfs_dir); + + if (op_mode) + return op_mode; + + IWL_ERR(drv, "retry init count %d\n", retry); #ifdef CONFIG_IWLWIFI_DEBUGFS - if (!op_mode) { debugfs_remove_recursive(drv->dbgfs_op_mode); drv->dbgfs_op_mode = NULL; - } #endif + } - return op_mode; + return NULL; } static void _iwl_op_mode_stop(struct iwl_drv *drv) @@ -1356,7 +1420,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) { struct iwl_drv *drv = context; struct iwl_fw *fw = &drv->fw; - struct iwl_ucode_header *ucode; + const struct iwl_ucode_header *ucode; struct iwlwifi_opmode_table *op; int err; struct iwl_firmware_pieces *pieces; @@ -1367,6 +1431,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int i; bool load_module = false; bool usniffer_images = false; + bool failure = true; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = @@ -1393,7 +1458,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) } /* Data from ucode file: header followed by uCode images */ - ucode = (struct iwl_ucode_header *)ucode_raw->data; + ucode = (const struct iwl_ucode_header *)ucode_raw->data; if (ucode->ver) err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces); @@ -1582,6 +1647,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); + iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans); + mutex_lock(&iwlwifi_opmode_table_mtx); switch (fw->type) { case IWL_FW_DVM: @@ -1598,8 +1665,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version, op->name); - iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans); - /* add this device to the list of devices using this op_mode */ list_add_tail(&drv->list, &op->drv); @@ -1627,15 +1692,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * else from 
proceeding if the module fails to load * or hangs loading. */ - if (load_module) { + if (load_module) request_module("%s", op->name); -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR - if (err) - IWL_ERR(drv, - "failed to load module %s (error %d), is dynamic loading enabled?\n", - op->name, err); -#endif - } + failure = false; goto free; try_again: @@ -1651,6 +1710,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); free: + if (failure) + iwl_dealloc_ucode(drv); + if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); diff --git a/iwl-drv.h b/iwl-drv.h index 2e2d60a58692..80073f973334 100644 --- a/iwl-drv.h +++ b/iwl-drv.h @@ -84,9 +84,12 @@ void iwl_drv_stop(struct iwl_drv *drv); * everything is built-in, then we can avoid that. */ #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR -#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym) +#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_NS_GPL(sym, IWLWIFI) #else #define IWL_EXPORT_SYMBOL(sym) #endif +/* max retry for init flow */ +#define IWL_MAX_INIT_RETRY 2 + #endif /* __iwl_drv_h__ */ diff --git a/iwl-eeprom-read.c b/iwl-eeprom-read.c index b9e86bf972e5..5f386bb1a353 100644 --- a/iwl-eeprom-read.c +++ b/iwl-eeprom-read.c @@ -23,26 +23,22 @@ */ #define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ -#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ -#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - - /* * The device's EEPROM semaphore prevents conflicts between driver and uCode * when accessing the EEPROM; each access is a series of pulses to/from the * EEPROM chip, not a single event, so even reads could conflict if they * weren't arbitrated by the semaphore. 
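/*
 * The acquire pattern the comment above describes, reduced to a
 * standalone sketch: assert the ownership-request bit, poll briefly for
 * the grant, and retry up to a bounded attempt count. Register access
 * is stubbed out; only the loop shape mirrors
 * iwl_eeprom_acquire_semaphore().
 */
#include <stdbool.h>
#include <stdio.h>

#define SEM_RETRY_LIMIT 1000	/* attempts, not time */
#define SEM_POLL_USEC   10

static void hw_request_sem(void)
{
	/* driver: iwl_set_bit(..., CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM) */
}

static bool hw_poll_sem(int usec)
{
	(void)usec;	/* driver: iwl_poll_bit() on the same bit */
	return true;	/* stub: pretend the uCode released the EEPROM */
}

static int acquire_semaphore(void)
{
	for (int count = 0; count < SEM_RETRY_LIMIT; count++) {
		hw_request_sem();
		if (hw_poll_sem(SEM_POLL_USEC)) {
			printf("Acquired semaphore after %d tries.\n",
			       count + 1);
			return 0;
		}
	}
	return -1;	/* all retries exhausted */
}

int main(void)
{
	return acquire_semaphore();
}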
*/ +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ -#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ -#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) { u16 count; int ret; - for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) { /* Request semaphore */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); @@ -51,7 +47,7 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - EEPROM_SEM_TIMEOUT); + IWL_EEPROM_SEM_TIMEOUT); if (ret >= 0) { IWL_DEBUG_EEPROM(trans->dev, "Acquired semaphore after %d tries.\n", @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2015-2017 Intel Deutschland GmbH */ #ifndef __iwl_fh_h__ @@ -580,7 +580,7 @@ struct iwl_rb_status { __le16 closed_fr_num; __le16 finished_rb_num; __le16 finished_fr_nam; - __le32 __unused; + __le32 __spare; } __packed; @@ -590,11 +590,31 @@ struct iwl_rb_status { #define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) -#define TFD_QUEUE_BC_SIZE_GEN3 1024 +#define TFD_QUEUE_BC_SIZE_GEN3_AX210 1024 +#define TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4) #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) #define IWL_NUM_OF_TBS 20 #define IWL_TFH_NUM_TBS 25 +/* IMR DMA registers */ +#define IMR_TFH_SRV_DMA_CHNL0_CTRL 0x00a0a51c +#define IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR 0x00a0a520 +#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB 0x00a0a524 +#define IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB 0x00a0a528 +#define IMR_TFH_SRV_DMA_CHNL0_BC 0x00a0a52c +#define TFH_SRV_DMA_CHNL0_LEFT_BC 0x00a0a530 + +/* RFH S2D DMA registers */ +#define IMR_RFH_GEN_CFG_SERVICE_DMA_RS_MSK 0x0000000c +#define IMR_RFH_GEN_CFG_SERVICE_DMA_SNOOP_MSK 0x00000002 + +/* TFH D2S DMA registers */ +#define IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK 0x80000000 +#define IMR_UREG_CHICK 0x00d05c00 +#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS 0x00800000 +#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK 0x00000030 +#define IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS 0x80000000 + static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr) { return (sizeof(addr) > sizeof(u32) ? 
upper_32_bits(addr) : 0) & 0xF; @@ -707,14 +727,14 @@ struct iwlagn_scd_bc_tbl { } __packed; /** - * struct iwl_gen3_bc_tbl scheduler byte count table gen3 + * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3 * For AX210 and on: * @tfd_offset: 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved */ -struct iwl_gen3_bc_tbl { - __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3]; +struct iwl_gen3_bc_tbl_entry { + __le16 tfd_offset; } __packed; #endif /* !__iwl_fh_h__ */ @@ -65,14 +65,14 @@ IWL_EXPORT_SYMBOL(iwl_poll_bit); u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) { - u32 value = 0x5a5a5a5a; - if (iwl_trans_grab_nic_access(trans)) { - value = iwl_read32(trans, reg); + u32 value = iwl_read32(trans, reg); + iwl_trans_release_nic_access(trans); + return value; } - return value; + return 0x5a5a5a5a; } IWL_EXPORT_SYMBOL(iwl_read_direct32); @@ -135,13 +135,15 @@ IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) { - u32 val = 0x5a5a5a5a; - if (iwl_trans_grab_nic_access(trans)) { - val = iwl_read_prph_no_grab(trans, ofs); + u32 val = iwl_read_prph_no_grab(trans, ofs); + iwl_trans_release_nic_access(trans); + + return val; } - return val; + + return 0x5a5a5a5a; } IWL_EXPORT_SYMBOL(iwl_read_prph); @@ -218,7 +220,7 @@ void iwl_force_nmi(struct iwl_trans *trans) UREG_DOORBELL_TO_ISR6_NMI_BIT); else iwl_write32(trans, CSR_DOORBELL_VECTOR, - CSR_DOORBELL_VECTOR_NMI); + UREG_DOORBELL_TO_ISR6_NMI_BIT); } IWL_EXPORT_SYMBOL(iwl_force_nmi); diff --git a/iwl-nvm-parse.c b/iwl-nvm-parse.c index f470f9aea50f..0dfd69fcd5d7 100644 --- a/iwl-nvm-parse.c +++ b/iwl-nvm-parse.c @@ -22,6 +22,7 @@ #include "fw/api/commands.h" #include "fw/api/cmdhdr.h" #include "fw/img.h" +#include "mei/iwl-mei.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -374,10 +375,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, if (v4) ch_flags = - __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx); + __le32_to_cpup((const __le32 *)nvm_ch_flags + ch_idx); else ch_flags = - __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx); + __le16_to_cpup((const __le16 *)nvm_ch_flags + ch_idx); if (band == NL80211_BAND_5GHZ && !data->sku_cap_band_52ghz_enable) @@ -583,9 +584,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, .phy_cap_info[3] = - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | @@ -607,7 +608,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, + (IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED << + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS), .phy_cap_info[10] = IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF, }, @@ -652,9 +654,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | 
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, @@ -664,7 +666,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242, .phy_cap_info[9] = - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED + << IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS, }, /* * Set default Tx/Rx HE MCS NSS Support field. @@ -729,7 +732,7 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans, IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa); /* we know it's writable - we set it before ourselves */ - iftype_data = (void *)sband->iftype_data; + iftype_data = (void *)(uintptr_t)sband->iftype_data; for (i = 0; i < sband->n_iftype_data; i++) iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa); } @@ -781,6 +784,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) { case IWL_CFG_RF_TYPE_GF: case IWL_CFG_RF_TYPE_MR: + case IWL_CFG_RF_TYPE_MS: iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; if (!is_ap) @@ -909,7 +913,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + SKU); - return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); + return le32_to_cpup((const __le32 *)(phy_sku + SKU_FAMILY_8000)); } static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) @@ -917,8 +921,8 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + NVM_VERSION); else - return le32_to_cpup((__le32 *)(nvm_sw + - NVM_VERSION_EXT_NVM)); + return le32_to_cpup((const __le32 *)(nvm_sw + + NVM_VERSION_EXT_NVM)); } static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, @@ -927,7 +931,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + RADIO_CFG); - return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); + return le32_to_cpup((const __le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); } @@ -938,7 +942,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) if (cfg->nvm_type != IWL_NVM_EXT) return le16_to_cpup(nvm_sw + N_HW_ADDRS); - n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); + n_hw_addr = le32_to_cpup((const __le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); return n_hw_addr & N_HW_ADDR_MASK; } @@ -1077,7 +1081,9 @@ static int iwl_set_hw_address(struct iwl_trans *trans, return -EINVAL; } - IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr); + if (!trans->csme_own) + IWL_INFO(trans, "base HW address: %pM, OTP minor version: 0x%x\n", + data->hw_addr, iwl_read_prph(trans, REG_OTP_MINOR)); return 0; } @@ -1115,6 +1121,66 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, } struct iwl_nvm_data * +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_mei_nvm *mei_nvm, + const struct iwl_fw *fw) +{ + struct iwl_nvm_data *data; + u32 sbands_flags = 0; + u8 rx_chains = fw->valid_rx_ant; + u8 tx_chains = fw->valid_rx_ant; + + if 
(cfg->uhb_supported) + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_UHB), + GFP_KERNEL); + else + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_EXT), + GFP_KERNEL); + if (!data) + return NULL; + + BUILD_BUG_ON(ARRAY_SIZE(mei_nvm->channels) != + IWL_NVM_NUM_CHANNELS_UHB); + data->nvm_version = mei_nvm->nvm_version; + + iwl_set_radio_cfg(cfg, data, mei_nvm->radio_cfg); + if (data->valid_tx_ant) + tx_chains &= data->valid_tx_ant; + if (data->valid_rx_ant) + rx_chains &= data->valid_rx_ant; + + data->sku_cap_mimo_disabled = false; + data->sku_cap_band_24ghz_enable = true; + data->sku_cap_band_52ghz_enable = true; + data->sku_cap_11n_enable = + !(iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL); + data->sku_cap_11ac_enable = true; + data->sku_cap_11ax_enable = + mei_nvm->caps & MEI_NVM_CAPS_11AX_SUPPORT; + + data->lar_enabled = mei_nvm->caps & MEI_NVM_CAPS_LARI_SUPPORT; + + data->n_hw_addrs = mei_nvm->n_hw_addrs; + /* If no valid mac address was found - bail out */ + if (iwl_set_hw_address(trans, cfg, data, NULL, NULL)) { + kfree(data); + return NULL; + } + + if (data->lar_enabled && + fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + + iwl_init_sbands(trans, data, mei_nvm->channels, tx_chains, rx_chains, + sbands_flags, true, fw); + + return data; +} +IWL_EXPORT_SYMBOL(iwl_parse_mei_nvm_data); + +struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, @@ -1322,8 +1388,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, nvm_chan = iwl_nvm_channels; } - if (WARN_ON(num_of_ch > max_num_ch)) + if (num_of_ch > max_num_ch) { + IWL_DEBUG_DEV(dev, IWL_DL_LAR, + "Num of channels (%d) is greater than expected. Truncating to %d\n", + num_of_ch, max_num_ch); num_of_ch = max_num_ch; + } if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) return ERR_PTR(-EINVAL); @@ -1529,7 +1599,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans, } eof = fw_entry->data + fw_entry->size; - dword_buff = (__le32 *)fw_entry->data; + dword_buff = (const __le32 *)fw_entry->data; /* some NVM file will contain a header. 
* The header is identified by 2 dwords header as follow: @@ -1541,20 +1611,20 @@ int iwl_read_external_nvm(struct iwl_trans *trans, if (fw_entry->size > NVM_HEADER_SIZE && dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { - file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); + file_sec = (const void *)(fw_entry->data + NVM_HEADER_SIZE); IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); IWL_INFO(trans, "NVM Manufacturing date %08X\n", le32_to_cpu(dword_buff[3])); /* nvm file validation, dword_buff[2] holds the file version */ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP && + trans->hw_rev_step == SILICON_C_STEP && le32_to_cpu(dword_buff[2]) < 0xE4A) { ret = -EFAULT; goto out; } } else { - file_sec = (void *)fw_entry->data; + file_sec = (const void *)fw_entry->data; } while (true) { @@ -1622,7 +1692,7 @@ int iwl_read_external_nvm(struct iwl_trans *trans, nvm_sections[section_id].length = section_size; /* advance to the next section */ - file_sec = (void *)(file_sec->data + section_size); + file_sec = (const void *)(file_sec->data + section_size); } out: release_firmware(fw_entry); diff --git a/iwl-nvm-parse.h b/iwl-nvm-parse.h index e1f5a9741850..e01f7751cf11 100644 --- a/iwl-nvm-parse.h +++ b/iwl-nvm-parse.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2020 Intel Corporation + * Copyright (C) 2005-2015, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ @@ -8,6 +8,7 @@ #include <net/cfg80211.h> #include "iwl-eeprom-parse.h" +#include "mei/iwl-mei.h" /** * enum iwl_nvm_sbands_flags - modification flags for the channel profiles @@ -81,4 +82,12 @@ void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, const struct iwl_fw *fw); +/** + * iwl_parse_mei_nvm_data - parse the mei_nvm_data and get an iwl_nvm_data + */ +struct iwl_nvm_data * +iwl_parse_mei_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_mei_nvm *mei_nvm, + const struct iwl_fw *fw); + #endif /* __iwl_nvm_parse_h__ */ diff --git a/iwl-phy-db.c b/iwl-phy-db.c index 5378315d0179..0a93ac769f66 100644 --- a/iwl-phy-db.c +++ b/iwl-phy-db.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2020 Intel Corporation + * Copyright (C) 2005-2014, 2020-2021 Intel Corporation * Copyright (C) 2016 Intel Deutschland GmbH */ #include <linux/slab.h> @@ -13,8 +13,6 @@ #include "iwl-op-mode.h" #include "iwl-trans.h" -#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */ - struct iwl_phy_db_entry { u16 size; u8 *data; diff --git a/iwl-prph.h b/iwl-prph.h index a84ab02cf9d7..a22788a68168 100644 --- a/iwl-prph.h +++ b/iwl-prph.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -347,19 +347,17 @@ #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 -#define WFPM_CTRL_REG_GEN2 0xd03030 #define WFPM_OTP_CFG1_ADDR 0x00a03098 -#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098 #define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) #define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) #define 
WFPM_GP2 0xA030B4 /* DBGI SRAM Register details */ -#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C -#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000 #define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154 #define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158 +#define DBGI_SRAM_FIFO_POINTERS 0x00A2E148 +#define DBGI_SRAM_FIFO_POINTERS_WR_PTR_MSK 0x00000FFF enum { ENABLE_WFPM = BIT(31), @@ -388,6 +386,11 @@ enum { #define UREG_LMAC1_CURRENT_PC 0xa05c1c #define UREG_LMAC2_CURRENT_PC 0xa05c20 +#define WFPM_LMAC1_PD_NOTIFICATION 0xa0338c +#define WFPM_ARC1_PD_NOTIFICATION 0xa03044 +#define HPM_SECONDARY_DEVICE_STATE 0xa03404 + + /* For UMAG_GEN_HW_STATUS reg check */ enum { UMAG_GEN_HW_IS_FPGA = BIT(1), @@ -455,6 +458,13 @@ enum { #define UREG_DOORBELL_TO_ISR6_RESUME BIT(19) #define UREG_DOORBELL_TO_ISR6_PNVM BIT(20) +/* + * From BZ family driver triggers this bit for suspend and resume + * The driver should update CSR_IPC_SLEEP_CONTROL before triggering + * this interrupt with suspend/resume value + */ +#define UREG_DOORBELL_TO_ISR6_SLEEP_CTRL BIT(31) + #define CNVI_MBOX_C 0xA3400C #define FSEQ_ERROR_CODE 0xA340C8 @@ -486,4 +496,6 @@ enum { #define HBUS_TIMEOUT 0xA5A5A5A1 #define WFPM_DPHY_OFF 0xDF10FF +#define REG_OTP_MINOR 0xA0333C + #endif /* __iwl_prph_h__ */ diff --git a/iwl-trans.c b/iwl-trans.c index 9236f9106826..b1af9359cea5 100644 --- a/iwl-trans.c +++ b/iwl-trans.c @@ -78,8 +78,12 @@ int iwl_trans_init(struct iwl_trans *trans) if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) return -EINVAL; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + trans->txqs.bc_tbl_size = + sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + trans->txqs.bc_tbl_size = + sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; else trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); /* @@ -203,10 +207,10 @@ IWL_EXPORT_SYMBOL(iwl_trans_send_cmd); static int iwl_hcmd_names_cmp(const void *key, const void *elt) { const struct iwl_hcmd_names *name = elt; - u8 cmd1 = *(u8 *)key; + const u8 *cmd1 = key; u8 cmd2 = name->cmd_id; - return (cmd1 - cmd2); + return (*cmd1 - cmd2); } const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id) diff --git a/iwl-trans.h b/iwl-trans.h index 4ebb1871bd1f..d659ccd065f7 100644 --- a/iwl-trans.h +++ b/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -193,7 +193,10 @@ enum iwl_error_event_table_status { IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0), IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1), IWL_ERROR_EVENT_TABLE_UMAC = BIT(2), - IWL_ERROR_EVENT_TABLE_TCM = BIT(3), + IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3), + IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4), + IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5), + IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6), }; /** @@ -296,6 +299,8 @@ enum iwl_d3_status { * are sent * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation + * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once, + * e.g. 
for testing */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -308,6 +313,7 @@ enum iwl_trans_status { STATUS_TRANS_GOING_IDLE, STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, + STATUS_SUPPRESS_CMD_ERROR_ONCE, }; static inline int @@ -400,6 +406,9 @@ struct iwl_dump_sanitize_ops { * @cb_data_offs: offset inside skb->cb to store transport data at, must have * space for at least two pointers * @fw_reset_handshake: firmware supports reset flow handshake + * @queue_alloc_cmd_ver: queue allocation command version, set to 0 + * for using the older SCD_QUEUE_CFG, set to the version of + * SCD_QUEUE_CONFIG_CMD otherwise. */ struct iwl_trans_config { struct iwl_op_mode *op_mode; @@ -418,6 +427,7 @@ struct iwl_trans_config { u8 cb_data_offs; bool fw_reset_handshake; + u8 queue_alloc_cmd_ver; }; struct iwl_trans_dump_data { @@ -563,10 +573,9 @@ struct iwl_trans_ops { void (*txq_disable)(struct iwl_trans *trans, int queue, bool configure_scd); /* 22000 functions */ - int (*txq_alloc)(struct iwl_trans *trans, - __le16 flags, u8 sta_id, u8 tid, - int cmd_id, int size, - unsigned int queue_wdg_timeout); + int (*txq_alloc)(struct iwl_trans *trans, u32 flags, + u32 sta_mask, u8 tid, + int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); int (*rxq_dma_data)(struct iwl_trans *trans, int queue, struct iwl_trans_rxq_dma_data *data); @@ -593,7 +602,7 @@ struct iwl_trans_ops { void (*configure)(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg); void (*set_pmi)(struct iwl_trans *trans, bool state); - void (*sw_reset)(struct iwl_trans *trans); + int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership); bool (*grab_nic_access)(struct iwl_trans *trans); void (*release_nic_access)(struct iwl_trans *trans); void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, @@ -609,6 +618,10 @@ struct iwl_trans_ops { int (*set_reduce_power)(struct iwl_trans *trans, const void *data, u32 len); void (*interrupts)(struct iwl_trans *trans, bool enable); + int (*imr_dma_data)(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, + u32 byte_cnt); + }; /** @@ -716,6 +729,26 @@ struct iwl_self_init_dram { }; /** + * struct iwl_imr_data - imr dram data used during debug process + * @imr_enable: imr enable status received from fw + * @imr_size: imr dram size received from fw + * @sram_addr: sram address from debug tlv + * @sram_size: sram size from debug tlv + * @imr2sram_remainbyte`: size remained after each dma transfer + * @imr_curr_addr: current dst address used during dma transfer + * @imr_base_addr: imr address received from fw + */ +struct iwl_imr_data { + u32 imr_enable; + u32 imr_size; + u32 sram_addr; + u32 sram_size; + u32 imr2sram_remainbyte; + u64 imr_curr_addr; + __le64 imr_base_addr; +}; + +/** * struct iwl_trans_debug - transport debug related data * * @n_dest_reg: num of reg_ops in %dbg_dest_tlv @@ -725,7 +758,8 @@ struct iwl_self_init_dram { * @trigger_tlv: array of pointers to triggers TLVs for debug * @lmac_error_event_table: addrs of lmacs error tables * @umac_error_event_table: addr of umac error table - * @tcm_error_event_table: address of TCM error table + * @tcm_error_event_table: address(es) of TCM error table(s) + * @rcm_error_event_table: address(es) of RCM error table(s) * @error_event_table_tlv_status: bitmap that indicates what error table * pointers was recevied via TLV. uses enum &iwl_error_event_table_status * @internal_ini_cfg: internal debug cfg state. 
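/*
 * A toy model of the error-table bookkeeping extended above: each
 * TCM/RCM core gets its own table address, and a bitmap records which
 * pointers actually arrived via TLV. The flag values follow
 * iwl_error_event_table_status; the surrounding code is illustrative,
 * not driver API.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	ERR_TBL_TCM1 = 1u << 3,
	ERR_TBL_TCM2 = 1u << 4,
	ERR_TBL_RCM1 = 1u << 5,
	ERR_TBL_RCM2 = 1u << 6,
};

struct toy_dbg {
	uint32_t tcm_error_event_table[2];
	uint32_t rcm_error_event_table[2];
	unsigned int tlv_status;	/* which table pointers were seen */
};

static void record_tcm(struct toy_dbg *dbg, int idx, uint32_t addr)
{
	dbg->tcm_error_event_table[idx] = addr;
	dbg->tlv_status |= idx ? ERR_TBL_TCM2 : ERR_TBL_TCM1;
}

int main(void)
{
	struct toy_dbg dbg = { 0 };

	record_tcm(&dbg, 0, 0x00a03000);
	if (dbg.tlv_status & ERR_TBL_TCM1)
		printf("TCM1 error table at 0x%08x\n",
		       dbg.tcm_error_event_table[0]);
	return 0;
}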
Uses &enum iwl_ini_cfg_state @@ -752,7 +786,8 @@ struct iwl_trans_debug { u32 lmac_error_event_table[2]; u32 umac_error_event_table; - u32 tcm_error_event_table; + u32 tcm_error_event_table[2]; + u32 rcm_error_event_table[2]; unsigned int error_event_table_tlv_status; enum iwl_ini_cfg_state internal_ini_cfg; @@ -775,6 +810,9 @@ struct iwl_trans_debug { u32 domains_bitmap; u32 ucode_preset; + bool restart_required; + u32 last_tp_resetfw; + struct iwl_imr_data imr_data; }; struct iwl_dma_ptr { @@ -894,6 +932,7 @@ struct iwl_txq { * @queue_used - bit mask of used queues * @queue_stopped - bit mask of stopped queues * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler + * @queue_alloc_cmd_ver: queue allocation command version */ struct iwl_trans_txqs { unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; @@ -919,11 +958,14 @@ struct iwl_trans_txqs { } tfd; struct iwl_dma_ptr scd_bc_tbls; + + u8 queue_alloc_cmd_ver; }; /** * struct iwl_trans - transport common data * + * @csme_own - true if we couldn't get ownership on the device * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode * @trans_cfg: the trans-specific configuration part @@ -937,6 +979,7 @@ struct iwl_trans_txqs { * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. + * @hw_rev_step: The mac step of the HW * @pm_support: set to true in start_hw if link pm is supported * @ltr_enabled: set to true if the LTR is enabled * @wide_cmd_header: true when ucode supports wide command header format @@ -958,6 +1001,7 @@ struct iwl_trans_txqs { * @iwl_trans_txqs: transport tx queues data. */ struct iwl_trans { + bool csme_own; const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg_trans_params *trans_cfg; @@ -969,6 +1013,7 @@ struct iwl_trans { struct device *dev; u32 max_skb_frags; u32 hw_rev; + u32 hw_rev_step; u32 hw_rf_id; u32 hw_id; char hw_id_str[52]; @@ -1206,9 +1251,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue) static inline int iwl_trans_txq_alloc(struct iwl_trans *trans, - __le16 flags, u8 sta_id, u8 tid, - int cmd_id, int size, - unsigned int wdg_timeout) + u32 flags, u32 sta_mask, u8 tid, + int size, unsigned int wdg_timeout) { might_sleep(); @@ -1220,8 +1264,8 @@ iwl_trans_txq_alloc(struct iwl_trans *trans, return -EIO; } - return trans->ops->txq_alloc(trans, flags, sta_id, tid, - cmd_id, size, wdg_timeout); + return trans->ops->txq_alloc(trans, flags, sta_mask, tid, + size, wdg_timeout); } static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, @@ -1354,6 +1398,15 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ } while (0) +static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, + u32 byte_cnt) +{ + if (trans->ops->imr_dma_data) + return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt); + return 0; +} + static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { u32 value; @@ -1382,10 +1435,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) trans->ops->set_pmi(trans, state); } -static inline void iwl_trans_sw_reset(struct iwl_trans *trans) +static inline int iwl_trans_sw_reset(struct iwl_trans *trans, + bool retake_ownership) { if (trans->ops->sw_reset) - trans->ops->sw_reset(trans); + return trans->ops->sw_reset(trans, 
retake_ownership); + return 0; } static inline void diff --git a/mvm/constants.h b/mvm/constants.h index 9d0d01f27d92..c604f9f39b24 100644 --- a/mvm/constants.h +++ b/mvm/constants.h @@ -107,7 +107,7 @@ #define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true -#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 +#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20 #define IWL_MVM_USE_NSSN_SYNC 0 #define IWL_MVM_PHY_FILTER_CHAIN_A 0 #define IWL_MVM_PHY_FILTER_CHAIN_B 0 diff --git a/mvm/d3.c b/mvm/d3.c new file mode 100644 index 000000000000..a995bba0ba81 --- /dev/null +++ b/mvm/d3.c @@ -0,0 +1,2797 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/fs.h> +#include <net/cfg80211.h> +#include <net/ipv6.h> +#include <net/tcp.h> +#include <net/addrconf.h> +#include "iwl-modparams.h" +#include "fw-api.h" +#include "mvm.h" +#include "fw/img.h" + +void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mutex_lock(&mvm->mutex); + + mvmvif->rekey_data.kek_len = data->kek_len; + mvmvif->rekey_data.kck_len = data->kck_len; + memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len); + memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len); + mvmvif->rekey_data.akm = data->akm & 0xFF; + mvmvif->rekey_data.replay_ctr = + cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr)); + mvmvif->rekey_data.valid = true; + + mutex_unlock(&mvm->mutex); +} + +#if IS_ENABLED(CONFIG_IPV6) +void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct inet6_ifaddr *ifa; + int idx = 0; + + memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs)); + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + mvmvif->target_ipv6_addrs[idx] = ifa->addr; + if (ifa->flags & IFA_F_TENTATIVE) + __set_bit(idx, mvmvif->tentative_addrs); + idx++; + if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) + break; + } + read_unlock_bh(&idev->lock); + + mvmvif->num_target_ipv6_addrs = idx; +} +#endif + +void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int idx) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + mvmvif->tx_key_idx = idx; +} + +static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out) +{ + int i; + + for (i = 0; i < IWL_P1K_SIZE; i++) + out[i] = cpu_to_le16(p1k[i]); +} + +static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key, + struct iwl_mvm_key_pn *ptk_pn, + struct ieee80211_key_seq *seq, + int tid, int queues) +{ + const u8 *ret = seq->ccmp.pn; + int i; + + /* get the PN from mac80211, used on the default queue */ + ieee80211_get_key_rx_seq(key, tid, seq); + + /* and use the internal data for the other queues */ + for (i = 1; i < queues; i++) { + const u8 *tmp = ptk_pn->q[i].pn[tid]; + + if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0) + ret = tmp; + } + + return ret; +} + +struct wowlan_key_reprogram_data { + bool error; + int wep_key_idx; +}; + +static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, + 
struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct wowlan_key_reprogram_data *data = _data; + int ret; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */ + struct { + struct iwl_mvm_wep_key_cmd wep_key_cmd; + struct iwl_mvm_wep_key wep_key; + } __packed wkc = { + .wep_key_cmd.mac_id_n_color = + cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .wep_key_cmd.num_keys = 1, + /* firmware sets STA_KEY_FLG_WEP_13BYTES */ + .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP, + .wep_key.key_index = key->keyidx, + .wep_key.key_size = key->keylen, + }; + + /* + * This will fail -- the key functions don't set support + * pairwise WEP keys. However, that's better than silently + * failing WoWLAN. Or maybe not? + */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + break; + + memcpy(&wkc.wep_key.key[3], key->key, key->keylen); + if (key->keyidx == mvmvif->tx_key_idx) { + /* TX key must be at offset 0 */ + wkc.wep_key.key_offset = 0; + } else { + /* others start at 1 */ + data->wep_key_idx++; + wkc.wep_key.key_offset = data->wep_key_idx; + } + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc); + data->error = ret != 0; + + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + mutex_unlock(&mvm->mutex); + + /* don't upload key again */ + return; + } + default: + data->error = true; + return; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + return; + case WLAN_CIPHER_SUITE_AES_CMAC: + /* + * Ignore CMAC keys -- the WoWLAN firmware doesn't support them + * but we also shouldn't abort suspend due to that. It does have + * support for the IGTK key renewal, but doesn't really use the + * IGTK for anything. This means we could spuriously wake up or + * be deauthenticated, but that was considered acceptable. + */ + return; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + } + + mutex_lock(&mvm->mutex); + /* + * The D3 firmware hardcodes the key offset 0 as the key it + * uses to transmit packets to the AP, i.e. the PTK. + */ + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { + mvm->ptk_ivlen = key->iv_len; + mvm->ptk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); + } else { + /* + * firmware only supports TSC/RSC for a single key, + * so if there are multiple keep overwriting them + * with new ones -- this relies on mac80211 doing + * list_add_tail(). 
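/*
 * The offset convention the WoWLAN code above relies on, as a toy
 * model: the D3 firmware pins the PTK at key offset 0 and keeps a
 * single group-key slot, so iterating keys oldest-first (mac80211's
 * list_add_tail() order) and overwriting the group slot each time
 * leaves the newest GTK programmed. Everything here is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_key {
	int keyidx;
	bool pairwise;
};

int main(void)
{
	const struct toy_key keys[] = {
		{ .keyidx = 1, .pairwise = false },	/* old GTK */
		{ .keyidx = 0, .pairwise = true },	/* PTK */
		{ .keyidx = 2, .pairwise = false },	/* rekeyed GTK */
	};
	int slots[2] = { -1, -1 };	/* hw offset 0 = PTK, 1 = GTK */

	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		slots[keys[i].pairwise ? 0 : 1] = keys[i].keyidx;

	printf("PTK idx %d at offset 0, GTK idx %d at offset 1\n",
	       slots[0], slots[1]);	/* newest GTK (idx 2) wins */
	return 0;
}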
+ */ + mvm->gtk_ivlen = key->iv_len; + mvm->gtk_icvlen = key->icv_len; + ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); + } + mutex_unlock(&mvm->mutex); + data->error = ret != 0; +} + +struct wowlan_key_rsc_tsc_data { + struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc; + bool have_rsc_tsc; +}; + +static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct wowlan_key_rsc_tsc_data *data = _data; + struct aes_sc *aes_sc; + struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL; + struct ieee80211_key_seq seq; + int i; + + switch (key->cipher) { + default: + break; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + u64 pn64; + + tkip_sc = + data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc; + tkip_tx_sc = + &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc; + + pn64 = atomic64_read(&key->tx_pn); + tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); + tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); + } else { + tkip_sc = + data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWL_NUM_RSC; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16); + tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32); + } + + data->have_rsc_tsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (sta) { + struct aes_sc *aes_tx_sc; + u64 pn64; + + aes_sc = + data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc; + aes_tx_sc = + &data->rsc_tsc->params.all_tsc_rsc.aes.tsc; + + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); + } else { + aes_sc = + data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc; + } + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211/our RX code use TID 0 for checking the PN. 
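/*
 * Sketch of the "largest PN wins" selection that iwl_mvm_find_max_pn()
 * performs across RX queues: CCMP/GCMP PNs are 6-byte big-endian
 * counters, so a lexicographic memcmp() picks the most recent one.
 * Standalone illustrative version with faked per-queue data.
 */
#include <stdio.h>
#include <string.h>

#define PN_LEN 6

static const unsigned char *max_pn(const unsigned char (*pns)[PN_LEN],
				   int queues)
{
	const unsigned char *best = pns[0];

	for (int i = 1; i < queues; i++)
		if (memcmp(best, pns[i], PN_LEN) <= 0)
			best = pns[i];
	return best;
}

int main(void)
{
	const unsigned char pns[2][PN_LEN] = {
		{ 0, 0, 0, 0, 1, 9 },
		{ 0, 0, 0, 0, 2, 0 },	/* higher counter, should win */
	};

	printf("max PN ends in %u\n", max_pn(pns, 2)[PN_LEN - 1]);
	return 0;
}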
+ */ + if (sta && iwl_mvm_has_new_rx_api(mvm)) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; + const u8 *pn; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + rcu_read_lock(); + ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); + if (WARN_ON(!ptk_pn)) { + rcu_read_unlock(); + break; + } + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, + mvm->trans->num_rx_queues); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + + rcu_read_unlock(); + } else { + for (i = 0; i < IWL_NUM_RSC; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + } + data->have_rsc_tsc = true; + break; + } +} + +struct wowlan_key_rsc_v5_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc; + bool have_rsc; + int gtks; + int gtk_ids[4]; +}; + +static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct wowlan_key_rsc_v5_data *data = _data; + struct ieee80211_key_seq seq; + __le64 *rsc; + int i; + + /* only for ciphers that can be PTK/GTK */ + switch (key->cipher) { + default: + return; + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + break; + } + + if (sta) { + rsc = data->rsc->ucast_rsc; + } else { + if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids))) + return; + data->gtk_ids[data->gtks] = key->keyidx; + rsc = data->rsc->mcast_rsc[data->gtks % 2]; + if (WARN_ON(key->keyidx >= + ARRAY_SIZE(data->rsc->mcast_key_id_map))) + return; + data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2; + if (data->gtks >= 2) { + int prev = data->gtks - 2; + int prev_idx = data->gtk_ids[prev]; + + data->rsc->mcast_key_id_map[prev_idx] = + IWL_MCAST_KEY_MAP_INVALID; + } + data->gtks++; + } + + switch (key->cipher) { + default: + WARN_ON(1); + break; + case WLAN_CIPHER_SUITE_TKIP: + + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211 use TID 0 (as they need to to avoid replay attacks) + * for checking the IV in the frames. + */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + + rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) | + seq.tkip.iv16); + } + + data->have_rsc = true; + break; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + /* + * For non-QoS this relies on the fact that both the uCode and + * mac80211/our RX code use TID 0 for checking the PN. 
+ */ + if (sta) { + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_key_pn *ptk_pn; + const u8 *pn; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + rcu_read_lock(); + ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]); + if (WARN_ON(!ptk_pn)) { + rcu_read_unlock(); + break; + } + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i, + mvm->trans->num_rx_queues); + rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + + rcu_read_unlock(); + } else { + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u8 *pn = seq.ccmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } + } + data->have_rsc = true; + break; + } +} + +static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM, + IWL_FW_CMD_VER_UNKNOWN); + int ret; + + if (ver == 5) { + struct wowlan_key_rsc_v5_data data = {}; + int i; + + data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); + if (!data.rsc) + return -ENOMEM; + + memset(data.rsc, 0xff, sizeof(*data.rsc)); + + for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) + data.rsc->mcast_key_id_map[i] = + IWL_MCAST_KEY_MAP_INVALID; + data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_get_rsc_v5_data, + &data); + + if (data.have_rsc) + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + CMD_ASYNC, sizeof(*data.rsc), + data.rsc); + else + ret = 0; + kfree(data.rsc); + } else if (ver == 4 || ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) { + struct wowlan_key_rsc_tsc_data data = {}; + int size; + + data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL); + if (!data.rsc_tsc) + return -ENOMEM; + + if (ver == 4) { + size = sizeof(*data.rsc_tsc); + data.rsc_tsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id); + } else { + /* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */ + size = sizeof(data.rsc_tsc->params); + } + + ieee80211_iter_keys(mvm->hw, vif, + iwl_mvm_wowlan_get_rsc_tsc_data, + &data); + + if (data.have_rsc_tsc) + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM, + CMD_ASYNC, size, + data.rsc_tsc); + else + ret = 0; + kfree(data.rsc_tsc); + } else { + ret = 0; + WARN_ON_ONCE(1); + } + + return ret; +} + +struct wowlan_key_tkip_data { + struct iwl_wowlan_tkip_params_cmd tkip; + bool have_tkip_keys; +}; + +static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct wowlan_key_tkip_data *data = _data; + struct iwl_p1k_cache *rx_p1ks; + u8 *rx_mic_key; + struct ieee80211_key_seq seq; + u32 cur_rx_iv32 = 0; + u16 p1k[IWL_P1K_SIZE]; + int i; + + switch (key->cipher) { + default: + break; + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + u64 pn64; + + rx_p1ks = data->tkip.rx_uni; + + pn64 = atomic64_read(&key->tx_pn); + + ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), + p1k); + iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k); + + memcpy(data->tkip.mic_keys.tx, + &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], + IWL_MIC_KEY_SIZE); + + rx_mic_key = data->tkip.mic_keys.rx_unicast; + } else { + rx_p1ks = data->tkip.rx_multi; + rx_mic_key = data->tkip.mic_keys.rx_mcast; + } + + 
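+		/*
+		 * Find the highest RX iv32 across all sequence-counter
+		 * entries; the two phase-1 keys derived below cover that
+		 * iv32 and iv32 + 1, presumably so the firmware can keep
+		 * decrypting once the 32-bit IV rolls forward.
+		 */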
for (i = 0; i < IWL_NUM_RSC; i++) {
+			ieee80211_get_key_rx_seq(key, i, &seq);
+			/* wrapping isn't allowed, AP must rekey */
+			if (seq.tkip.iv32 > cur_rx_iv32)
+				cur_rx_iv32 = seq.tkip.iv32;
+		}
+
+		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+					  cur_rx_iv32, p1k);
+		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+					  cur_rx_iv32 + 1, p1k);
+		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+		memcpy(rx_mic_key,
+		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+		       IWL_MIC_KEY_SIZE);
+
+		data->have_tkip_keys = true;
+		break;
+	}
+}
+
+struct wowlan_key_gtk_type_iter {
+	struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
+};
+
+static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
+					 struct ieee80211_vif *vif,
+					 struct ieee80211_sta *sta,
+					 struct ieee80211_key_conf *key,
+					 void *_data)
+{
+	struct wowlan_key_gtk_type_iter *data = _data;
+
+	switch (key->cipher) {
+	default:
+		return;
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+		return;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+		return;
+	case WLAN_CIPHER_SUITE_CCMP:
+		if (!sta)
+			data->kek_kck_cmd->gtk_cipher =
+				cpu_to_le32(STA_KEY_FLG_CCM);
+		break;
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		if (!sta)
+			data->kek_kck_cmd->gtk_cipher =
+				cpu_to_le32(STA_KEY_FLG_GCMP);
+		break;
+	}
+}
+
+static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
+				    struct cfg80211_wowlan *wowlan)
+{
+	struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = WOWLAN_PATTERNS,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int i, err;
+
+	if (!wowlan->n_patterns)
+		return 0;
+
+	cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
+
+	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+	if (!pattern_cmd)
+		return -ENOMEM;
+
+	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+		memcpy(&pattern_cmd->patterns[i].mask,
+		       wowlan->patterns[i].mask, mask_len);
+		memcpy(&pattern_cmd->patterns[i].pattern,
+		       wowlan->patterns[i].pattern,
+		       wowlan->patterns[i].pattern_len);
+		pattern_cmd->patterns[i].mask_size = mask_len;
+		pattern_cmd->patterns[i].pattern_size =
+			wowlan->patterns[i].pattern_len;
+	}
+
+	cmd.data[0] = pattern_cmd;
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+	kfree(pattern_cmd);
+	return err;
+}
+
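+/*
+ * Current firmware takes the patterns as struct iwl_wowlan_patterns_cmd
+ * instead: each entry carries an explicit pattern_type (only bitmask
+ * patterns are built here) and, from command version 3 on, the station ID.
+ * In both variants the mask is a bitmap with one bit per pattern byte,
+ * hence DIV_ROUND_UP(pattern_len, 8).
+ */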
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 struct cfg80211_wowlan *wowlan)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_wowlan_patterns_cmd *pattern_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = WOWLAN_PATTERNS,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int i, err;
+	int ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
+					IWL_FW_CMD_VER_UNKNOWN);
+
+	if (!wowlan->n_patterns)
+		return 0;
+
+	cmd.len[0] = sizeof(*pattern_cmd) +
+		wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
+
+	pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
+	if (!pattern_cmd)
+		return -ENOMEM;
+
+	pattern_cmd->n_patterns = wowlan->n_patterns;
+	if (ver >= 3)
+		pattern_cmd->sta_id = mvmvif->ap_sta_id;
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+		pattern_cmd->patterns[i].pattern_type =
+			WOWLAN_PATTERN_TYPE_BITMASK;
+
+		memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+		       wowlan->patterns[i].mask, mask_len);
+		memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+		       wowlan->patterns[i].pattern,
+		       wowlan->patterns[i].pattern_len);
+		pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+		pattern_cmd->patterns[i].u.bitmask.pattern_size =
+			wowlan->patterns[i].pattern_len;
+	}
+
+	cmd.data[0] = pattern_cmd;
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+	kfree(pattern_cmd);
+	return err;
+}
+
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				struct ieee80211_sta *ap_sta)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_chanctx_conf *ctx;
+	u8 chains_static, chains_dynamic;
+	struct cfg80211_chan_def chandef;
+	int ret, i;
+	struct iwl_binding_cmd_v1 binding_cmd = {};
+	struct iwl_time_quota_cmd quota_cmd = {};
+	struct iwl_time_quota_data *quota;
+	u32 status;
+
+	if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+		return -EINVAL;
+
+	/* add back the PHY */
+	if (WARN_ON(!mvmvif->phy_ctxt))
+		return -EINVAL;
+
+	rcu_read_lock();
+	ctx = rcu_dereference(vif->chanctx_conf);
+	if (WARN_ON(!ctx)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	chandef = ctx->def;
+	chains_static = ctx->rx_chains_static;
+	chains_dynamic = ctx->rx_chains_dynamic;
+	rcu_read_unlock();
+
+	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
+				   chains_static, chains_dynamic);
+	if (ret)
+		return ret;
+
+	/* add back the MAC */
+	mvmvif->uploaded = false;
+
+	if (WARN_ON(!vif->bss_conf.assoc))
+		return -EINVAL;
+
+	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+	if (ret)
+		return ret;
+
+	/* add back binding - XXX refactor? */
+	binding_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+						mvmvif->phy_ctxt->color));
+	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+	binding_cmd.phy =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+						mvmvif->phy_ctxt->color));
+	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							      mvmvif->color));
+	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+	status = 0;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+					  IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
+					  &status);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+		return ret;
+	}
+
+	if (status) {
+		IWL_ERR(mvm, "Binding command failed: %u\n", status);
+		return -EIO;
+	}
+
+	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
+	if (ret)
+		return ret;
+	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+
+	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	if (ret)
+		return ret;
+
+	/* and some quota */
+	quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
+	quota->id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+						mvmvif->phy_ctxt->color));
+	quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+	quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+
+	for (i = 1; i < MAX_BINDINGS; i++) {
+		quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
+		quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+				   iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
+	return 0;
+}
+
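+/*
+ * The firmware reports the next non-QoS sequence number it would use;
+ * the driver wants the last-used value, hence the -0x10 (one full
+ * seq_ctrl step) below and the matching +0x10 when the value is
+ * programmed back after resume.
+ */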
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_nonqos_seq_query_cmd query_cmd = {
+		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+		.mac_id_n_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							mvmvif->color)),
+	};
+	struct iwl_host_cmd cmd = {
+		.id = NON_QOS_TX_COUNTER_CMD,
+		.flags = CMD_WANT_SKB,
+	};
+	int err;
+	u32 size;
+
+	cmd.data[0] = &query_cmd;
+	cmd.len[0] = sizeof(query_cmd);
+
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+	if (err)
+		return err;
+
+	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
+	if (size < sizeof(__le16)) {
+		err = -EINVAL;
+	} else {
+		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+		/* firmware returns next, not last-used seqno */
+		err = (u16) (err - 0x10);
+	}
+
+	iwl_free_resp(&cmd);
+	return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_nonqos_seq_query_cmd query_cmd = {
+		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+		.mac_id_n_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							mvmvif->color)),
+		.value = cpu_to_le16(mvmvif->seqno),
+	};
+
+	/* return if called during restart, not resume from D3 */
+	if (!mvmvif->seqno_valid)
+		return;
+
+	mvmvif->seqno_valid = false;
+
+	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
+				 sizeof(query_cmd), &query_cmd))
+		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
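+/*
+ * Only used with split firmware images; unified images
+ * (IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG) keep running the same image in
+ * D0 and D3, so their callers skip this switch entirely.
+ */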
+static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
+{
+	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+
+	iwl_mvm_stop_device(mvm);
+	/*
+	 * Set the HW restart bit -- this is mostly true as we're
+	 * going to load new firmware and reprogram that, though
+	 * the reprogramming is going to be manual to avoid adding
+	 * all the MACs that aren't supported.
+	 * We don't have to clear up everything though because the
+	 * reprogramming is manual. When we resume, we'll actually
+	 * go through a proper restart sequence again to switch
+	 * back to the runtime firmware image.
+	 */
+	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+	/* the fw is reset, so all the keys are cleared */
+	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+	mvm->ptk_ivlen = 0;
+	mvm->ptk_icvlen = 0;
+	mvm->gtk_ivlen = 0;
+	mvm->gtk_icvlen = 0;
+
+	return iwl_mvm_load_d3_fw(mvm);
+}
+
+static int
+iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
+			  struct cfg80211_wowlan *wowlan,
+			  struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+			  struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+			  struct ieee80211_sta *ap_sta)
+{
+	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+
+	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
+
+	wowlan_config_cmd->is_11n_connection =
+					ap_sta->ht_cap.ht_supported;
+	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+	if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
+		/* Query the last used seqno and set it */
+		int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+
+		if (ret < 0)
+			return ret;
+
+		wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+	}
+
+	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
+
+	if (wowlan->disconnect)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+	if (wowlan->magic_pkt)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+	if (wowlan->gtk_rekey_failure)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+	if (wowlan->eap_identity_req)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+	if (wowlan->four_way_handshake)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+	if (wowlan->n_patterns)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+	if (wowlan->rfkill_release)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+	if (wowlan->tcp) {
+		/*
+		 * Set the "link change" (really "link lost") flag as well
+		 * since that implies losing the TCP connection.
+		 */
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
+				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
+				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
+				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+	}
+
+	if (wowlan->any) {
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+				    IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+				    IWL_WOWLAN_WAKEUP_RX_FRAME |
+				    IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+	}
+
+	return 0;
+}
+
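+/*
+ * Push all key material the D3 firmware needs: the keys themselves (only
+ * for pre-unified images), the TSC/RSC counters, the TKIP phase-1 keys
+ * where the firmware cannot derive them, and the KEK/KCK rekey material
+ * when GTK rekeying is offloaded.
+ */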
+static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+					    struct ieee80211_vif *vif)
+{
+	bool unified = fw_has_capa(&mvm->fw->ucode_capa,
+				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+	struct wowlan_key_reprogram_data key_data = {};
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+	u8 cmd_ver;
+	size_t cmd_size;
+
+	if (!unified) {
+		/*
+		 * if we have to configure keys, call ieee80211_iter_keys(),
+		 * as we need non-atomic context in order to take the
+		 * required locks.
+		 */
+		/*
+		 * Note that currently we don't use CMD_ASYNC in the iterator.
+		 * In case of key_data.configure_keys, all the configured
+		 * commands are SYNC, and iwl_mvm_wowlan_program_keys() will
+		 * take care of locking/unlocking mvm->mutex.
+		 */
+		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
+				    &key_data);
+
+		if (key_data.error)
+			return -EIO;
+	}
+
+	ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+	if (ret)
+		return ret;
+
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+		int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TKIP_PARAM,
+						IWL_FW_CMD_VER_UNKNOWN);
+		struct wowlan_key_tkip_data tkip_data = {};
+		int size;
+
+		if (ver == 2) {
+			size = sizeof(tkip_data.tkip);
+			tkip_data.tkip.sta_id =
+				cpu_to_le32(mvmvif->ap_sta_id);
+		} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+			size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
+		} else {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
+
+		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data,
+				    &tkip_data);
+
+		if (tkip_data.have_tkip_keys) {
+			/* send relevant data according to CMD version */
+			ret = iwl_mvm_send_cmd_pdu(mvm,
+						   WOWLAN_TKIP_PARAM,
+						   CMD_ASYNC, size,
+						   &tkip_data.tkip);
+			if (ret)
+				return ret;
+		}
+	}
+
+	/* configure rekey data only if offloaded rekey is supported (d3) */
+	if (mvmvif->rekey_data.valid) {
+		struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
+		struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd =
+			&kek_kck_cmd;
+		struct wowlan_key_gtk_type_iter gtk_type_data = {
+			.kek_kck_cmd = _kek_kck_cmd,
+		};
+
+		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+						WOWLAN_KEK_KCK_MATERIAL,
+						IWL_FW_CMD_VER_UNKNOWN);
+		if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
+			    cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
+			return -EINVAL;
+
+		ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter,
+				    &gtk_type_data);
+
+		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+		       mvmvif->rekey_data.kck_len);
+		kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
+		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+		       mvmvif->rekey_data.kek_len);
+		kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
+		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+		kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+		kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);
+
+		if (cmd_ver == 4) {
+			cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
+		} else {
+			if (cmd_ver == 3)
+				cmd_size =
+					sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+			else
+				cmd_size =
+					sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+			/* skip the sta_id at the beginning */
+			_kek_kck_cmd = (void *)
+				((u8 *)_kek_kck_cmd + sizeof(kek_kck_cmd.sta_id));
+		}
+
+		IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
+				 mvmvif->rekey_data.akm);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL,
+					   CMD_ASYNC, cmd_size, _kek_kck_cmd);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
+		      struct cfg80211_wowlan *wowlan,
+		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+		      struct ieee80211_sta *ap_sta)
+{
+	int ret;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+	mvm->offload_tid = wowlan_config_cmd->offloading_tid;
+
+	if (!unified_image) {
+		ret = iwl_mvm_switch_to_d3(mvm);
+		if (ret)
+			return ret;
+
+		ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * This needs to be unlocked due to lock ordering
+	 * constraints. Since we're in the suspend path
+	 * that isn't really a problem though.
+ */ + mutex_unlock(&mvm->mutex); + ret = iwl_mvm_wowlan_config_key_params(mvm, vif); + mutex_lock(&mvm->mutex); + if (ret) + return ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(*wowlan_config_cmd), + wowlan_config_cmd); + if (ret) + return ret; + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE)) + ret = iwl_mvm_send_patterns(mvm, vif, wowlan); + else + ret = iwl_mvm_send_patterns_v1(mvm, wowlan); + if (ret) + return ret; + + return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0); +} + +static int +iwl_mvm_netdetect_config(struct iwl_mvm *mvm, + struct cfg80211_wowlan *wowlan, + struct cfg80211_sched_scan_request *nd_config, + struct ieee80211_vif *vif) +{ + int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; + } else { + /* In theory, we wouldn't have to stop a running sched + * scan in order to start another one (for + * net-detect). But in practice this doesn't seem to + * work properly, so stop any running sched_scan now. + */ + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); + if (ret) + return ret; + } + + ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, + IWL_MVM_SCAN_NETDETECT); + if (ret) + return ret; + + if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels)) + return -EBUSY; + + /* save the sched scan matchsets... */ + if (nd_config->n_match_sets) { + mvm->nd_match_sets = kmemdup(nd_config->match_sets, + sizeof(*nd_config->match_sets) * + nd_config->n_match_sets, + GFP_KERNEL); + if (mvm->nd_match_sets) + mvm->n_nd_match_sets = nd_config->n_match_sets; + } + + /* ...and the sched scan channels for later reporting */ + mvm->nd_channels = kmemdup(nd_config->channels, + sizeof(*nd_config->channels) * + nd_config->n_channels, + GFP_KERNEL); + if (mvm->nd_channels) + mvm->n_nd_channels = nd_config->n_channels; + + return 0; +} + +static void iwl_mvm_free_nd(struct iwl_mvm *mvm) +{ + kfree(mvm->nd_match_sets); + mvm->nd_match_sets = NULL; + mvm->n_nd_match_sets = 0; + kfree(mvm->nd_channels); + mvm->nd_channels = NULL; + mvm->n_nd_channels = 0; +} + +static int __iwl_mvm_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan, + bool test) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = NULL; + struct iwl_mvm_vif *mvmvif = NULL; + struct ieee80211_sta *ap_sta = NULL; + struct iwl_d3_manager_config d3_cfg_cmd_data = { + /* + * Program the minimum sleep time to 10 seconds, as many + * platforms have issues processing a wakeup signal while + * still being in the process of suspending. 
+ */ + .min_sleep_time = cpu_to_le32(10 * 1000 * 1000), + }; + struct iwl_host_cmd d3_cfg_cmd = { + .id = D3_CONFIG_CMD, + .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, + .data[0] = &d3_cfg_cmd_data, + .len[0] = sizeof(d3_cfg_cmd_data), + }; + int ret; + int len __maybe_unused; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + if (!wowlan) { + /* + * mac80211 shouldn't get here, but for D3 test + * it doesn't warrant a warning + */ + WARN_ON(!test); + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + + set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + + synchronize_net(); + + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) { + ret = 1; + goto out_noreset; + } + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) { + /* if we're not associated, this must be netdetect */ + if (!wowlan->nd_config) { + ret = 1; + goto out_noreset; + } + + ret = iwl_mvm_netdetect_config( + mvm, wowlan, wowlan->nd_config, vif); + if (ret) + goto out; + + mvm->net_detect = true; + } else { + struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + + wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; + + ap_sta = rcu_dereference_protected( + mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], + lockdep_is_held(&mvm->mutex)); + if (IS_ERR_OR_NULL(ap_sta)) { + ret = -EINVAL; + goto out_noreset; + } + + ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, + vif, mvmvif, ap_sta); + if (ret) + goto out_noreset; + ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd, + vif, mvmvif, ap_sta); + if (ret) + goto out; + + mvm->net_detect = false; + } + + ret = iwl_mvm_power_update_device(mvm); + if (ret) + goto out; + + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + goto out; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->d3_wake_sysassert) + d3_cfg_cmd_data.wakeup_flags |= + cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); +#endif + + /* + * Prior to 9000 device family the driver needs to stop the dbg + * recording before entering D3. In later devices the FW stops the + * recording automatically. 
+ */ + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) + iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true); + + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + + /* must be last -- this switches firmware state */ + ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); + if (ret) + goto out; +#ifdef CONFIG_IWLWIFI_DEBUGFS + len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt); + if (len >= sizeof(u32)) { + mvm->d3_test_pme_ptr = + le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data); + } +#endif + iwl_free_resp(&d3_cfg_cmd); + + clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); + + ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image); + out: + if (ret < 0) { + iwl_mvm_free_nd(mvm); + + if (!unified_image) { + if (mvm->fw_restart > 0) { + mvm->fw_restart--; + ieee80211_restart_hw(mvm->hw); + } + } + + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + } + out_noreset: + mutex_unlock(&mvm->mutex); + + return ret; +} + +int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + iwl_mvm_pause_tcm(mvm, true); + + iwl_fw_runtime_suspend(&mvm->fwrt); + + return __iwl_mvm_suspend(hw, wowlan, false); +} + +/* converted data from the different status responses */ +struct iwl_wowlan_status_data { + u64 replay_ctr; + u32 num_of_gtk_rekeys; + u32 received_beacons; + u32 wakeup_reasons; + u32 wake_packet_length; + u32 wake_packet_bufsize; + u16 pattern_number; + u16 non_qos_seq_ctr; + u16 qos_seq_ctr[8]; + u8 tid_tear_down; + + struct { + /* including RX MIC key for TKIP */ + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + } gtk; + + struct { + /* + * We store both the TKIP and AES representations + * coming from the firmware because we decode the + * data from there before we iterate the keys and + * know which one we need. + */ + struct { + struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; + } tkip, aes; + + /* + * We use -1 for when we have valid data but don't know + * the key ID from firmware, and thus it needs to be + * installed with the last key (depending on rekeying). 
+ */ + s8 key_id; + bool valid; + } gtk_seq[2]; + + struct { + /* Same as above */ + struct { + struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT]; + u64 tx_pn; + } tkip, aes; + } ptk; + + struct { + u64 ipn; + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + } igtk; + + u8 wake_packet[]; +}; + +static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_wowlan_status_data *status) +{ + struct sk_buff *pkt = NULL; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + u32 reasons = status->wakeup_reasons; + + if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { + wakeup_report = NULL; + goto report; + } + + pm_wakeup_event(mvm->dev, 0); + + if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) + wakeup.magic_pkt = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) + wakeup.pattern_idx = + status->pattern_number; + + if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)) + wakeup.disconnect = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) + wakeup.gtk_rekey_failure = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) + wakeup.eap_identity_req = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) + wakeup.four_way_handshake = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) + wakeup.tcp_connlost = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) + wakeup.tcp_nomoretokens = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) + wakeup.tcp_match = true; + + if (status->wake_packet_bufsize) { + int pktsize = status->wake_packet_bufsize; + int pktlen = status->wake_packet_length; + const u8 *pktdata = status->wake_packet; + const struct ieee80211_hdr *hdr = (const void *)pktdata; + int truncated = pktlen - pktsize; + + /* this would be a firmware bug */ + if (WARN_ON_ONCE(truncated < 0)) + truncated = 0; + + if (ieee80211_is_data(hdr->frame_control)) { + int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int ivlen = 0, icvlen = 4; /* also FCS */ + + pkt = alloc_skb(pktsize, GFP_KERNEL); + if (!pkt) + goto report; + + skb_put_data(pkt, pktdata, hdrlen); + pktdata += hdrlen; + pktsize -= hdrlen; + + if (ieee80211_has_protected(hdr->frame_control)) { + /* + * This is unlocked and using gtk_i(c)vlen, + * but since everything is under RTNL still + * that's not really a problem - changing + * it would be difficult. 
+				 */
+				if (is_multicast_ether_addr(hdr->addr1)) {
+					ivlen = mvm->gtk_ivlen;
+					icvlen += mvm->gtk_icvlen;
+				} else {
+					ivlen = mvm->ptk_ivlen;
+					icvlen += mvm->ptk_icvlen;
+				}
+			}
+
+			/* if truncated, FCS/ICV is (partially) gone */
+			if (truncated >= icvlen) {
+				truncated -= icvlen;
+				icvlen = 0;
+			} else {
+				icvlen -= truncated;
+				truncated = 0;
+			}
+
+			pktsize -= ivlen + icvlen;
+			pktdata += ivlen;
+
+			skb_put_data(pkt, pktdata, pktsize);
+
+			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+				goto report;
+			wakeup.packet = pkt->data;
+			wakeup.packet_present_len = pkt->len;
+			wakeup.packet_len = pkt->len - truncated;
+			wakeup.packet_80211 = false;
+		} else {
+			int fcslen = 4;
+
+			if (truncated >= 4) {
+				truncated -= 4;
+				fcslen = 0;
+			} else {
+				fcslen -= truncated;
+				truncated = 0;
+			}
+			pktsize -= fcslen;
+			wakeup.packet = status->wake_packet;
+			wakeup.packet_present_len = pktsize;
+			wakeup.packet_len = pktlen - truncated;
+			wakeup.packet_80211 = true;
+		}
+	}
+
+ report:
+	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+	kfree_skb(pkt);
+}
+
+static void iwl_mvm_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+	u64 pn = le64_to_cpu(le_pn);
+
+	seq->ccmp.pn[0] = pn >> 40;
+	seq->ccmp.pn[1] = pn >> 32;
+	seq->ccmp.pn[2] = pn >> 24;
+	seq->ccmp.pn[3] = pn >> 16;
+	seq->ccmp.pn[4] = pn >> 8;
+	seq->ccmp.pn[5] = pn;
+}
+
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+				  struct ieee80211_key_seq *seq)
+{
+	iwl_mvm_le64_to_aes_seq(sc->pn, seq);
+}
+
+static void iwl_mvm_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq)
+{
+	u64 pn = le64_to_cpu(le_pn);
+
+	seq->tkip.iv16 = (u16)pn;
+	seq->tkip.iv32 = (u32)(pn >> 16);
+}
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+				   struct ieee80211_key_seq *seq)
+{
+	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
+					struct ieee80211_key_seq *seq)
+{
+	int tid;
+
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+		ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
+}
+
+static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm,
+				       struct iwl_wowlan_status_data *status,
+				       struct ieee80211_sta *sta,
+				       struct ieee80211_key_conf *key)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_key_pn *ptk_pn;
+	int tid;
+
+	iwl_mvm_set_key_rx_seq_tids(key, status->ptk.aes.seq);
+
+	if (!iwl_mvm_has_new_rx_api(mvm))
+		return;
+
+	rcu_read_lock();
+	ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+	if (WARN_ON(!ptk_pn)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+		int i;
+
+		for (i = 1; i < mvm->trans->num_rx_queues; i++)
+			memcpy(ptk_pn->q[i].pn[tid],
+			       status->ptk.aes.seq[tid].ccmp.pn,
+			       IEEE80211_CCMP_PN_LEN);
+	}
+	rcu_read_unlock();
+}
+
+static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status,
+					 union iwl_all_tsc_rsc *sc)
+{
+	int i;
+
+	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT);
+	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);
+
+	/* GTK RX counters */
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i],
+				       &status->gtk_seq[0].tkip.seq[i]);
+		iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i],
+				      &status->gtk_seq[0].aes.seq[i]);
+	}
+	status->gtk_seq[0].valid = true;
+	status->gtk_seq[0].key_id = -1;
+
+	/* PTK TX counter */
+	status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) |
+				 ((u64)le32_to_cpu(sc->tkip.tsc.iv32) << 16);
+
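+	/*
+	 * TKIP transmits the 48-bit TSC split into iv16/iv32; the
+	 * AES-based ciphers use the full 64-bit PN directly.
+	 */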
status->ptk.aes.tx_pn = le64_to_cpu(sc->aes.tsc.pn); + + /* PTK RX counters */ + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + iwl_mvm_tkip_sc_to_seq(&sc->tkip.unicast_rsc[i], + &status->ptk.tkip.seq[i]); + iwl_mvm_aes_sc_to_seq(&sc->aes.unicast_rsc[i], + &status->ptk.aes.seq[i]); + } +} + +static void +iwl_mvm_convert_key_counters_v5_gtk_seq(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_all_rsc_tsc_v5 *sc, + unsigned int idx, unsigned int key_id) +{ + int tid; + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + iwl_mvm_le64_to_tkip_seq(sc->mcast_rsc[idx][tid], + &status->gtk_seq[idx].tkip.seq[tid]); + iwl_mvm_le64_to_aes_seq(sc->mcast_rsc[idx][tid], + &status->gtk_seq[idx].aes.seq[tid]); + } + + status->gtk_seq[idx].valid = true; + status->gtk_seq[idx].key_id = key_id; +} + +static void +iwl_mvm_convert_key_counters_v5(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_all_rsc_tsc_v5 *sc) +{ + int i, tid; + + BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_MAX_TID_COUNT); + BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC); + BUILD_BUG_ON(ARRAY_SIZE(sc->mcast_rsc) != ARRAY_SIZE(status->gtk_seq)); + + /* GTK RX counters */ + for (i = 0; i < ARRAY_SIZE(sc->mcast_key_id_map); i++) { + u8 entry = sc->mcast_key_id_map[i]; + + if (entry < ARRAY_SIZE(sc->mcast_rsc)) + iwl_mvm_convert_key_counters_v5_gtk_seq(status, sc, + entry, i); + } + + /* PTK TX counters not needed, assigned in device */ + + /* PTK RX counters */ + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + iwl_mvm_le64_to_tkip_seq(sc->ucast_rsc[tid], + &status->ptk.tkip.seq[tid]); + iwl_mvm_le64_to_aes_seq(sc->ucast_rsc[tid], + &status->ptk.aes.seq[tid]); + } +} + +static void iwl_mvm_set_key_rx_seq_idx(struct ieee80211_key_conf *key, + struct iwl_wowlan_status_data *status, + int idx) +{ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].aes.seq); + break; + case WLAN_CIPHER_SUITE_TKIP: + iwl_mvm_set_key_rx_seq_tids(key, status->gtk_seq[idx].tkip.seq); + break; + default: + WARN_ON(1); + } +} + +static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key, + struct iwl_wowlan_status_data *status, + bool installed) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(status->gtk_seq); i++) { + if (!status->gtk_seq[i].valid) + continue; + + /* Handle the case where we know the key ID */ + if (status->gtk_seq[i].key_id == key->keyidx) { + s8 new_key_id = -1; + + if (status->num_of_gtk_rekeys) + new_key_id = status->gtk.flags & + IWL_WOWLAN_GTK_IDX_MASK; + + /* Don't install a new key's value to an old key */ + if (new_key_id != key->keyidx) + iwl_mvm_set_key_rx_seq_idx(key, status, i); + continue; + } + + /* handle the case where we didn't, last key only */ + if (status->gtk_seq[i].key_id == -1 && + (!status->num_of_gtk_rekeys || installed)) + iwl_mvm_set_key_rx_seq_idx(key, status, i); + } +} + +struct iwl_mvm_d3_gtk_iter_data { + struct iwl_mvm *mvm; + struct iwl_wowlan_status_data *status; + void *last_gtk; + u32 cipher; + bool find_phase, unhandled_cipher; + int num_keys; +}; + +static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm_d3_gtk_iter_data *data = _data; + struct iwl_wowlan_status_data *status = data->status; + + if (data->unhandled_cipher) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* ignore WEP 
completely, nothing to do */
+		return;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+	case WLAN_CIPHER_SUITE_TKIP:
+		/* we support these */
+		break;
+	default:
+		/* everything else (even CMAC for MFP) - disconnect from AP */
+		data->unhandled_cipher = true;
+		return;
+	}
+
+	data->num_keys++;
+
+	/*
+	 * pairwise key - update sequence counters only;
+	 * note that this assumes no TDLS sessions are active
+	 */
+	if (sta) {
+		if (data->find_phase)
+			return;
+
+		switch (key->cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_GCMP:
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn);
+			iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key);
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn);
+			iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq);
+			break;
+		}
+
+		/* that's it for this key */
+		return;
+	}
+
+	if (data->find_phase) {
+		data->last_gtk = key;
+		data->cipher = key->cipher;
+		return;
+	}
+
+	if (data->status->num_of_gtk_rekeys)
+		ieee80211_remove_key(key);
+
+	if (data->last_gtk == key)
+		iwl_mvm_set_key_rx_seq(key, data->status, false);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_wowlan_status_data *status)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+		.mvm = mvm,
+		.status = status,
+	};
+	u32 disconnection_reasons =
+		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+
+	if (!status || !vif->bss_conf.bssid)
+		return false;
+
+	if (status->wakeup_reasons & disconnection_reasons)
+		return false;
+
+	/* find last GTK that we used initially, if any */
+	gtkdata.find_phase = true;
+	ieee80211_iter_keys(mvm->hw, vif,
+			    iwl_mvm_d3_update_keys, &gtkdata);
+	/* not trying to keep connections with MFP/unhandled ciphers */
+	if (gtkdata.unhandled_cipher)
+		return false;
+	if (!gtkdata.num_keys)
+		goto out;
+	if (!gtkdata.last_gtk)
+		return false;
+
+	/*
+	 * invalidate all other GTKs that might still exist and update
+	 * the one that we used
+	 */
+	gtkdata.find_phase = false;
+	ieee80211_iter_keys(mvm->hw, vif,
+			    iwl_mvm_d3_update_keys, &gtkdata);
+
+	IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
+			 status->num_of_gtk_rekeys);
+	if (status->num_of_gtk_rekeys) {
+		struct ieee80211_key_conf *key;
+		struct {
+			struct ieee80211_key_conf conf;
+			u8 key[32];
+		} conf = {
+			.conf.cipher = gtkdata.cipher,
+			.conf.keyidx =
+				status->gtk.flags & IWL_WOWLAN_GTK_IDX_MASK,
+		};
+		__be64 replay_ctr;
+
+		IWL_DEBUG_WOWLAN(mvm,
+				 "Received from FW GTK cipher %d, key index %d\n",
+				 conf.conf.cipher, conf.conf.keyidx);
+
+		BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+		BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
+		BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+		BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
+		BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk.key));
+
+		memcpy(conf.conf.key, status->gtk.key, sizeof(status->gtk.key));
+
+		switch (gtkdata.cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+		case WLAN_CIPHER_SUITE_GCMP:
+			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+			break;
+		case WLAN_CIPHER_SUITE_GCMP_256:
+			conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+			break;
+		}
+
+		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+		if (IS_ERR(key))
+			return false;
+		iwl_mvm_set_key_rx_seq(key,
status, true); + + replay_ctr = cpu_to_be64(status->replay_ctr); + + ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid, + (void *)&replay_ctr, GFP_KERNEL); + } + +out: + if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, + WOWLAN_GET_STATUSES, 0) < 10) { + mvmvif->seqno_valid = true; + /* +0x10 because the set API expects next-to-use, not last-used */ + mvmvif->seqno = status->non_qos_seq_ctr + 0x10; + } + + return true; +} + +/* Occasionally, templates would be nice. This is one of those times ... */ +#define iwl_mvm_parse_wowlan_status_common(_ver) \ +static struct iwl_wowlan_status_data * \ +iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \ + struct iwl_wowlan_status_ ##_ver *data,\ + int len) \ +{ \ + struct iwl_wowlan_status_data *status; \ + int data_size, i; \ + \ + if (len < sizeof(*data)) { \ + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ + return ERR_PTR(-EIO); \ + } \ + \ + data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \ + if (len != sizeof(*data) + data_size) { \ + IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \ + return ERR_PTR(-EIO); \ + } \ + \ + status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \ + if (!status) \ + return ERR_PTR(-ENOMEM); \ + \ + /* copy all the common fields */ \ + status->replay_ctr = le64_to_cpu(data->replay_ctr); \ + status->pattern_number = le16_to_cpu(data->pattern_number); \ + status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \ + for (i = 0; i < 8; i++) \ + status->qos_seq_ctr[i] = \ + le16_to_cpu(data->qos_seq_ctr[i]); \ + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \ + status->num_of_gtk_rekeys = \ + le32_to_cpu(data->num_of_gtk_rekeys); \ + status->received_beacons = le32_to_cpu(data->received_beacons); \ + status->wake_packet_length = \ + le32_to_cpu(data->wake_packet_length); \ + status->wake_packet_bufsize = \ + le32_to_cpu(data->wake_packet_bufsize); \ + memcpy(status->wake_packet, data->wake_packet, \ + status->wake_packet_bufsize); \ + \ + return status; \ +} + +iwl_mvm_parse_wowlan_status_common(v6) +iwl_mvm_parse_wowlan_status_common(v7) +iwl_mvm_parse_wowlan_status_common(v9) +iwl_mvm_parse_wowlan_status_common(v12) + +static void iwl_mvm_convert_gtk_v2(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v2 *data) +{ + BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(data->tkip_mic_key) > + sizeof(status->gtk.key)); + + status->gtk.len = data->key_len; + status->gtk.flags = data->key_flags; + + memcpy(status->gtk.key, data->key, sizeof(data->key)); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (status->gtk.len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + data->tkip_mic_key, sizeof(data->tkip_mic_key)); +} + +static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status, + struct iwl_wowlan_gtk_status_v3 *data) +{ + /* The parts we need are identical in v2 and v3 */ +#define CHECK(_f) do { \ + BUILD_BUG_ON(offsetof(struct iwl_wowlan_gtk_status_v2, _f) != \ + offsetof(struct iwl_wowlan_gtk_status_v3, _f)); \ + BUILD_BUG_ON(offsetofend(struct iwl_wowlan_gtk_status_v2, _f) !=\ + offsetofend(struct iwl_wowlan_gtk_status_v3, _f)); \ +} while (0) + + CHECK(key); + CHECK(key_len); + CHECK(key_flags); + CHECK(tkip_mic_key); +#undef CHECK + + iwl_mvm_convert_gtk_v2(status, (void *)data); +} + +static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status, + struct 
iwl_wowlan_igtk_status *data) +{ + const u8 *ipn = data->ipn; + + BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key)); + + status->igtk.len = data->key_len; + status->igtk.flags = data->key_flags; + + memcpy(status->igtk.key, data->key, sizeof(data->key)); + + status->igtk.ipn = ((u64)ipn[5] << 0) | + ((u64)ipn[4] << 8) | + ((u64)ipn[3] << 16) | + ((u64)ipn[2] << 24) | + ((u64)ipn[1] << 32) | + ((u64)ipn[0] << 40); +} + +static struct iwl_wowlan_status_data * +iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id) +{ + struct iwl_wowlan_status_data *status; + struct iwl_wowlan_get_status_cmd get_status_cmd = { + .sta_id = cpu_to_le32(sta_id), + }; + struct iwl_host_cmd cmd = { + .id = WOWLAN_GET_STATUSES, + .flags = CMD_WANT_SKB, + .data = { &get_status_cmd, }, + .len = { sizeof(get_status_cmd), }, + }; + int ret, len; + u8 notif_ver; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) + cmd.len[0] = 0; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret); + return ERR_PTR(ret); + } + + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + + /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */ + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, + WOWLAN_GET_STATUSES, 0); + if (!notif_ver) + notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + WOWLAN_GET_STATUSES, 7); + + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) { + struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data; + + status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len); + if (IS_ERR(status)) + goto out_free_resp; + + BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) > + sizeof(status->gtk.key)); + BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY + + sizeof(v6->gtk.tkip_mic_key) > + sizeof(status->gtk.key)); + + /* copy GTK info to the right place */ + memcpy(status->gtk.key, v6->gtk.decrypt_key, + sizeof(v6->gtk.decrypt_key)); + memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + v6->gtk.tkip_mic_key, + sizeof(v6->gtk.tkip_mic_key)); + + iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc); + + /* hardcode the key length to 16 since v6 only supports 16 */ + status->gtk.len = 16; + + /* + * The key index only uses 2 bits (values 0 to 3) and + * we always set bit 7 which means this is the + * currently used key. + */ + status->gtk.flags = v6->gtk.key_index | BIT(7); + } else if (notif_ver == 7) { + struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data; + + status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len); + if (IS_ERR(status)) + goto out_free_resp; + + iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc); + iwl_mvm_convert_gtk_v2(status, &v7->gtk[0]); + iwl_mvm_convert_igtk(status, &v7->igtk[0]); + } else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) { + struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data; + + /* these three command versions have same layout and size, the + * difference is only in a few not used (reserved) fields. 
+ */ + status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len); + if (IS_ERR(status)) + goto out_free_resp; + + iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc); + iwl_mvm_convert_gtk_v2(status, &v9->gtk[0]); + iwl_mvm_convert_igtk(status, &v9->igtk[0]); + + status->tid_tear_down = v9->tid_tear_down; + } else if (notif_ver == 12) { + struct iwl_wowlan_status_v12 *v12 = (void *)cmd.resp_pkt->data; + + status = iwl_mvm_parse_wowlan_status_common_v12(mvm, v12, len); + if (IS_ERR(status)) + goto out_free_resp; + + iwl_mvm_convert_key_counters_v5(status, &v12->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, &v12->gtk[0]); + iwl_mvm_convert_igtk(status, &v12->igtk[0]); + + status->tid_tear_down = v12->tid_tear_down; + } else { + IWL_ERR(mvm, + "Firmware advertises unknown WoWLAN status response %d!\n", + notif_ver); + status = ERR_PTR(-EIO); + } + +out_free_resp: + iwl_free_resp(&cmd); + return status; +} + +static struct iwl_wowlan_status_data * +iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id) +{ + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, OFFLOADS_QUERY_CMD, + IWL_FW_CMD_VER_UNKNOWN); + __le32 station_id = cpu_to_le32(sta_id); + u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0; + + if (!mvm->net_detect) { + /* only for tracing for now */ + int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, + cmd_size, &station_id); + if (ret) + IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret); + } + + return iwl_mvm_send_wowlan_get_status(mvm, sta_id); +} + +/* releases the MVM mutex */ +static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_wowlan_status_data *status; + int i; + bool keep; + struct iwl_mvm_sta *mvm_ap_sta; + + status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id); + if (IS_ERR(status)) + goto out_unlock; + + IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n", + status->wakeup_reasons); + + /* still at hard-coded place 0 for D3 image */ + mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0); + if (!mvm_ap_sta) + goto out_free; + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + u16 seq = status->qos_seq_ctr[i]; + /* firmware stores last-used value, we store next value */ + seq += 0x10; + mvm_ap_sta->tid_data[i].seq_number = seq; + } + + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + i = mvm->offload_tid; + iwl_trans_set_q_ptrs(mvm->trans, + mvm_ap_sta->tid_data[i].txq_id, + mvm_ap_sta->tid_data[i].seq_number >> 4); + } + + /* now we have all the data we need, unlock to avoid mac80211 issues */ + mutex_unlock(&mvm->mutex); + + iwl_mvm_report_wakeup_reasons(mvm, vif, status); + + keep = iwl_mvm_setup_connection_keep(mvm, vif, status); + + kfree(status); + return keep; + +out_free: + kfree(status); +out_unlock: + mutex_unlock(&mvm->mutex); + return false; +} + +#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \ + IWL_SCAN_MAX_PROFILES) + +struct iwl_mvm_nd_query_results { + u32 matched_profiles; + u8 matches[ND_QUERY_BUF_LEN]; +}; + +static int +iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *results) +{ + struct iwl_scan_offload_profiles_query *query; + struct iwl_host_cmd cmd = { + .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD, + .flags = CMD_WANT_SKB, + }; + int ret, len; + size_t query_len, matches_len; + int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + 
IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret); + return ret; + } + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { + query_len = sizeof(struct iwl_scan_offload_profiles_query); + matches_len = sizeof(struct iwl_scan_offload_profile_match) * + max_profiles; + } else { + query_len = sizeof(struct iwl_scan_offload_profiles_query_v1); + matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) * + max_profiles; + } + + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (len < query_len) { + IWL_ERR(mvm, "Invalid scan offload profiles query response!\n"); + ret = -EIO; + goto out_free_resp; + } + + query = (void *)cmd.resp_pkt->data; + + results->matched_profiles = le32_to_cpu(query->matched_profiles); + memcpy(results->matches, query->matches, matches_len); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done); +#endif + +out_free_resp: + iwl_free_resp(&cmd); + return ret; +} + +static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *query, + int idx) +{ + int n_chans = 0, i; + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { + struct iwl_scan_offload_profile_match *matches = + (struct iwl_scan_offload_profile_match *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++) + n_chans += hweight8(matches[idx].matching_channels[i]); + } else { + struct iwl_scan_offload_profile_match_v1 *matches = + (struct iwl_scan_offload_profile_match_v1 *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++) + n_chans += hweight8(matches[idx].matching_channels[i]); + } + + return n_chans; +} + +static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm, + struct iwl_mvm_nd_query_results *query, + struct cfg80211_wowlan_nd_match *match, + int idx) +{ + int i; + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) { + struct iwl_scan_offload_profile_match *matches = + (struct iwl_scan_offload_profile_match *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++) + if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) + match->channels[match->n_channels++] = + mvm->nd_channels[i]->center_freq; + } else { + struct iwl_scan_offload_profile_match_v1 *matches = + (struct iwl_scan_offload_profile_match_v1 *)query->matches; + + for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++) + if (matches[idx].matching_channels[i / 8] & (BIT(i % 8))) + match->channels[match->n_channels++] = + mvm->nd_channels[i]->center_freq; + } +} + +static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct cfg80211_wowlan_nd_info *net_detect = NULL; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + struct iwl_wowlan_status_data *status; + struct iwl_mvm_nd_query_results query; + unsigned long matched_profiles; + u32 reasons = 0; + int i, n_matches, ret; + + status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA); + if (!IS_ERR(status)) { + reasons = status->wakeup_reasons; + kfree(status); + } + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) + goto out; + + ret = iwl_mvm_netdetect_query_results(mvm, &query); + if (ret || !query.matched_profiles) { + wakeup_report = NULL; + goto out; + } + + matched_profiles = query.matched_profiles; + if 
(mvm->n_nd_match_sets) { + n_matches = hweight_long(matched_profiles); + } else { + IWL_ERR(mvm, "no net detect match information available\n"); + n_matches = 0; + } + + net_detect = kzalloc(struct_size(net_detect, matches, n_matches), + GFP_KERNEL); + if (!net_detect || !n_matches) + goto out_report_nd; + + for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) { + struct cfg80211_wowlan_nd_match *match; + int idx, n_channels = 0; + + n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i); + + match = kzalloc(struct_size(match, channels, n_channels), + GFP_KERNEL); + if (!match) + goto out_report_nd; + + net_detect->matches[net_detect->n_matches++] = match; + + /* We inverted the order of the SSIDs in the scan + * request, so invert the index here. + */ + idx = mvm->n_nd_match_sets - i - 1; + match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len; + memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid, + match->ssid.ssid_len); + + if (mvm->n_nd_channels < n_channels) + continue; + + iwl_mvm_query_set_freqs(mvm, &query, match, i); + } + +out_report_nd: + wakeup.net_detect = net_detect; +out: + iwl_mvm_free_nd(mvm); + + mutex_unlock(&mvm->mutex); + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + + if (net_detect) { + for (i = 0; i < net_detect->n_matches; i++) + kfree(net_detect->matches[i]); + kfree(net_detect); + } +} + +static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + /* skip the one we keep connection on */ + if (data == vif) + return; + + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_resume_disconnect(vif); +} + +static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) +{ + struct error_table_start { + /* cf. struct iwl_error_event_table */ + u32 valid; + __le32 err_id; + } err_info; + + if (!base) + return false; + + iwl_trans_read_mem_bytes(trans, base, + &err_info, sizeof(err_info)); + if (err_info.valid && err_id) + *err_id = le32_to_cpu(err_info.err_id); + + return !!err_info.valid; +} + +static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 err_id; + + /* check for lmac1 error */ + if (iwl_mvm_rt_status(mvm->trans, + mvm->trans->dbg.lmac_error_event_table[0], + &err_id)) { + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + return true; + } + + /* check if we have lmac2 set and check for error */ + if (iwl_mvm_rt_status(mvm->trans, + mvm->trans->dbg.lmac_error_event_table[1], NULL)) + return true; + + /* check for umac error */ + if (iwl_mvm_rt_status(mvm->trans, + mvm->trans->dbg.umac_error_event_table, NULL)) + return true; + + return false; +} + +static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) +{ + struct ieee80211_vif *vif = NULL; + int ret = 1; + enum iwl_d3_status d3_status; + bool keep = false; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); + + mutex_lock(&mvm->mutex); + + mvm->last_reset_or_resume_time_jiffies = jiffies; + + /* get the BSS vif pointer again */ + vif = iwl_mvm_get_bss_vif(mvm); + if (IS_ERR_OR_NULL(vif)) + goto err; + + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); + + if (iwl_mvm_check_rt_status(mvm, vif)) { + set_bit(STATUS_FW_ERROR, &mvm->trans->status); + iwl_mvm_dump_nic_error_log(mvm); + 
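+		/*
+		 * The D3 firmware hit an assert while the host was
+		 * suspended: collect the debug data, then take the error
+		 * path, which forces a full restart on the runtime image.
+		 */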
iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, + false, 0); + ret = 1; + goto err; + } + + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); + if (ret) + goto err; + + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(mvm, "Device was reset during suspend\n"); + goto err; + } + + if (d0i3_first) { + struct iwl_host_cmd cmd = { + .id = D0I3_END_CMD, + .flags = CMD_WANT_SKB | CMD_SEND_IN_D3, + }; + int len; + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) { + IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n", + ret); + goto err; + } + switch (mvm->cmd_ver.d0i3_resp) { + case 0: + break; + case 1: + len = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (len != sizeof(u32)) { + IWL_ERR(mvm, + "Error with D0I3_END_CMD response size (%d)\n", + len); + goto err; + } + if (IWL_D0I3_RESET_REQUIRE & + le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) { + iwl_write32(mvm->trans, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + iwl_free_resp(&cmd); + } + break; + default: + WARN_ON(1); + } + } + + /* after the successful handshake, we're out of D3 */ + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + + /* + * Query the current location and source from the D3 firmware so we + * can play it back when we re-initialize the D0 firmware + */ + iwl_mvm_update_changed_regdom(mvm); + + /* Re-configure PPAG settings */ + iwl_mvm_ppag_send_cmd(mvm); + + if (!unified_image) + /* Re-configure default SAR profile */ + iwl_mvm_sar_select_profile(mvm, 1, 1); + + if (mvm->net_detect) { + /* If this is a non-unified image, we restart the FW, + * so no need to stop the netdetect scan. If that + * fails, continue and try to get the wake-up reasons, + * but trigger a HW restart by keeping a failure code + * in ret. + */ + if (unified_image) + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, + false); + + iwl_mvm_query_netdetect_reasons(mvm, vif); + /* has unlocked the mutex, so skip that */ + goto out; + } else { + keep = iwl_mvm_query_wakeup_reasons(mvm, vif); +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (keep) + mvm->keep_vif = vif; +#endif + /* has unlocked the mutex, so skip that */ + goto out_iterate; + } + +err: + iwl_mvm_free_nd(mvm); + mutex_unlock(&mvm->mutex); + +out_iterate: + if (!test) + ieee80211_iterate_active_interfaces_mtx(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); + +out: + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + + /* no need to reset the device in unified images, if successful */ + if (unified_image && !ret) { + /* nothing else to do if we already sent D0I3_END_CMD */ + if (d0i3_first) + return 0; + + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL); + if (!ret) + return 0; + } + + /* + * Reconfigure the device in one of the following cases: + * 1. We are not using a unified image + * 2.
We are using a unified image but had an error while exiting D3 + */ + set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); + + /* regardless of what happened, we're now out of D3 */ + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + + return 1; +} + +int iwl_mvm_resume(struct ieee80211_hw *hw) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + ret = __iwl_mvm_resume(mvm, false); + + iwl_mvm_resume_tcm(mvm); + + iwl_fw_runtime_resume(&mvm->fwrt); + + return ret; +} + +void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + device_set_wakeup_enable(mvm->trans->dev, enabled); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + int err; + + if (mvm->d3_test_active) + return -EBUSY; + + file->private_data = inode->i_private; + + iwl_mvm_pause_tcm(mvm, true); + + iwl_fw_runtime_suspend(&mvm->fwrt); + + /* start pseudo D3 */ + rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); + err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); + wiphy_unlock(mvm->hw->wiphy); + rtnl_unlock(); + if (err > 0) + err = -EINVAL; + if (err) + return err; + + mvm->d3_test_active = true; + mvm->keep_vif = NULL; + return 0; +} + +static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u32 pme_asserted; + + while (true) { + /* read pme_ptr if available */ + if (mvm->d3_test_pme_ptr) { + pme_asserted = iwl_trans_read_mem32(mvm->trans, + mvm->d3_test_pme_ptr); + if (pme_asserted) + break; + } + + if (msleep_interruptible(100)) + break; + } + + return 0; +} + +static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + /* skip the one we keep connection on */ + if (_data == vif) + return; + + if (vif->type == NL80211_IFTYPE_STATION) + ieee80211_connection_loss(vif); +} + +static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) +{ + struct iwl_mvm *mvm = inode->i_private; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + mvm->d3_test_active = false; + + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); + + rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); + __iwl_mvm_resume(mvm, true); + wiphy_unlock(mvm->hw->wiphy); + rtnl_unlock(); + + iwl_mvm_resume_tcm(mvm); + + iwl_fw_runtime_resume(&mvm->fwrt); + + iwl_abort_notification_waits(&mvm->notif_wait); + if (!unified_image) { + int remaining_time = 10; + + ieee80211_restart_hw(mvm->hw); + + /* wait for restart and disconnect all interfaces */ + while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + remaining_time > 0) { + remaining_time--; + msleep(1000); + } + + if (remaining_time == 0) + IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); + } + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); + + return 0; +} + +const struct file_operations iwl_dbgfs_d3_test_ops = { + .llseek = no_llseek, + .open = iwl_mvm_d3_test_open, + .read = iwl_mvm_d3_test_read, + .release = iwl_mvm_d3_test_release, +}; +#endif diff --git a/mvm/debugfs.c b/mvm/debugfs.c new file mode 100644 index 000000000000..fecd7d4a7bdc --- /dev/null +++ b/mvm/debugfs.c @@ -0,0 +1,2145 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * 
Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ +#include <linux/vmalloc.h> +#include <linux/ieee80211.h> +#include <linux/netdevice.h> + +#include "mvm.h" +#include "sta.h" +#include "iwl-io.h" +#include "debugfs.h" +#include "iwl-modparams.h" +#include "fw/error-dump.h" + +static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos, budget; + + if (!iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); + mutex_unlock(&mvm->mutex); + + if (budget < 0) + return budget; + + pos = scnprintf(buf, sizeof(buf), "%d\n", budget); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + + if (!iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + iwl_mvm_enter_ctkill(mvm); + + return count; +} + +static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + u32 flush_arg; + + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + if (kstrtou32(buf, 0, &flush_arg)) + return -EINVAL; + + if (iwl_mvm_has_new_tx_api(mvm)) { + IWL_DEBUG_TX_QUEUES(mvm, + "FLUSHING all tids queues on sta_id = %d\n", + flush_arg); + mutex_lock(&mvm->mutex); + ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF) + ? : count; + mutex_unlock(&mvm->mutex); + return ret; + } + + IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n", + flush_arg); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_flush_tx_path(mvm, flush_arg) ? : count; + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_sta *mvmsta; + int sta_id, drain, ret; + + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) + return -EINVAL; + if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations) + return -EINVAL; + if (drain < 0 || drain > 1) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); + + if (!mvmsta) + ret = -ENOENT; + else + ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? 
: count; + + mutex_unlock(&mvm->mutex); + + return ret; +} + +static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + const struct fw_img *img; + unsigned int ofs, len; + size_t ret; + u8 *ptr; + + if (!iwl_mvm_firmware_running(mvm)) + return -EINVAL; + + /* default is to dump the entire data segment */ + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; + ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + len = img->sec[IWL_UCODE_SECTION_DATA].len; + + if (mvm->dbgfs_sram_len) { + ofs = mvm->dbgfs_sram_offset; + len = mvm->dbgfs_sram_len; + } + + ptr = kzalloc(len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len); + + ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len); + + kfree(ptr); + + return ret; +} + +static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + const struct fw_img *img; + u32 offset, len; + u32 img_offset, img_len; + + if (!iwl_mvm_firmware_running(mvm)) + return -EINVAL; + + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; + img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; + img_len = img->sec[IWL_UCODE_SECTION_DATA].len; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + if ((offset & 0x3) || (len & 0x3)) + return -EINVAL; + + if (offset + len > img_offset + img_len) + return -EINVAL; + + mvm->dbgfs_sram_offset = offset; + mvm->dbgfs_sram_len = len; + } else { + mvm->dbgfs_sram_offset = 0; + mvm->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos; + + if (!mvm->temperature_test) + pos = scnprintf(buf , sizeof(buf), "disabled\n"); + else + pos = scnprintf(buf , sizeof(buf), "%d\n", mvm->temperature); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +/* + * Set NIC Temperature + * Cause the driver to ignore the actual NIC temperature reported by the FW + * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN - + * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX + * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE + */ +static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int temperature; + + if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test) + return -EIO; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + /* not a legal temperature */ + if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && + temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || + temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { + if (!mvm->temperature_test) + goto out; + + mvm->temperature_test = false; + /* Since we can't read the temp while awake, just set + * it to zero until we get the next RX stats from the + * firmware. + */ + mvm->temperature = 0; + } else { + mvm->temperature_test = true; + mvm->temperature = temperature; + } + IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", + mvm->temperature_test ? 
"En" : "Dis" , + mvm->temperature); + /* handle the temperature change */ + iwl_mvm_tt_handler(mvm); + +out: + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos, ret; + s32 temp; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_get_temp(mvm, &temp); + mutex_unlock(&mvm->mutex); + + if (ret) + return -EIO; + + pos = scnprintf(buf , sizeof(buf), "%d\n", temp); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +#ifdef CONFIG_ACPI +static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[256]; + int pos = 0; + int bufsz = sizeof(buf); + int tbl_idx; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + mutex_lock(&mvm->mutex); + tbl_idx = iwl_mvm_get_sar_geo_profile(mvm); + if (tbl_idx < 0) { + mutex_unlock(&mvm->mutex); + return tbl_idx; + } + + if (!tbl_idx) { + pos = scnprintf(buf, bufsz, + "SAR geographic profile disabled\n"); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "Use geographic profile %d\n", tbl_idx); + pos += scnprintf(buf + pos, bufsz - pos, + "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max); + pos += scnprintf(buf + pos, bufsz - pos, + "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1], + mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max); + } + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} +#endif + +static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct ieee80211_sta *sta; + char buf[400]; + int i, pos = 0, bufsz = sizeof(buf); + + mutex_lock(&mvm->mutex); + + for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { + pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i); + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], + lockdep_is_held(&mvm->mutex)); + if (!sta) + pos += scnprintf(buf + pos, bufsz - pos, "N/A\n"); + else if (IS_ERR(sta)) + pos += scnprintf(buf + pos, bufsz - pos, "%ld\n", + PTR_ERR(sta)); + else + pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", + sta->addr); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + struct iwl_mvm *mvm = lq_sta->pers.drv; + static const size_t bufsz = 2048; + char *buff; + int desc = 0; + ssize_t ret; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n", + lq_sta->pers.sta_id); + desc += scnprintf(buff + desc, bufsz - desc, + "fixed rate 0x%X\n", + 
lq_sta->pers.dbg_fixed_rate); + desc += scnprintf(buff + desc, bufsz - desc, + "A-MPDU size limit %d\n", + lq_sta->pers.dbg_agg_frame_count_lim); + desc += scnprintf(buff + desc, bufsz - desc, + "valid_tx_ant %s%s\n", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "", + (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : ""); + desc += scnprintf(buff + desc, bufsz - desc, + "last tx rate=0x%X ", + lq_sta->last_rate_n_flags); + + desc += rs_pretty_print_rate(buff + desc, bufsz - desc, + lq_sta->last_rate_n_flags); + if (desc < bufsz - 1) + buff[desc++] = '\n'; + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static ssize_t iwl_dbgfs_amsdu_len_write(struct ieee80211_sta *sta, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int i; + u16 amsdu_len; + + if (kstrtou16(buf, 0, &amsdu_len)) + return -EINVAL; + + /* only change from debug set <-> debug unset */ + if (amsdu_len && mvmsta->orig_amsdu_len) + return -EBUSY; + + if (amsdu_len) { + mvmsta->orig_amsdu_len = sta->max_amsdu_len; + sta->max_amsdu_len = amsdu_len; + for (i = 0; i < ARRAY_SIZE(sta->max_tid_amsdu_len); i++) + sta->max_tid_amsdu_len[i] = amsdu_len; + } else { + sta->max_amsdu_len = mvmsta->orig_amsdu_len; + mvmsta->orig_amsdu_len = 0; + } + return count; +} + +static ssize_t iwl_dbgfs_amsdu_len_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + char buf[32]; + int pos; + + pos = scnprintf(buf, sizeof(buf), "current %d ", sta->max_amsdu_len); + pos += scnprintf(buf + pos, sizeof(buf) - pos, "stored %d\n", + mvmsta->orig_amsdu_len); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[64]; + int bufsz = sizeof(buf); + int pos = 0; + + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n", + mvm->disable_power_off); + pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n", + mvm->disable_power_off_d3); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret, val; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (!strncmp("disable_power_off_d0=", buf, 21)) { + if (sscanf(buf + 21, "%d", &val) != 1) + return -EINVAL; + mvm->disable_power_off = val; + } else if (!strncmp("disable_power_off_d3=", buf, 21)) { + if (sscanf(buf + 21, "%d", &val) != 1) + return -EINVAL; + mvm->disable_power_off_d3 = val; + } else { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_power_update_device(mvm); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static +int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, + int pos, int bufsz) +{ + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); + + BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); + BT_MBOX_PRINT(0, LE_PROF1, false); + BT_MBOX_PRINT(0, LE_PROF2, false); + BT_MBOX_PRINT(0, LE_PROF_OTHER, false); + BT_MBOX_PRINT(0, CHL_SEQ_N, false); + BT_MBOX_PRINT(0, INBAND_S, false); + BT_MBOX_PRINT(0, LE_MIN_RSSI, false); + BT_MBOX_PRINT(0, LE_SCAN, false); + BT_MBOX_PRINT(0, LE_ADV, false); + 
BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); + BT_MBOX_PRINT(0, OPEN_CON_1, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); + + BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); + BT_MBOX_PRINT(1, IP_SR, false); + BT_MBOX_PRINT(1, LE_MSTR, false); + BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); + BT_MBOX_PRINT(1, MSG_TYPE, false); + BT_MBOX_PRINT(1, SSN, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); + + BT_MBOX_PRINT(2, SNIFF_ACT, false); + BT_MBOX_PRINT(2, PAG, false); + BT_MBOX_PRINT(2, INQUIRY, false); + BT_MBOX_PRINT(2, CONN, false); + BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); + BT_MBOX_PRINT(2, DISC, false); + BT_MBOX_PRINT(2, SCO_TX_ACT, false); + BT_MBOX_PRINT(2, SCO_RX_ACT, false); + BT_MBOX_PRINT(2, ESCO_RE_TX, false); + BT_MBOX_PRINT(2, SCO_DURATION, true); + + pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); + + BT_MBOX_PRINT(3, SCO_STATE, false); + BT_MBOX_PRINT(3, SNIFF_STATE, false); + BT_MBOX_PRINT(3, A2DP_STATE, false); + BT_MBOX_PRINT(3, A2DP_SRC, false); + BT_MBOX_PRINT(3, ACL_STATE, false); + BT_MBOX_PRINT(3, MSTR_STATE, false); + BT_MBOX_PRINT(3, OBX_STATE, false); + BT_MBOX_PRINT(3, OPEN_CON_2, false); + BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); + BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); + BT_MBOX_PRINT(3, INBAND_P, false); + BT_MBOX_PRINT(3, MSG_TYPE_2, false); + BT_MBOX_PRINT(3, SSN_2, false); + BT_MBOX_PRINT(3, UPDATE_REQUEST, true); + + return pos; +} + +static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; + char *buf; + int ret, pos = 0, bufsz = sizeof(char) * 1024; + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); + + pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", + notif->bt_ci_compliance); + pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", + le32_to_cpu(notif->primary_ch_lut)); + pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", + le32_to_cpu(notif->secondary_ch_lut)); + pos += scnprintf(buf + pos, + bufsz - pos, "bt_activity_grading = %d\n", + le32_to_cpu(notif->bt_activity_grading)); + pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", + notif->rrc_status & 0xF); + pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", + notif->ttc_status & 0xF); + + pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", + IWL_MVM_BT_COEX_SYNC2SCO); + pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", + IWL_MVM_BT_COEX_MPLUT); + + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; +} +#undef BT_MBOX_PRINT + +static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; + char buf[256]; + int bufsz = sizeof(buf); + int pos = 0; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "\tPrimary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_primary_ci)); + pos += scnprintf(buf + pos, bufsz - pos, + "\tSecondary Channel Bitmap 0x%016llx\n", + le64_to_cpu(cmd->bt_secondary_ci)); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t 
+iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + u32 bt_tx_prio; + + if (sscanf(buf, "%u", &bt_tx_prio) != 1) + return -EINVAL; + if (bt_tx_prio > 4) + return -EINVAL; + + mvm->bt_tx_prio = bt_tx_prio; + + return count; +} + +static ssize_t +iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + static const char * const modes_str[BT_FORCE_ANT_MAX] = { + [BT_FORCE_ANT_DIS] = "dis", + [BT_FORCE_ANT_AUTO] = "auto", + [BT_FORCE_ANT_BT] = "bt", + [BT_FORCE_ANT_WIFI] = "wifi", + }; + int ret, bt_force_ant_mode; + + ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); + if (ret < 0) + return ret; + + bt_force_ant_mode = ret; + ret = 0; + mutex_lock(&mvm->mutex); + if (mvm->bt_force_ant_mode == bt_force_ant_mode) + goto out; + + mvm->bt_force_ant_mode = bt_force_ant_mode; + IWL_DEBUG_COEX(mvm, "Force mode: %s\n", + modes_str[mvm->bt_force_ant_mode]); + + if (iwl_mvm_firmware_running(mvm)) + ret = iwl_mvm_send_bt_init_conf(mvm); + else + ret = 0; + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char *buff, *pos, *endpos; + static const size_t bufsz = 1024; + int ret; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + pos = buff; + endpos = pos + bufsz; + + pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", + mvm->trans->cfg->fw_name_pre); + pos += scnprintf(pos, endpos - pos, "FW: %s\n", + mvm->fwrt.fw->human_readable); + pos += scnprintf(pos, endpos - pos, "Device: %s\n", + mvm->fwrt.trans->name); + pos += scnprintf(pos, endpos - pos, "Bus: %s\n", + mvm->fwrt.dev->bus->name); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + + return ret; +} + +static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char *buf; + size_t bufsz; + int pos; + ssize_t ret; + + bufsz = mvm->fw->phy_integration_ver_len + 2; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len, + mvm->fw->phy_integration_ver); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + + kfree(buf); + return ret; +} + +#define PRINT_STATS_LE32(_struct, _memb) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + fmt_table, #_memb, \ + le32_to_cpu(_struct->_memb)) + +static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + static const char *fmt_table = "\t%-30s %10u\n"; + static const char *fmt_header = "%-32s\n"; + int pos = 0; + char *buf; + int ret; + size_t bufsz; + + if (iwl_mvm_has_new_rx_stats_api(mvm)) + bufsz = ((sizeof(struct mvm_statistics_rx) / + sizeof(__le32)) * 43) + (4 * 33) + 1; + else + /* 43 = size of each data line; 33 = size of each header */ + bufsz = ((sizeof(struct mvm_statistics_rx_v3) / + sizeof(__le32)) * 43) + (4 * 33) + 1; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + + if (iwl_mvm_firmware_running(mvm)) + iwl_mvm_request_statistics(mvm, false); + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - OFDM"); + if (!iwl_mvm_has_new_rx_stats_api(mvm)) { + struct mvm_statistics_rx_phy_v2 *ofdm = 
&mvm->rx_stats_v3.ofdm; + + PRINT_STATS_LE32(ofdm, ina_cnt); + PRINT_STATS_LE32(ofdm, fina_cnt); + PRINT_STATS_LE32(ofdm, plcp_err); + PRINT_STATS_LE32(ofdm, crc32_err); + PRINT_STATS_LE32(ofdm, overrun_err); + PRINT_STATS_LE32(ofdm, early_overrun_err); + PRINT_STATS_LE32(ofdm, crc32_good); + PRINT_STATS_LE32(ofdm, false_alarm_cnt); + PRINT_STATS_LE32(ofdm, fina_sync_err_cnt); + PRINT_STATS_LE32(ofdm, sfd_timeout); + PRINT_STATS_LE32(ofdm, fina_timeout); + PRINT_STATS_LE32(ofdm, unresponded_rts); + PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); + PRINT_STATS_LE32(ofdm, sent_ack_cnt); + PRINT_STATS_LE32(ofdm, sent_cts_cnt); + PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); + PRINT_STATS_LE32(ofdm, dsp_self_kill); + PRINT_STATS_LE32(ofdm, mh_format_err); + PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum); + PRINT_STATS_LE32(ofdm, reserved); + } else { + struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm; + + PRINT_STATS_LE32(ofdm, unresponded_rts); + PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun); + PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt); + PRINT_STATS_LE32(ofdm, dsp_self_kill); + PRINT_STATS_LE32(ofdm, reserved); + } + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - CCK"); + if (!iwl_mvm_has_new_rx_stats_api(mvm)) { + struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck; + + PRINT_STATS_LE32(cck, ina_cnt); + PRINT_STATS_LE32(cck, fina_cnt); + PRINT_STATS_LE32(cck, plcp_err); + PRINT_STATS_LE32(cck, crc32_err); + PRINT_STATS_LE32(cck, overrun_err); + PRINT_STATS_LE32(cck, early_overrun_err); + PRINT_STATS_LE32(cck, crc32_good); + PRINT_STATS_LE32(cck, false_alarm_cnt); + PRINT_STATS_LE32(cck, fina_sync_err_cnt); + PRINT_STATS_LE32(cck, sfd_timeout); + PRINT_STATS_LE32(cck, fina_timeout); + PRINT_STATS_LE32(cck, unresponded_rts); + PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); + PRINT_STATS_LE32(cck, sent_ack_cnt); + PRINT_STATS_LE32(cck, sent_cts_cnt); + PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); + PRINT_STATS_LE32(cck, dsp_self_kill); + PRINT_STATS_LE32(cck, mh_format_err); + PRINT_STATS_LE32(cck, re_acq_main_rssi_sum); + PRINT_STATS_LE32(cck, reserved); + } else { + struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck; + + PRINT_STATS_LE32(cck, unresponded_rts); + PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun); + PRINT_STATS_LE32(cck, sent_ba_rsp_cnt); + PRINT_STATS_LE32(cck, dsp_self_kill); + PRINT_STATS_LE32(cck, reserved); + } + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - GENERAL"); + if (!iwl_mvm_has_new_rx_stats_api(mvm)) { + struct mvm_statistics_rx_non_phy_v3 *general = + &mvm->rx_stats_v3.general; + + PRINT_STATS_LE32(general, bogus_cts); + PRINT_STATS_LE32(general, bogus_ack); + PRINT_STATS_LE32(general, non_bssid_frames); + PRINT_STATS_LE32(general, filtered_frames); + PRINT_STATS_LE32(general, non_channel_beacons); + PRINT_STATS_LE32(general, channel_beacons); + PRINT_STATS_LE32(general, num_missed_bcon); + PRINT_STATS_LE32(general, adc_rx_saturation_time); + PRINT_STATS_LE32(general, ina_detection_search_time); + PRINT_STATS_LE32(general, beacon_silence_rssi_a); + PRINT_STATS_LE32(general, beacon_silence_rssi_b); + PRINT_STATS_LE32(general, beacon_silence_rssi_c); + PRINT_STATS_LE32(general, interference_data_flag); + PRINT_STATS_LE32(general, channel_load); + PRINT_STATS_LE32(general, dsp_false_alarms); + PRINT_STATS_LE32(general, beacon_rssi_a); + PRINT_STATS_LE32(general, beacon_rssi_b); + PRINT_STATS_LE32(general, beacon_rssi_c); + PRINT_STATS_LE32(general, beacon_energy_a); + PRINT_STATS_LE32(general, 
beacon_energy_b); + PRINT_STATS_LE32(general, beacon_energy_c); + PRINT_STATS_LE32(general, num_bt_kills); + PRINT_STATS_LE32(general, mac_id); + PRINT_STATS_LE32(general, directed_data_mpdu); + } else { + struct mvm_statistics_rx_non_phy *general = + &mvm->rx_stats.general; + + PRINT_STATS_LE32(general, bogus_cts); + PRINT_STATS_LE32(general, bogus_ack); + PRINT_STATS_LE32(general, non_channel_beacons); + PRINT_STATS_LE32(general, channel_beacons); + PRINT_STATS_LE32(general, num_missed_bcon); + PRINT_STATS_LE32(general, adc_rx_saturation_time); + PRINT_STATS_LE32(general, ina_detection_search_time); + PRINT_STATS_LE32(general, beacon_silence_rssi_a); + PRINT_STATS_LE32(general, beacon_silence_rssi_b); + PRINT_STATS_LE32(general, beacon_silence_rssi_c); + PRINT_STATS_LE32(general, interference_data_flag); + PRINT_STATS_LE32(general, channel_load); + PRINT_STATS_LE32(general, beacon_rssi_a); + PRINT_STATS_LE32(general, beacon_rssi_b); + PRINT_STATS_LE32(general, beacon_rssi_c); + PRINT_STATS_LE32(general, beacon_energy_a); + PRINT_STATS_LE32(general, beacon_energy_b); + PRINT_STATS_LE32(general, beacon_energy_c); + PRINT_STATS_LE32(general, num_bt_kills); + PRINT_STATS_LE32(general, mac_id); + } + + pos += scnprintf(buf + pos, bufsz - pos, fmt_header, + "Statistics_Rx - HT"); + if (!iwl_mvm_has_new_rx_stats_api(mvm)) { + struct mvm_statistics_rx_ht_phy_v1 *ht = + &mvm->rx_stats_v3.ofdm_ht; + + PRINT_STATS_LE32(ht, plcp_err); + PRINT_STATS_LE32(ht, overrun_err); + PRINT_STATS_LE32(ht, early_overrun_err); + PRINT_STATS_LE32(ht, crc32_good); + PRINT_STATS_LE32(ht, crc32_err); + PRINT_STATS_LE32(ht, mh_format_err); + PRINT_STATS_LE32(ht, agg_crc32_good); + PRINT_STATS_LE32(ht, agg_mpdu_cnt); + PRINT_STATS_LE32(ht, agg_cnt); + PRINT_STATS_LE32(ht, unsupport_mcs); + } else { + struct mvm_statistics_rx_ht_phy *ht = + &mvm->rx_stats.ofdm_ht; + + PRINT_STATS_LE32(ht, mh_format_err); + PRINT_STATS_LE32(ht, agg_mpdu_cnt); + PRINT_STATS_LE32(ht, agg_cnt); + PRINT_STATS_LE32(ht, unsupport_mcs); + } + + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; +} +#undef PRINT_STATS_LE32 + +static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm, + char __user *user_buf, size_t count, + loff_t *ppos, + struct iwl_mvm_frame_stats *stats) +{ + char *buff, *pos, *endpos; + int idx, i; + int ret; + static const size_t bufsz = 1024; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + spin_lock_bh(&mvm->drv_stats_lock); + + pos = buff; + endpos = pos + bufsz; + + pos += scnprintf(pos, endpos - pos, + "Legacy/HT/VHT\t:\t%d/%d/%d\n", + stats->legacy_frames, + stats->ht_frames, + stats->vht_frames); + pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n", + stats->bw_20_frames, + stats->bw_40_frames, + stats->bw_80_frames); + pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n", + stats->ngi_frames, + stats->sgi_frames); + pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n", + stats->siso_frames, + stats->mimo2_frames); + pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n", + stats->fail_frames, + stats->success_frames); + pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n", + stats->agg_frames); + pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n", + stats->ampdu_count); + pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n", + stats->ampdu_count > 0 ?
+ (stats->agg_frames / stats->ampdu_count) : 0); + + pos += scnprintf(pos, endpos - pos, "Last Rates\n"); + + idx = stats->last_frame_idx - 1; + for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) { + idx = (idx + 1) % ARRAY_SIZE(stats->last_rates); + if (stats->last_rates[idx] == 0) + continue; + pos += scnprintf(pos, endpos - pos, "Rate[%d]: ", + (int)(ARRAY_SIZE(stats->last_rates) - i)); + pos += rs_pretty_print_rate_v1(pos, endpos - pos, + stats->last_rates[idx]); + if (pos < endpos - 1) + *pos++ = '\n'; + } + spin_unlock_bh(&mvm->drv_stats_lock); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + + return ret; +} + +static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + + return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos, + &mvm->drv_rx_stats); +} + +static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int __maybe_unused ret; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + mutex_lock(&mvm->mutex); + + /* allow one more restart that we're provoking here */ + if (mvm->fw_restart >= 0) + mvm->fw_restart++; + + if (count == 6 && !strcmp(buf, "nolog\n")) { + set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); + set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mvm->trans->status); + } + + /* take the return value to make compiler happy - it will fail anyway */ + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(LONG_GROUP, REPLY_ERROR), + 0, 0, NULL); + + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (count == 6 && !strcmp(buf, "nolog\n")) + set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); + + iwl_force_nmi(mvm->trans); + + return count; +} + +static ssize_t +iwl_dbgfs_scan_ant_rxchain_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int pos = 0; + char buf[32]; + const size_t bufsz = sizeof(buf); + + /* print which antennas were set for the scan command by the user */ + pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: "); + if (mvm->scan_rx_ant & ANT_A) + pos += scnprintf(buf + pos, bufsz - pos, "A"); + if (mvm->scan_rx_ant & ANT_B) + pos += scnprintf(buf + pos, bufsz - pos, "B"); + pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + u8 scan_rx_ant; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (sscanf(buf, "%hhx", &scan_rx_ant) != 1) + return -EINVAL; + if (scan_rx_ant > ANT_ABC) + return -EINVAL; + if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm))) + return -EINVAL; + + if (mvm->scan_rx_ant != scan_rx_ant) { + mvm->scan_rx_ant = scan_rx_ant; + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); + } + + return count; +} + +static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_UDP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + 
IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_UDP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + int ret, i, num_repeats, nbytes = count / 2; + + ret = hex2bin(cmd.indirection_table, buf, nbytes); + if (ret) + return ret; + + /* + * The input is the redirection table, partial or full. + * Repeat the pattern if needed. + * For example, input of 01020F will be repeated 42 times, + * indirecting RSS hash results to queues 1, 2, 15 (skipping + * queues 3 - 14). + */ + num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; + for (i = 1; i < num_repeats; i++) + memcpy(&cmd.indirection_table[i * nbytes], + cmd.indirection_table, nbytes); + /* handle cut in the middle pattern for the last places */ + memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, + ARRAY_SIZE(cmd.indirection_table) % nbytes); + + netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); + + mutex_lock(&mvm->mutex); + if (iwl_mvm_firmware_running(mvm)) + ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, + sizeof(cmd), &cmd); + else + ret = 0; + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_op_mode *opmode = container_of((void *)mvm, + struct iwl_op_mode, + op_mode_specific); + struct iwl_rx_cmd_buffer rxb = { + ._rx_page_order = 0, + .truesize = 0, /* not used */ + ._offset = 0, + }; + struct iwl_rx_packet *pkt; + int bin_len = count / 2; + int ret = -EINVAL; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + /* supporting only MQ RX */ + if (!mvm->trans->trans_cfg->mq_rx_supported) + return -ENOTSUPP; + + rxb._page = alloc_pages(GFP_ATOMIC, 0); + if (!rxb._page) + return -ENOMEM; + pkt = rxb_addr(&rxb); + + ret = hex2bin(page_address(rxb._page), buf, bin_len); + if (ret) + goto out; + + /* avoid invalid memory access and malformed packet */ + if (bin_len < sizeof(*pkt) || + bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt)) + goto out; + + local_bh_disable(); + iwl_mvm_rx_mq(opmode, NULL, &rxb); + local_bh_enable(); + ret = 0; + +out: + iwl_free_rxb(&rxb); + + return ret ?: count; +} + +static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) +{ + struct ieee80211_vif *vif; + struct iwl_mvm_vif *mvmvif; + struct sk_buff *beacon; + struct ieee80211_tx_info *info; + struct iwl_mac_beacon_cmd beacon_cmd = {}; + u8 rate; + int i; + + len /= 2; + + /* Element len should be represented by u8 */ + if (len >= U8_MAX) + return -EINVAL; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (!iwl_mvm_has_new_tx_api(mvm) && + !fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, false); + if (!vif) + continue; + + if (vif->type == NL80211_IFTYPE_AP) + break; + } + + if (i == NUM_MAC_INDEX_DRIVER || !vif) + goto out_err; + + mvm->hw->extra_beacon_tailroom = len; + + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + if (!beacon) + goto out_err; + + if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) { + dev_kfree_skb(beacon); + goto out_err; + } + + mvm->beacon_inject_active = true; + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + info = IEEE80211_SKB_CB(beacon); + rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + + beacon_cmd.flags = + cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate)); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + 
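/* Key the template to the MAC context id of the AP vif found + * above; the TIM index/size below are recomputed from the + * final frame, so the injected tail IEs are covered by the + * byte count just set. */ +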
beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); + mutex_unlock(&mvm->mutex); + + dev_kfree_skb(beacon); + + return 0; + +out_err: + mutex_unlock(&mvm->mutex); + return -EINVAL; +} + +static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count); + + mvm->hw->extra_beacon_tailroom = 0; + return ret ?: count; +} + +static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm, + char *buf, + size_t count, + loff_t *ppos) +{ + int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0); + + mvm->hw->extra_beacon_tailroom = 0; + mvm->beacon_inject_active = false; + return ret ?: count; +} + +static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int conf; + char buf[8]; + const size_t bufsz = sizeof(buf); + int pos = 0; + + mutex_lock(&mvm->mutex); + conf = mvm->fwrt.dump.conf; + mutex_unlock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + unsigned int conf_id; + int ret; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + ret = kstrtouint(buf, 0, &conf_id); + if (ret) + return ret; + + if (WARN_ON(conf_id >= FW_DBG_CONF_MAX)) + return -EINVAL; + + mutex_lock(&mvm->mutex); + ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + if (count == 0) + return 0; + + iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER, + NULL); + + iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, + (count - 1), NULL); + + return count; +} + +static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + u32 timepoint; + + if (kstrtou32(buf, 0, &timepoint)) + return -EINVAL; + + if (timepoint == IWL_FW_INI_TIME_POINT_INVALID || + timepoint >= IWL_FW_INI_TIME_POINT_NUM) + return -EINVAL; + + iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL); + + return count; +} + +#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING +static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_bcast_filter_cmd cmd; + const struct iwl_fw_bcast_filter *filter; + char *buf; + int bufsz = 1024; + int i, j, pos = 0; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { + ADD_TEXT("None\n"); + mutex_unlock(&mvm->mutex); + goto out; + } + mutex_unlock(&mvm->mutex); + + for (i = 0; cmd.filters[i].attrs[0].mask; i++) { + filter = &cmd.filters[i]; + + ADD_TEXT("Filter [%d]:\n", i); + ADD_TEXT("\tDiscard=%d\n", filter->discard); + ADD_TEXT("\tFrame Type: %s\n", + filter->frame_type ? 
"IPv4" : "Generic"); + + for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) { + const struct iwl_fw_bcast_filter_attr *attr; + + attr = &filter->attrs[j]; + if (!attr->mask) + break; + + ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n", + j, attr->offset, + attr->offset_type ? "IP End" : + "Payload Start", + be32_to_cpu(attr->mask), + be32_to_cpu(attr->val), + le16_to_cpu(attr->reserved1)); + } + } +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int pos, next_pos; + struct iwl_fw_bcast_filter filter = {}; + struct iwl_bcast_filter_cmd cmd; + u32 filter_id, attr_id, mask, value; + int err = 0; + + if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard, + &filter.frame_type, &pos) != 3) + return -EINVAL; + + if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) || + filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4) + return -EINVAL; + + for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs); + attr_id++) { + struct iwl_fw_bcast_filter_attr *attr = + &filter.attrs[attr_id]; + + if (pos >= count) + break; + + if (sscanf(&buf[pos], "%hhi %hhi %i %i %n", + &attr->offset, &attr->offset_type, + &mask, &value, &next_pos) != 4) + return -EINVAL; + + attr->mask = cpu_to_be32(mask); + attr->val = cpu_to_be32(value); + if (mask) + filter.num_attrs++; + + pos += next_pos; + } + + mutex_lock(&mvm->mutex); + memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id], + &filter, sizeof(filter)); + + /* send updated bcast filtering configuration */ + if (iwl_mvm_firmware_running(mvm) && + mvm->dbgfs_bcast_filtering.override && + iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) + err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, + sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return err ?: count; +} + +static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_bcast_filter_cmd cmd; + char *buf; + int bufsz = 1024; + int i, pos = 0; + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) { + ADD_TEXT("None\n"); + mutex_unlock(&mvm->mutex); + goto out; + } + mutex_unlock(&mvm->mutex); + + for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) { + const struct iwl_fw_bcast_mac *mac = &cmd.macs[i]; + + ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n", + i, mac->default_discard, mac->attached_filters); + } +out: + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_bcast_filter_cmd cmd; + struct iwl_fw_bcast_mac mac = {}; + u32 mac_id, attached_filters; + int err = 0; + + if (!mvm->bcast_filters) + return -ENOENT; + + if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard, + &attached_filters) != 3) + return -EINVAL; + + if (mac_id >= ARRAY_SIZE(cmd.macs) || + mac.default_discard > 1 || + attached_filters >= BIT(ARRAY_SIZE(cmd.filters))) + return -EINVAL; + + mac.attached_filters = cpu_to_le16(attached_filters); + + mutex_lock(&mvm->mutex); + memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id], + &mac, sizeof(mac)); + + /* send updated bcast filtering configuration */ + if 
(iwl_mvm_firmware_running(mvm) && + mvm->dbgfs_bcast_filtering.override && + iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) + err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, + sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return err ?: count; +} +#endif + +#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) +#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm) +#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ + debugfs_create_file(alias, mode, parent, mvm, \ + &iwl_dbgfs_##name##_ops); \ + } while (0) +#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \ + MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) +#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \ + _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta) + +#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \ + debugfs_create_file(alias, mode, parent, sta, \ + &iwl_dbgfs_##name##_ops); \ + } while (0) +#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \ + MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode) + +static ssize_t +iwl_dbgfs_prph_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int pos = 0; + char buf[32]; + const size_t bufsz = sizeof(buf); + + if (!mvm->dbgfs_prph_reg_addr) + return -EINVAL; + + pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n", + mvm->dbgfs_prph_reg_addr, + iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr)); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + u8 args; + u32 value; + + args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value); + /* if we only want to set the reg address - nothing more to do */ + if (args == 1) + goto out; + + /* otherwise, make sure we have both address and value */ + if (args != 2) + return -EINVAL; + + iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value); + +out: + return count; +} + +static ssize_t +iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +struct iwl_mvm_sniffer_apply { + struct iwl_mvm *mvm; + u8 *bssid; + u16 aid; +}; + +static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm_sniffer_apply *apply = data; + + apply->mvm->cur_aid = cpu_to_le16(apply->aid); + memcpy(apply->mvm->cur_bssid, apply->bssid, + sizeof(apply->mvm->cur_bssid)); + + return true; +} + +static ssize_t +iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_notification_wait wait; + struct iwl_he_monitor_cmd he_mon_cmd = {}; + struct iwl_mvm_sniffer_apply apply = { + .mvm = mvm, + }; + u16 wait_cmds[] = { + WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), + }; + u32 aid; + int ret; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, + &he_mon_cmd.bssid[0], 
&he_mon_cmd.bssid[1], + &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3], + &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]); + if (ret != 7) + return -EINVAL; + + he_mon_cmd.aid = cpu_to_le16(aid); + + apply.aid = aid; + apply.bssid = (void *)he_mon_cmd.bssid; + + mutex_lock(&mvm->mutex); + + /* + * Use the notification waiter to get our function triggered + * in sequence with other RX. This ensures that frames we get + * on the RX queue _before_ the new configuration is applied + * still have mvm->cur_aid pointing to the old AID, and that + * frames on the RX queue _after_ the firmware processed the + * new configuration (and sent the response, synchronously) + * get mvm->cur_aid correctly set to the new AID. + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait, + wait_cmds, ARRAY_SIZE(wait_cmds), + iwl_mvm_sniffer_apply, &apply); + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), + 0, + sizeof(he_mon_cmd), &he_mon_cmd); + + /* no need to really wait, we already did anyway */ + iwl_remove_notification(&mvm->notif_wait, &wait); + + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t +iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u8 buf[32]; + int len; + + len = scnprintf(buf, sizeof(buf), + "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", + le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0], + mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3], + mvm->cur_bssid[4], mvm->cur_bssid[5]); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t +iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1]; + unsigned int pos = 0; + size_t bufsz = sizeof(buf); + int i; + + mutex_lock(&mvm->mutex); + + for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) + pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", + mvm->uapsd_noagg_bssids[i].addr); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t +iwl_dbgfs_ltr_config_write(struct iwl_mvm *mvm, + char *buf, size_t count, loff_t *ppos) +{ + int ret; + struct iwl_ltr_config_cmd ltr_config = {0}; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (sscanf(buf, "%x,%x,%x,%x,%x,%x,%x", + <r_config.flags, + <r_config.static_long, + <r_config.static_short, + <r_config.ltr_cfg_values[0], + <r_config.ltr_cfg_values[1], + <r_config.ltr_cfg_values[2], + <r_config.ltr_cfg_values[3]) != 7) { + return -EINVAL; + } + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, sizeof(ltr_config), + <r_config); + mutex_unlock(&mvm->mutex); + + if (ret) + IWL_ERR(mvm, "failed to send ltr configuration cmd\n"); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret = 0; + u16 op_id; + + if (kstrtou16(buf, 10, &op_id)) + return -EINVAL; + + /* value zero triggers re-sending the default table to the device */ + if (!op_id) { + mutex_lock(&mvm->mutex); + ret = iwl_rfi_send_config_cmd(mvm, NULL); + mutex_unlock(&mvm->mutex); + } else { + ret = -EOPNOTSUPP; /* in the future a new table will be added */ + } + + return ret ?: count; +} + +/* The size computation is as follows: + * each number needs at most 3 characters, number of rows is the size 
of + * the table; So, need 5 chars for the "freq: " part and each tuple afterwards + * needs 6 characters for numbers and 5 for the punctuation around. + */ +#define IWL_RFI_BUF_SIZE (IWL_RFI_LUT_INSTALLED_SIZE *\ + (5 + IWL_RFI_LUT_ENTRY_CHANNELS_NUM * (6 + 5))) + +static ssize_t iwl_dbgfs_rfi_freq_table_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_rfi_freq_table_resp_cmd *resp; + u32 status; + char buf[IWL_RFI_BUF_SIZE]; + int i, j, pos = 0; + + resp = iwl_rfi_get_freq_table(mvm); + if (IS_ERR(resp)) + return PTR_ERR(resp); + + status = le32_to_cpu(resp->status); + if (status != RFI_FREQ_TABLE_OK) { + scnprintf(buf, IWL_RFI_BUF_SIZE, "status = %d\n", status); + goto out; + } + + for (i = 0; i < ARRAY_SIZE(resp->table); i++) { + pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "%d: ", + resp->table[i].freq); + + for (j = 0; j < ARRAY_SIZE(resp->table[i].channels); j++) + pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, + "(%d, %d) ", + resp->table[i].channels[j], + resp->table[i].bands[j]); + pos += scnprintf(buf + pos, IWL_RFI_BUF_SIZE - pos, "\n"); + } + +out: + kfree(resp); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); + +/* Device wide debugfs entries */ +MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); +MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); +MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); +MVM_DEBUGFS_READ_FILE_OPS(nic_temp); +MVM_DEBUGFS_READ_FILE_OPS(stations); +MVM_DEBUGFS_READ_FILE_OPS(rs_data); +MVM_DEBUGFS_READ_FILE_OPS(bt_notif); +MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); +MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); +MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); +MVM_DEBUGFS_READ_FILE_OPS(fw_ver); +MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); +MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64); +MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, + (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); +MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); +MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); +MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); + +MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); +#endif + +#ifdef CONFIG_ACPI +MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); +#endif + +MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(amsdu_len, 16); + +MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); + +MVM_DEBUGFS_WRITE_FILE_OPS(ltr_config, 512); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(rfi_freq_table, 16); + +static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_dbg_mem_access_cmd cmd = {}; + struct iwl_dbg_mem_access_rsp *rsp; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB | 
CMD_SEND_IN_RFKILL, + .data = { &cmd, }, + .len = { sizeof(cmd) }, + }; + size_t delta; + ssize_t ret, len; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR); + cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ); + + /* Take care of alignment of both the position and the length */ + delta = *ppos & 0x3; + cmd.addr = cpu_to_le32(*ppos - delta); + cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4, + (size_t)DEBUG_MEM_MAX_SIZE_DWORDS)); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, &hcmd); + mutex_unlock(&mvm->mutex); + + if (ret < 0) + return ret; + + rsp = (void *)hcmd.resp_pkt->data; + if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) { + ret = -ENXIO; + goto out; + } + + len = min((size_t)le32_to_cpu(rsp->len) << 2, + iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp)); + len = min(len - delta, count); + if (len < 0) { + ret = -EFAULT; + goto out; + } + + ret = len - copy_to_user(user_buf, (u8 *)rsp->data + delta, len); + *ppos += ret; + +out: + iwl_free_resp(&hcmd); + return ret; +} + +static ssize_t iwl_dbgfs_mem_write(struct file *file, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + struct iwl_dbg_mem_access_cmd *cmd; + struct iwl_dbg_mem_access_rsp *rsp; + struct iwl_host_cmd hcmd = {}; + size_t cmd_size; + size_t data_size; + u32 op, len; + ssize_t ret; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + hcmd.id = WIDE_ID(DEBUG_GROUP, *ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR); + + if (*ppos & 0x3 || count < 4) { + op = DEBUG_MEM_OP_WRITE_BYTES; + len = min(count, (size_t)(4 - (*ppos & 0x3))); + data_size = len; + } else { + op = DEBUG_MEM_OP_WRITE; + len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS); + data_size = len << 2; + } + + cmd_size = sizeof(*cmd) + ALIGN(data_size, 4); + cmd = kzalloc(cmd_size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->op = cpu_to_le32(op); + cmd->len = cpu_to_le32(len); + cmd->addr = cpu_to_le32(*ppos); + if (copy_from_user((void *)cmd->data, user_buf, data_size)) { + kfree(cmd); + return -EFAULT; + } + + hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + hcmd.data[0] = (void *)cmd; + hcmd.len[0] = cmd_size; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd(mvm, &hcmd); + mutex_unlock(&mvm->mutex); + + kfree(cmd); + + if (ret < 0) + return ret; + + rsp = (void *)hcmd.resp_pkt->data; + if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) { + ret = -ENXIO; + goto out; + } + + ret = data_size; + *ppos += ret; + +out: + iwl_free_resp(&hcmd); + return ret; +} + +static const struct file_operations iwl_dbgfs_mem_ops = { + .read = iwl_dbgfs_mem_read, + .write = iwl_dbgfs_mem_write, + .open = simple_open, + .llseek = default_llseek, +}; + +void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct dentry *dir) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + if (iwl_mvm_has_tlc_offload(mvm)) { + MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400); + } + MVM_DEBUGFS_ADD_STA_FILE(amsdu_len, dir, 0600); +} + +void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) +{ + struct dentry *bcast_dir __maybe_unused; + char buf[100]; + + spin_lock_init(&mvm->drv_stats_lock); + + MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(nic_temp, 
mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(ctdp_budget, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(stop_ctdp, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(dbg_time_point, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(rfi_freq_table, mvm->debugfs_dir, 0600); + + if (mvm->fw->phy_integration_ver) + MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400); +#ifdef CONFIG_ACPI + MVM_DEBUGFS_ADD_FILE(sar_geo_profile, mvm->debugfs_dir, 0400); +#endif + MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2)) + MVM_DEBUGFS_ADD_FILE(ltr_config, mvm->debugfs_dir, 0200); + + debugfs_create_bool("enable_scan_iteration_notif", 0600, + mvm->debugfs_dir, &mvm->scan_iter_notif_enabled); + debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir, + &mvm->drop_bcn_ap_mode); + + MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); + +#ifdef CONFIG_IWLWIFI_BCAST_FILTERING + if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { + bcast_dir = debugfs_create_dir("bcast_filtering", + mvm->debugfs_dir); + + debugfs_create_bool("override", 0600, bcast_dir, + &mvm->dbgfs_bcast_filtering.override); + + MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, + bcast_dir, 0600); + MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs, + bcast_dir, 0600); + } +#endif + +#ifdef CONFIG_PM_SLEEP + MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400); + debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir, + &mvm->d3_wake_sysassert); + debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir, + &mvm->last_netdetect_scans); +#endif + + debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir, + &mvm->ps_disabled); + debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir, + &mvm->nvm_hw_blob); + debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir, + &mvm->nvm_sw_blob); + debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir, + &mvm->nvm_calib_blob); + debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir, + &mvm->nvm_prod_blob); + debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, + &mvm->nvm_phy_sku_blob); + debugfs_create_blob("nvm_reg", S_IRUSR, + mvm->debugfs_dir, 
&mvm->nvm_reg_blob); + + debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm, + &iwl_dbgfs_mem_ops); + + /* + * Create a symlink with mac80211. It will be removed when mac80211 + * exits (before the opmode exits, which removes the target.) + */ + snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent); + debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf); +} diff --git a/mvm/ftm-initiator.c b/mvm/ftm-initiator.c index 949fb790f8fb..430044bc4755 100644 --- a/mvm/ftm-initiator.c +++ b/mvm/ftm-initiator.c @@ -346,8 +346,8 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm, *format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; break; case NL80211_CHAN_WIDTH_160: - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RANGE_REQ_CMD, + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver >= 13) { @@ -511,7 +511,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]); - if (sta->mfp) + if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) FTM_PUT_FLAG(PMF); rcu_read_unlock(); @@ -548,7 +548,7 @@ static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_tof_range_req_cmd_v5 cmd_v5; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd_v5, .len[0] = sizeof(cmd_v5), @@ -574,7 +574,7 @@ static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_tof_range_req_cmd_v7 cmd_v7; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd_v7, .len[0] = sizeof(cmd_v7), @@ -604,7 +604,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_tof_range_req_cmd_v8 cmd; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), @@ -630,7 +630,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, { struct iwl_tof_range_req_cmd_v9 cmd; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), @@ -728,7 +728,7 @@ static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm, { struct iwl_tof_range_req_cmd_v11 cmd; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), @@ -799,7 +799,7 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm, { struct iwl_tof_range_req_cmd_v12 cmd; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), @@ -827,7 +827,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, { struct iwl_tof_range_req_cmd_v13 cmd; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
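/* IWL_HCMD_DFL_DUP below duplicates the command body, since the on-stack cmd struct may not be DMA-safe */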
.dataflags[0] = IWL_HCMD_DFL_DUP, .data[0] = &cmd, .len[0] = sizeof(cmd), @@ -877,8 +877,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EBUSY; if (new_api) { - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RANGE_REQ_CMD, + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { @@ -927,8 +927,7 @@ void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) iwl_mvm_ftm_reset(mvm); - if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, - LOCATION_GROUP, 0), + if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "failed to abort FTM process\n"); } @@ -1066,7 +1065,7 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; - rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100; + rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); IWL_DEBUG_INFO(mvm, "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", diff --git a/mvm/ftm-responder.c b/mvm/ftm-responder.c index bda6da7d988e..9729680476fd 100644 --- a/mvm/ftm-responder.c +++ b/mvm/ftm-responder.c @@ -106,6 +106,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_chan_def *chandef) { + u32 cmd_id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_CONFIG_CMD); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); /* * The command structure is the same for versions 6, 7 and 8 (only the @@ -120,8 +121,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, IWL_TOF_RESPONDER_CMD_VALID_STA_ID), .sta_id = mvmvif->bcast_sta.sta_id, }; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RESPONDER_CONFIG_CMD, 6); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 6); int err; int cmd_size; @@ -161,9 +161,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, memcpy(cmd.bssid, vif->addr, ETH_ALEN); - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD, - LOCATION_GROUP, 0), - 0, cmd_size, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); } static int @@ -177,8 +175,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm, }; u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0}; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD, - LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), .data[1] = &data, @@ -220,8 +217,7 @@ iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm, { struct iwl_tof_responder_dyn_config_cmd cmd; struct iwl_host_cmd hcmd = { - .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD, - LOCATION_GROUP, 0), + .id = WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), .data[0] = &cmd, .len[0] = sizeof(cmd), /* may not be able to DMA from stack */ @@ -278,8 +274,9 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, struct ieee80211_ftm_responder_params *params) { int ret; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RESPONDER_DYN_CONFIG_CMD, 2); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), + 2); switch (cmd_ver) { case 2: @@ -320,8 +317,9 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, .addr = addr, .hltk = hltk, }; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP, - TOF_RESPONDER_DYN_CONFIG_CMD, 2); + u8 cmd_ver = 
iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), + 2); lockdep_assert_held(&mvm->mutex); diff --git a/mvm/fw-api.h b/mvm/fw-api.h index 73a82f07dc59..083f86fa5017 100644 --- a/mvm/fw-api.h +++ b/mvm/fw-api.h @@ -15,7 +15,7 @@ #include "fw/api/datapath.h" #include "fw/api/phy.h" #include "fw/api/config.h" -#include "fw/api/soc.h" +#include "fw/api/system.h" #include "fw/api/alive.h" #include "fw/api/binding.h" #include "fw/api/cmdhdr.h" @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -12,8 +12,6 @@ #include "iwl-op-mode.h" #include "fw/img.h" #include "iwl-debug.h" -#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */ -#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */ #include "iwl-prph.h" #include "fw/acpi.h" #include "fw/pnvm.h" @@ -27,11 +25,12 @@ #define MVM_UCODE_ALIVE_TIMEOUT (HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) -#define UCODE_VALID_OK cpu_to_le32(0x1) - #define IWL_PPAG_MASK 3 #define IWL_PPAG_ETSI_MASK BIT(0) +#define IWL_TAS_US_MCC 0x5553 +#define IWL_TAS_CANADA_MCC 0x4341 + struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -78,7 +77,7 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) struct iwl_dqa_enable_cmd dqa_cmd = { .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), }; - u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0); + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD); int ret; ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); @@ -123,13 +122,33 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_lmac_alive *lmac2 = NULL; u16 status; u32 lmac_error_event_table, umac_error_table; + u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + UCODE_ALIVE_NTFY, 0); - /* - * For v5 and above, we can check the version, for older - * versions we need to check the size. 
- */ - if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, - UCODE_ALIVE_NTFY, 0) == 5) { + if (version == 6) { + struct iwl_alive_ntf_v6 *palive; + + if (pkt_len < sizeof(*palive)) + return false; + + palive = (void *)pkt->data; + mvm->trans->dbg.imr_data.imr_enable = + le32_to_cpu(palive->imr.enabled); + mvm->trans->dbg.imr_data.imr_size = + le32_to_cpu(palive->imr.size); + mvm->trans->dbg.imr_data.imr2sram_remainbyte = + mvm->trans->dbg.imr_data.imr_size; + mvm->trans->dbg.imr_data.imr_base_addr = + palive->imr.base_addr; + mvm->trans->dbg.imr_data.imr_curr_addr = + le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr); + IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n", + mvm->trans->dbg.imr_data.imr_enable, + mvm->trans->dbg.imr_data.imr_size, + le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr)); + } + + if (version >= 5) { struct iwl_alive_ntf_v5 *palive; if (pkt_len < sizeof(*palive)) @@ -246,6 +265,26 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return false; } +static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm) +{ + struct iwl_trans *trans = mvm->trans; + enum iwl_device_family device_family = trans->trans_cfg->device_family; + + if (device_family < IWL_DEVICE_FAMILY_8000) + return; + + if (device_family <= IWL_DEVICE_FAMILY_9000) + IWL_ERR(mvm, "WFPM_ARC1_PD_NOTIFICATION: 0x%x\n", + iwl_read_umac_prph(trans, WFPM_ARC1_PD_NOTIFICATION)); + else + IWL_ERR(mvm, "WFPM_LMAC1_PD_NOTIFICATION: 0x%x\n", + iwl_read_umac_prph(trans, WFPM_LMAC1_PD_NOTIFICATION)); + + IWL_ERR(mvm, "HPM_SECONDARY_DEVICE_STATE: 0x%x\n", + iwl_read_umac_prph(trans, HPM_SECONDARY_DEVICE_STATE)); + +} + static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { @@ -311,6 +350,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_read_prph(trans, SB_CPU_2_STATUS)); } + iwl_mvm_print_pd_notification(mvm); + /* LMAC/UMAC PC info */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) { @@ -516,7 +557,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, cpu_to_le32(IWL_MVM_PHY_FILTER_CHAIN_D); } } - #else /* CONFIG_ACPI */ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, @@ -525,8 +565,51 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm, } #endif /* CONFIG_ACPI */ +#if defined(CONFIG_ACPI) && defined(CONFIG_EFI) +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + u8 cmd_ver; + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + SAR_OFFSET_MAPPING_TABLE_CMD), + .flags = 0, + .data[0] = &mvm->fwrt.sgom_table, + .len[0] = sizeof(mvm->fwrt.sgom_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!mvm->fwrt.sgom_enabled) { + IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n"); + return 0; + } + + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver != 2) { + IWL_DEBUG_RADIO(mvm, "command version is unsupported. 
version = %d\n", + cmd_ver); + return 0; + } + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret < 0) + IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); + + return ret; +} +#else + +static int iwl_mvm_sgom_init(struct iwl_mvm *mvm) +{ + return 0; +} +#endif + static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { + u32 cmd_id = PHY_CONFIGURATION_CMD; struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd; enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; struct iwl_phy_specific_cfg phy_filters = {}; @@ -558,8 +641,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) phy_cfg_cmd.calib_control.flow_trigger = mvm->fw->default_calib[ucode_type].flow_trigger; - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - PHY_CONFIGURATION_CMD, + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 3) { iwl_mvm_phy_filter_init(mvm, &phy_filters); @@ -571,8 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) phy_cfg_cmd.phy_cfg); cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) : sizeof(struct iwl_phy_cfg_cmd_v1); - return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0, - cmd_size, &phy_cfg_cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd); } int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) @@ -692,7 +773,7 @@ out: mvm->nvm_data->bands[0].n_channels = 1; mvm->nvm_data->bands[0].n_bitrates = 1; mvm->nvm_data->bands[0].bitrates = - (void *)mvm->nvm_data->channels + 1; + (void *)((u8 *)mvm->nvm_data->channels + 1); mvm->nvm_data->bands[0].bitrates->hw_value = 10; } @@ -715,6 +796,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) #ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) { + u32 cmd_id = REDUCE_TX_POWER_CMD; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), }; @@ -722,8 +804,7 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) int ret; u16 len = 0; u32 n_subbands; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - REDUCE_TX_POWER_CMD, + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 6) { @@ -757,8 +838,10 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) if (ret) return ret; + iwl_mei_set_power_limit(per_chain); + IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); - return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) @@ -767,9 +850,12 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) struct iwl_geo_tx_power_profiles_resp *resp; u16 len; int ret; - struct iwl_host_cmd cmd; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, - PER_CHAIN_LIMIT_OFFSET_CMD, + struct iwl_host_cmd cmd = { + .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD), + .flags = CMD_WANT_SKB, + .data = { &geo_tx_cmd }, + }; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); /* the ops field is at the same spot for all versions, so set in v1 */ @@ -791,12 +877,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) if (!iwl_sar_geo_support(&mvm->fwrt)) return -EOPNOTSUPP; - cmd = (struct iwl_host_cmd){ - .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD), - .len = { len, }, - .flags = CMD_WANT_SKB, - .data = { &geo_tx_cmd }, - }; + cmd.len[0] = len; ret = iwl_mvm_send_cmd(mvm, &cmd); if (ret) { @@ -816,13 +897,14 @@ int 
iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { + u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); union iwl_geo_tx_power_profiles_cmd cmd; u16 len; u32 n_bands; u32 n_profiles; + u32 sk = 0; int ret; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, - PER_CHAIN_LIMIT_OFFSET_CMD, + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) != @@ -879,24 +961,28 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) if (ret) return 0; + /* Only set to South Korea if the table revision is 1 */ + if (mvm->fwrt.geo_rev == 1) + sk = 1; + /* - * Set the revision on versions that contain it. + * Set the table_revision to South Korea (1) or not (0). The + * element name is misleading, as it doesn't contain the table + * revision number, but whether the South Korea variation + * should be used. * This must be done after calling iwl_sar_geo_init(). */ if (cmd_ver == 5) - cmd.v5.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v5.table_revision = cpu_to_le32(sk); else if (cmd_ver == 4) - cmd.v4.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v4.table_revision = cpu_to_le32(sk); else if (cmd_ver == 3) - cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v3.table_revision = cpu_to_le32(sk); else if (fw_has_api(&mvm->fwrt.fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) - cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev); + cmd.v2.table_revision = cpu_to_le32(sk); - return iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(PHY_OPS_GROUP, - PER_CHAIN_LIMIT_OFFSET_CMD), - 0, len, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) @@ -904,13 +990,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) union acpi_object *wifi_pkg, *data, *flags; int i, j, ret, tbl_rev, num_sub_bands; int idx = 2; - s8 *gain; - /* - * The 'flags' field is the same in v1 and in v2 so we can just - * use v1 to access it. 
- */ - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + mvm->fwrt.ppag_flags = 0; data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD); if (IS_ERR(data)) @@ -922,8 +1003,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) if (!IS_ERR(wifi_pkg)) { if (tbl_rev == 1 || tbl_rev == 2) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = mvm->fwrt.ppag_table.v2.gain[0]; - mvm->fwrt.ppag_ver = tbl_rev; IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=%d)\n", tbl_rev); @@ -943,8 +1022,6 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; } num_sub_bands = IWL_NUM_SUB_BANDS_V1; - gain = mvm->fwrt.ppag_table.v1.gain[0]; - mvm->fwrt.ppag_ver = 0; IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n"); goto read_table; } @@ -952,6 +1029,7 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm) goto out_free; read_table: + mvm->fwrt.ppag_ver = tbl_rev; flags = &wifi_pkg->package.elements[1]; if (flags->type != ACPI_TYPE_INTEGER) { @@ -959,10 +1037,9 @@ read_table: goto out_free; } - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(flags->integer.value & - IWL_PPAG_MASK); + mvm->fwrt.ppag_flags = flags->integer.value & IWL_PPAG_MASK; - if (!mvm->fwrt.ppag_table.v1.flags) { + if (!mvm->fwrt.ppag_flags) { ret = 0; goto out_free; } @@ -982,15 +1059,15 @@ read_table: goto out_free; } - gain[i * num_sub_bands + j] = ent->integer.value; + mvm->fwrt.ppag_chains[i].subbands[j] = ent->integer.value; if ((j == 0 && - (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_LB || - gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_LB)) || + (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB || + mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) || (j != 0 && - (gain[i * num_sub_bands + j] > ACPI_PPAG_MAX_HB || - gain[i * num_sub_bands + j] < ACPI_PPAG_MIN_HB))) { - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + (mvm->fwrt.ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB || + mvm->fwrt.ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) { + mvm->fwrt.ppag_flags = 0; ret = -EINVAL; goto out_free; } @@ -1005,6 +1082,7 @@ out_free: int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) { + union iwl_ppag_table_cmd cmd; u8 cmd_ver; int i, j, ret, num_sub_bands, cmd_size; s8 *gain; @@ -1014,37 +1092,39 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) "PPAG capability not supported by FW, command not sent.\n"); return 0; } - if (!mvm->fwrt.ppag_table.v1.flags) { + if (!mvm->fwrt.ppag_flags) { IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n"); return 0; } - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, - PER_PLATFORM_ANT_GAIN_CMD, + /* The 'flags' field is the same in v1 and in v2 so we can just + * use v1 to access it. 
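+ * (v1 and v2 are members of union iwl_ppag_table_cmd, so cmd.v1.flags aliases cmd.v2.flags)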
+ */ + cmd.v1.flags = cpu_to_le32(mvm->fwrt.ppag_flags); + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 1) { num_sub_bands = IWL_NUM_SUB_BANDS_V1; - gain = mvm->fwrt.ppag_table.v1.gain[0]; - cmd_size = sizeof(mvm->fwrt.ppag_table.v1); + gain = cmd.v1.gain[0]; + cmd_size = sizeof(cmd.v1); if (mvm->fwrt.ppag_ver == 1 || mvm->fwrt.ppag_ver == 2) { IWL_DEBUG_RADIO(mvm, "PPAG table rev is %d but FW supports v1, sending truncated table\n", mvm->fwrt.ppag_ver); - mvm->fwrt.ppag_table.v1.flags &= - cpu_to_le32(IWL_PPAG_ETSI_MASK); + cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); } } else if (cmd_ver == 2 || cmd_ver == 3) { num_sub_bands = IWL_NUM_SUB_BANDS_V2; - gain = mvm->fwrt.ppag_table.v2.gain[0]; - cmd_size = sizeof(mvm->fwrt.ppag_table.v2); + gain = cmd.v2.gain[0]; + cmd_size = sizeof(cmd.v2); if (mvm->fwrt.ppag_ver == 0) { IWL_DEBUG_RADIO(mvm, "PPAG table is v1 but FW supports v2, sending padded table\n"); } else if (cmd_ver == 2 && mvm->fwrt.ppag_ver == 2) { IWL_DEBUG_RADIO(mvm, "PPAG table is v3 but FW supports v2, sending partial bitmap.\n"); - mvm->fwrt.ppag_table.v1.flags &= - cpu_to_le32(IWL_PPAG_ETSI_MASK); + cmd.v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); } } else { IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n"); @@ -1053,6 +1133,8 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { for (j = 0; j < num_sub_bands; j++) { + gain[i * num_sub_bands + j] = + mvm->fwrt.ppag_chains[i].subbands[j]; IWL_DEBUG_RADIO(mvm, "PPAG table: chain[%d] band[%d]: gain = %d\n", i, j, gain[i * num_sub_bands + j]); @@ -1061,7 +1143,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, PER_PLATFORM_ANT_GAIN_CMD), - 0, cmd_size, &mvm->fwrt.ppag_table); + 0, cmd_size, &cmd); if (ret < 0) IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", ret); @@ -1100,20 +1182,66 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling PPAG.\n", dmi_get_system_info(DMI_SYS_VENDOR)); - mvm->fwrt.ppag_table.v1.flags = cpu_to_le32(0); + mvm->fwrt.ppag_flags = 0; return 0; } return iwl_mvm_ppag_send_cmd(mvm); } +static const struct dmi_system_id dmi_tas_approved_list[] = { + { .ident = "HP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + }, + }, + { .ident = "SAMSUNG", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), + }, + }, + { .ident = "LENOVO", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"), + }, + }, + { .ident = "DELL", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + }, + }, + + /* keep last */ + {} +}; + +static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc) +{ + int i; + u32 size = le32_to_cpu(*le_size); + + /* Verify that there is room for another country */ + if (size >= IWL_TAS_BLOCK_LIST_MAX) + return false; + + for (i = 0; i < size; i++) { + if (list[i] == cpu_to_le32(mcc)) + return true; + } + + list[size++] = cpu_to_le32(mcc); + *le_size = cpu_to_le32(size); + return true; +} + static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { + u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); int ret; - struct iwl_tas_config_cmd cmd = {}; - int list_size; + union iwl_tas_config_cmd cmd = {}; + int cmd_size, fw_ver; - BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) < + 
BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) < APCI_WTAS_BLACK_LIST_MAX); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { @@ -1121,7 +1249,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) return; } - ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.block_list_array, &list_size); + fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "TAS table invalid or unavailable. (%d)\n", @@ -1129,15 +1260,31 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) return; } - if (list_size < 0) + if (ret == 0) return; - /* list size if TAS enabled can only be non-negative */ - cmd.block_list_size = cpu_to_le32((u32)list_size); + if (!dmi_check_system(dmi_tas_approved_list)) { + IWL_DEBUG_RADIO(mvm, + "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", + dmi_get_system_info(DMI_SYS_VENDOR)); + if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, + &cmd.v4.block_list_size, + IWL_TAS_US_MCC)) || + (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array, + &cmd.v4.block_list_size, + IWL_TAS_CANADA_MCC))) { + IWL_DEBUG_RADIO(mvm, + "Unable to add US/Canada to TAS block list, disabling TAS\n"); + return; + } + } + + /* v4 is the same size as v3, so no need to differentiate here */ + cmd_size = fw_ver < 3 ? + sizeof(struct iwl_tas_config_cmd_v2) : + sizeof(struct iwl_tas_config_cmd_v3); - ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, - TAS_CONFIG), - 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd); if (ret < 0) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } @@ -1170,7 +1317,7 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { int ret; u32 value; - struct iwl_lari_config_change_cmd_v5 cmd = {}; + struct iwl_lari_config_change_cmd_v6 cmd = {}; cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt); @@ -1197,25 +1344,43 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) if (!ret) cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); + ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, + DSM_FUNC_FORCE_DISABLE_CHANNELS, + &iwl_guid, &value); + if (!ret) + cmd.force_disable_channels_bitmap = cpu_to_le32(value); + if (cmd.config_bitmap || cmd.oem_uhb_allow_bitmap || cmd.oem_11ax_allow_bitmap || cmd.oem_unii4_allow_bitmap || - cmd.chan_state_active_bitmap) { + cmd.chan_state_active_bitmap || + cmd.force_disable_channels_bitmap) { size_t cmd_size; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - REGULATORY_AND_NVM_GROUP, - LARI_CONFIG_CHANGE, 1); - if (cmd_ver == 5) + WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), + 1); + switch (cmd_ver) { + case 6: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); + break; + case 5: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); - else if (cmd_ver == 4) + break; + case 4: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); - else if (cmd_ver == 3) + break; + case 3: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); - else if (cmd_ver == 2) + break; + case 2: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); - else + break; + default: cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); + break; + } IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", @@ -1227,8 +1392,9 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) le32_to_cpu(cmd.chan_state_active_bitmap), cmd_ver); IWL_DEBUG_RADIO(mvm, - "sending LARI_CONFIG_CHANGE, 
oem_uhb_allow_bitmap=0x%x\n", - le32_to_cpu(cmd.oem_uhb_allow_bitmap)); + "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", + le32_to_cpu(cmd.oem_uhb_allow_bitmap), + le32_to_cpu(cmd.force_disable_channels_bitmap)); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), @@ -1336,6 +1502,7 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm) { } + #endif /* CONFIG_ACPI */ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) @@ -1401,7 +1568,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm); - WARN_ON(!mvm->nvm_data); ret = iwl_run_init_mvm_ucode(mvm); if (ret) { @@ -1528,9 +1694,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) * internal aux station for all aux activities that don't * requires a dedicated data queue. */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, - 0) < 12) { + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { /* * In old version the aux station uses mac id like other * station and not lmac id @@ -1545,8 +1709,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) while (!sband && i < NUM_NL80211_BANDS) sband = mvm->hw->wiphy->bands[i++]; - if (WARN_ON_ONCE(!sband)) + if (WARN_ON_ONCE(!sband)) { + ret = -ENODEV; goto error; + } chan = &sband->channels[0]; @@ -1631,6 +1797,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) else if (ret < 0) goto error; + ret = iwl_mvm_sgom_init(mvm); + if (ret) + goto error; + iwl_mvm_tas_init(mvm); iwl_mvm_leds_sync(mvm); @@ -1683,9 +1853,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, - 0) < 12) { + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { /* * Add auxiliary station for scanning. * Newer versions of this command implies that the fw uses @@ -1705,20 +1873,6 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) return ret; } -void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; - u32 flags = le32_to_cpu(card_state_notif->flags); - - IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_KILL_CARD_DISABLED) ? - "Reached" : "Not reached"); -} - void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { diff --git a/mvm/mac-ctxt.c b/mvm/mac-ctxt.c index fd7d4abfb454..5aa4520b70ac 100644 --- a/mvm/mac-ctxt.c +++ b/mvm/mac-ctxt.c @@ -821,10 +821,7 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx) { u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx); - bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, - LONG_GROUP, - BEACON_TEMPLATE_CMD, - 0) > 10; + bool is_new_rate = iwl_fw_lookup_cmd_ver(fw, BEACON_TEMPLATE_CMD, 0) > 10; if (rate_idx <= IWL_FIRST_CCK_RATE) flags |= is_new_rate ? 
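/* BEACON_TEMPLATE_CMD v11 and later use the new rate-flags layout */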
IWL_MAC_BEACON_CCK @@ -960,8 +957,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, WARN_ON(channel == 0); if (cfg80211_channel_is_psc(ctx->def.chan) && !IWL_MVM_DISABLE_AP_FILS) { - flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - BEACON_TEMPLATE_CMD, + flags |= iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) > 10 ? IWL_MAC_BEACON_FILS : IWL_MAC_BEACON_FILS_V1; @@ -1458,8 +1454,9 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct sk_buff *skb; u8 *data; u32 size = le32_to_cpu(sb->byte_count); - int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP, - STORED_BEACON_NTF, 0); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF), + 0); if (size == 0) return; @@ -1602,6 +1599,18 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, RCU_INIT_POINTER(mvm->csa_vif, NULL); return; case NL80211_IFTYPE_STATION: + /* + * if we don't know about an ongoing channel switch, + * make sure FW cancels it + */ + if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + CHANNEL_SWITCH_ERROR_NOTIF, + 0) && !vif->csa_active) { + IWL_DEBUG_INFO(mvm, "Channel Switch was canceled\n"); + iwl_mvm_cancel_channel_switch(mvm, vif, mac_id); + break; + } + iwl_mvm_csa_client_absent(mvm, vif); cancel_delayed_work(&mvmvif->csa_work); ieee80211_chswitch_done(vif, true); @@ -1615,6 +1624,31 @@ out_unlock: rcu_read_unlock(); } +void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_channel_switch_error_notif *notif = (void *)pkt->data; + struct ieee80211_vif *vif; + u32 id = le32_to_cpu(notif->mac_id); + u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask); + + rcu_read_lock(); + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + if (!vif) { + rcu_read_unlock(); + return; + } + + IWL_DEBUG_INFO(mvm, "FW reports CSA error: mac_id=%u, csa_err_mask=%u\n", + id, csa_err_mask); + if (csa_err_mask & (CS_ERR_COUNT_ERROR | + CS_ERR_LONG_DELAY_AFTER_CS | + CS_ERR_TX_BLOCK_TIMER_EXPIRED)) + ieee80211_channel_switch_disconnect(vif, true); + rcu_read_unlock(); +} + void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { diff --git a/mvm/mac80211.c b/mvm/mac80211.c index 9fb9c7dad314..6a9d9ce0007a 100644 --- a/mvm/mac80211.c +++ b/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -16,6 +16,7 @@ #include <net/ieee80211_radiotap.h> #include <net/tcp.h> +#include "iwl-drv.h" #include "iwl-op-mode.h" #include "iwl-io.h" #include "mvm.h" @@ -190,6 +191,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, if (IS_ERR_OR_NULL(resp)) { IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", PTR_ERR_OR_ZERO(resp)); + resp = NULL; goto out; } @@ -211,7 +213,6 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le16_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; - kfree(resp); if (IS_ERR_OR_NULL(regd)) { IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", PTR_ERR_OR_ZERO(regd)); @@ -223,7 +224,10 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, mvm->lar_regdom_set = true; mvm->mcc_src = src_id; + 
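/* let the CSME firmware know, via iwlmei, which country code is now in use */ +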
iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); + out: + kfree(resp); return regd; } @@ -623,8 +627,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; - if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - WOWLAN_KEK_KCK_MATERIAL, + if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, IWL_FW_CMD_VER_UNKNOWN) == 3) hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; @@ -637,14 +640,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } if (iwl_mvm_is_oce_supported(mvm)) { + u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); wiphy_ext_feature_set(hw->wiphy, - NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); - wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); + + /* Old firmware also supports probe deferral and suppression */ + if (scan_ver < 15) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); } if (mvm->nvm_data->sku_cap_11ax_enable && @@ -706,8 +714,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->netdev_features |= mvm->cfg->features; if (!iwl_mvm_is_csum_supported(mvm)) - hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | - NETIF_F_RXCSUM); + hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, @@ -717,6 +724,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); + iwl_mvm_vendor_cmds_register(mvm); + hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); @@ -1083,6 +1092,27 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_mei_get_ownership(mvm); + if (ret) + return ret; + + if (mvm->mei_nvm_data) { + /* We got the NIC, we can now free the MEI NVM data */ + kfree(mvm->mei_nvm_data); + mvm->mei_nvm_data = NULL; + + /* + * We can't free the nvm_data we allocated based on the SAP + * data because we registered to cfg80211 with the channels + * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data + * just in order to be able to free it later. + * NULLify nvm_data so that we will read the NVM from the + * firmware this time. 
+ */ + mvm->temp_nvm_data = mvm->nvm_data; + mvm->nvm_data = NULL; + } + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART @@ -1117,11 +1147,34 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; + int retry, max_retry = 0; mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_start(mvm); + + /* we are starting the mac not in error flow, and restart is enabled */ + if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && + iwlwifi_mod_params.fw_restart) { + max_retry = IWL_MAX_INIT_RETRY; + /* + * This will prevent mac80211 recovery flows from triggering during + * init failures + */ + set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + } + + for (retry = 0; retry <= max_retry; retry++) { + ret = __iwl_mvm_mac_start(mvm); + if (!ret) + break; + + IWL_ERR(mvm, "mac start retry %d\n", retry); + } + clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); + + mutex_unlock(&mvm->mutex); + iwl_mvm_mei_set_sw_rfkill_state(mvm); + return ret; } @@ -1177,7 +1230,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) /* async_handlers_wk is now blocked */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12) + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) iwl_mvm_rm_aux_sta(mvm); iwl_mvm_stop_device(mvm); @@ -1239,6 +1292,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) */ flush_work(&mvm->roc_done_wk); + iwl_mvm_mei_set_sw_rfkill_state(mvm); + mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); mutex_unlock(&mvm->mutex); @@ -1267,6 +1322,7 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { + u32 cmd_id = REDUCE_TX_POWER_CMD; int len; struct iwl_dev_tx_power_cmd cmd = { .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), @@ -1274,8 +1330,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), .common.pwr_restriction = cpu_to_le16(8 * tx_power), }; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - REDUCE_TX_POWER_CMD, + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) @@ -1295,7 +1350,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* all structs have the same common part, add it */ len += sizeof(cmd.common); - return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); + return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); } static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, @@ -1356,6 +1411,15 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), }; + /* + * In the new flow, since FW is in charge of the timing, + * if the driver has canceled the channel switch, it will receive the + * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it + */ + if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + CHANNEL_SWITCH_ERROR_NOTIF, 0)) + return; + IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); mutex_lock(&mvm->mutex); @@ -1509,6 +1573,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->monitor_on = true; iwl_mvm_vif_dbgfs_register(mvm, vif); + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + vif->type == NL80211_IFTYPE_STATION && !vif->p2p && + !mvm->csme_vif && mvm->mei_registered) { 
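+ /* first non-P2P station interface: hand its MAC address and netdev over to CSME via iwlmei */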
+ iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); + iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); + mvm->csme_vif = vif; + } + goto out_unlock; out_unbind: @@ -1561,6 +1634,11 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (vif == mvm->csme_vif) { + iwl_mei_set_netdev(NULL); + mvm->csme_vif = NULL; + } + probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, lockdep_is_held(&mvm->mutex)); RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); @@ -1666,6 +1744,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1675,6 +1754,22 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); + + /* + * Send a (synchronous) echo command so that we wait for the + * multiple asynchronous MCAST_FILTER_CMD commands sent by + * the interface iterator. Otherwise, we might get here over + * and over again (by userspace just sending a lot of these) + * and the CPU can send them faster than the firmware can + * process them. + * Note that the CPU is still faster - but with this we'll + * actually send fewer commands overall because the CPU will + * not schedule the work in mac80211 as frequently if it's + * still running when rescheduled (possibly multiple times). + */ + ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); + if (ret) + IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, @@ -1991,11 +2086,108 @@ static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) return res; } +static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, + struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, + u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) +{ + int i; + + /* + * FW currently supports only nss == MAX_HE_SUPP_NSS + * + * If nss > MAX: we can ignore values we don't support + * If nss < MAX: we can set zeros in other streams + */ + if (nss > MAX_HE_SUPP_NSS) { + IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); + nss = MAX_HE_SUPP_NSS; + } + + for (i = 0; i < nss; i++) { + u8 ru_index_tmp = ru_index_bitmap << 1; + u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; + u8 bw; + + for (bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + ru_index_tmp >>= 1; + + if (!(ru_index_tmp & 1)) + continue; + + high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); + ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); + ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } +} + +static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_he_pkt_ext_v2 *pkt_ext) +{ + u8 nss = (sta->he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 ru_index_bitmap = + u8_get_bits(*ppe, + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + /* Starting after PPE header */ + u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; + + iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); +} + +static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding, + u32 *flags) +{ + int low_th = -1; + int high_th = -1; + int i; + + 
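/* map the advertised nominal padding onto low/high PPE thresholds; -1 means keep the defaults */ +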
switch (nominal_padding) { + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: + low_th = IWL_HE_PKT_EXT_BPSK; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_BPSK; + break; + } + + /* Set the PPE thresholds accordingly */ + if (low_th >= 0 && high_th >= 0) { + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; + + for (bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } + + *flags |= STA_CTXT_HE_PACKET_EXT; + } +} + static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 sta_id) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_he_sta_context_cmd sta_ctxt_cmd = { + struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { .sta_id = sta_id, .tid_limit = IWL_MAX_TID_COUNT, .bss_color = vif->bss_conf.he_bss_color.color, @@ -2003,16 +2195,39 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, .frame_time_rts_th = cpu_to_le16(vif->bss_conf.frame_time_rts_th), }; - int size = fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_MBSSID_HE) ? - sizeof(sta_ctxt_cmd) : - sizeof(struct iwl_he_sta_context_cmd_v1); + struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); + u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); + int size; struct ieee80211_sta *sta; u32 flags; int i; const struct ieee80211_sta_he_cap *own_he_cap = NULL; struct ieee80211_chanctx_conf *chanctx_conf; const struct ieee80211_supported_band *sband; + void *cmd; + + if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) + ver = 1; + + switch (ver) { + case 1: + /* same layout as v2 except some data at the end */ + cmd = &sta_ctxt_cmd_v2; + size = sizeof(struct iwl_he_sta_context_cmd_v1); + break; + case 2: + cmd = &sta_ctxt_cmd_v2; + size = sizeof(struct iwl_he_sta_context_cmd_v2); + break; + case 3: + cmd = &sta_ctxt_cmd; + size = sizeof(struct iwl_he_sta_context_cmd_v3); + break; + default: + IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); + return; + } rcu_read_lock(); @@ -2077,97 +2292,25 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, * Initialize the PPE thresholds to "None" (7), as described in Table * 9-262ac of 80211.ax/D3.0. */ - memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); + memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, + sizeof(sta_ctxt_cmd.pkt_ext)); /* If PPE Thresholds exist, parse them into a FW-familiar format. 
*/ if (sta->he_cap.he_cap_elem.phy_cap_info[6] & - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { - u8 nss = (sta->he_cap.ppe_thres[0] & - IEEE80211_PPE_THRES_NSS_MASK) + 1; - u8 ru_index_bitmap = - (sta->he_cap.ppe_thres[0] & - IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> - IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; - u8 *ppe = &sta->he_cap.ppe_thres[0]; - u8 ppe_pos_bit = 7; /* Starting after PPE header */ - - /* - * FW currently supports only nss == MAX_HE_SUPP_NSS - * - * If nss > MAX: we can ignore values we don't support - * If nss < MAX: we can set zeros in other streams - */ - if (nss > MAX_HE_SUPP_NSS) { - IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, - MAX_HE_SUPP_NSS); - nss = MAX_HE_SUPP_NSS; - } - - for (i = 0; i < nss; i++) { - u8 ru_index_tmp = ru_index_bitmap << 1; - u8 bw; - - for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { - ru_index_tmp >>= 1; - if (!(ru_index_tmp & 1)) - continue; - - sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = - iwl_mvm_he_get_ppe_val(ppe, - ppe_pos_bit); - ppe_pos_bit += - IEEE80211_PPE_THRES_INFO_PPET_SIZE; - sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = - iwl_mvm_he_get_ppe_val(ppe, - ppe_pos_bit); - ppe_pos_bit += - IEEE80211_PPE_THRES_INFO_PPET_SIZE; - } - } - + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, + &sta_ctxt_cmd.pkt_ext); flags |= STA_CTXT_HE_PACKET_EXT; - } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] & - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) != - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) { - int low_th = -1; - int high_th = -1; - - /* Take the PPE thresholds from the nominal padding info */ - switch (sta->he_cap.he_cap_elem.phy_cap_info[9] & - IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: - low_th = IWL_HE_PKT_EXT_NONE; - high_th = IWL_HE_PKT_EXT_NONE; - break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: - low_th = IWL_HE_PKT_EXT_BPSK; - high_th = IWL_HE_PKT_EXT_NONE; - break; - case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: - low_th = IWL_HE_PKT_EXT_NONE; - high_th = IWL_HE_PKT_EXT_BPSK; - break; - } - - /* Set the PPE thresholds accordingly */ - if (low_th >= 0 && high_th >= 0) { - struct iwl_he_pkt_ext *pkt_ext = - (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext; - - for (i = 0; i < MAX_HE_SUPP_NSS; i++) { - u8 bw; - - for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; - bw++) { - pkt_ext->pkt_ext_qam_th[i][bw][0] = - low_th; - pkt_ext->pkt_ext_qam_th[i][bw][1] = - high_th; - } - } - - flags |= STA_CTXT_HE_PACKET_EXT; - } + /* PPE Thresholds don't exist - set the API PPE values + * according to the Common Nominal Packet Padding fields. 
*/ + } else { + u8 nominal_padding = + u8_get_bits(sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) + iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, + nominal_padding, + &flags); } if (sta->he_cap.he_cap_elem.mac_cap_info[2] & @@ -2230,9 +2373,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, sta_ctxt_cmd.flags = cpu_to_le32(flags); - if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, - DATA_PATH_GROUP, 0), - 0, size, &sta_ctxt_cmd)) + if (ver < 3) { + /* fields before pkt_ext */ + BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != + offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); + memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, + offsetof(typeof(sta_ctxt_cmd), pkt_ext)); + + /* pkt_ext */ + for (i = 0; + i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); + i++) { + u8 bw; + + for (bw = 0; + bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); + bw++) { + BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != + sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); + + memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], + &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], + sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); + } + } + + /* fields after pkt_ext */ + BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - + offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != + sizeof(sta_ctxt_cmd_v2) - + offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); + memcpy((u8 *)&sta_ctxt_cmd_v2 + + offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), + (u8 *)&sta_ctxt_cmd + + offsetofend(typeof(sta_ctxt_cmd), pkt_ext), + sizeof(sta_ctxt_cmd) - + offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); + sta_ctxt_cmd_v2.reserved3 = 0; + } + + if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) IWL_ERR(mvm, "Failed to config FW to work HE!\n"); } @@ -2371,6 +2551,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IEEE80211_SMPS_DYNAMIC); } } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { + iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. @@ -2447,11 +2628,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, /* * We received a beacon from the associated AP so * remove the session protection. - * A firmware with the new API will remove it automatically. 
*/ - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) - iwl_mvm_stop_session_protection(mvm, vif); + iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); @@ -3100,13 +3278,76 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, if (he_cap) { /* we know that ours is writable */ - struct ieee80211_sta_he_cap *he = (void *)he_cap; + struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; he->he_cap_elem.phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; } } +static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_sta *mvm_sta) +{ +#if IS_ENABLED(CONFIG_IWLMEI) + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mei_conn_info conn_info = { + .ssid_len = vif->bss_conf.ssid_len, + .channel = vif->bss_conf.chandef.chan->hw_value, + }; + + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + return; + + if (!mvm->mei_registered) + return; + + switch (mvm_sta->pairwise_cipher) { + case WLAN_CIPHER_SUITE_CCMP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; + break; + case 0: + /* open profile */ + break; + default: + /* cipher not supported, don't send anything to iwlmei */ + return; + } + + switch (mvmvif->rekey_data.akm) { + case WLAN_AKM_SUITE_SAE & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; + break; + case WLAN_AKM_SUITE_PSK & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; + break; + case WLAN_AKM_SUITE_8021X & 0xff: + conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; + break; + case 0: + /* open profile */ + conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; + break; + default: + /* auth method / AKM not supported */ + /* TODO: All the FT versions of these?
*/ + return; + } + + memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); + memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); + + /* TODO: add support for collocated AP data */ + iwl_mei_host_associated(&conn_info, NULL); +#endif +} + static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -3251,12 +3492,18 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * multicast data frames can be forwarded to the driver */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { + /* once we move into assoc state, need to update rate scale to + * disable using wide bandwidth + */ + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, + false); if (!sta->tdls) { /* Multicast data frames are no longer allowed */ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); @@ -3279,16 +3526,16 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count--; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { - /* remove session protection if still running */ + } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) iwl_mvm_stop_session_protection(mvm, vif); - } ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_NONE) { ret = 0; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) + iwl_mvm_stop_session_protection(mvm, vif); ret = iwl_mvm_rm_sta(mvm, vif, sta); if (sta->tdls) { iwl_mvm_recalc_tdls_state(mvm, vif, false); @@ -3454,12 +3701,15 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_sta *mvmsta = NULL; struct iwl_mvm_key_pn *ptk_pn; int keyidx = key->keyidx; int ret, i; u8 key_offset; + if (sta) + mvmsta = iwl_mvm_sta_from_mac80211(sta); + switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!mvm->trans->trans_cfg->gen2) { @@ -3560,7 +3810,7 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, } if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - sta && iwl_mvm_has_new_rx_api(mvm) && + mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || @@ -3568,7 +3818,6 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, struct ieee80211_key_seq seq; int tid, q; - mvmsta = iwl_mvm_sta_from_mac80211(sta); WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); ptk_pn = kzalloc(struct_size(ptk_pn, q, mvm->trans->num_rx_queues), @@ -3595,6 +3844,9 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, else key_offset = STA_KEY_IDX_INVALID; + if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + mvmsta->pairwise_cipher = key->cipher; + IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); if (ret) { @@ -3635,12 +3887,11 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, break; } - if (sta && iwl_mvm_has_new_rx_api(mvm) && + if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && key->flags & 
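The `& 0xff` case labels above work because 802.11 cipher/AKM suite selectors are 32-bit values of the form OUI << 8 | type, and the driver's rekey_data.akm keeps only the one-byte suite type. A small standalone illustration (the suite values mirror the common 00-0F-AC definitions; the rest is demo scaffolding):

```c
#include <stdint.h>
#include <stdio.h>

/* 802.11 suite selector: 3-byte OUI followed by a 1-byte suite type. */
#define SUITE(oui, id)		((uint32_t)(((oui) << 8) | (id)))
#define WLAN_AKM_SUITE_8021X	SUITE(0x000FAC, 1)
#define WLAN_AKM_SUITE_PSK	SUITE(0x000FAC, 2)
#define WLAN_AKM_SUITE_SAE	SUITE(0x000FAC, 8)

int main(void)
{
	uint32_t suites[] = {
		WLAN_AKM_SUITE_8021X, WLAN_AKM_SUITE_PSK, WLAN_AKM_SUITE_SAE,
	};

	for (unsigned i = 0; i < sizeof(suites) / sizeof(suites[0]); i++) {
		uint32_t akm = suites[i];

		/* rekey_data.akm stores just this low byte, hence the
		 * "case WLAN_AKM_SUITE_xxx & 0xff:" labels in the driver */
		printf("selector 0x%08x -> suite type %u\n", akm, akm & 0xff);
	}
	return 0;
}
```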
IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP || key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], lockdep_is_held(&mvm->mutex)); @@ -3879,8 +4130,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { /* Use aux roc framework (HS20) */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, 0) >= 12) { + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { u32 lmac_id; lmac_id = iwl_mvm_get_lmac_id(mvm->fw, @@ -4684,6 +4934,15 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: /* + * In the new flow FW is in charge of timing the switch so there + * is no need for all of this + */ + if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + CHANNEL_SWITCH_ERROR_NOTIF, + 0)) + break; + + /* * We haven't configured the firmware to be associated yet since * we don't know the dtim period. In this case, the firmware can't * track the beacons. @@ -4754,6 +5013,14 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, .cs_mode = chsw->block_tx, }; + /* + * In the new flow FW is in charge of timing the switch so there is no + * need for all of this + */ + if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, + CHANNEL_SWITCH_ERROR_NOTIF, 0)) + return; + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) return; @@ -5360,6 +5627,10 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + return iwl_mvm_tx_csum_bz(mvm, head, true) == + iwl_mvm_tx_csum_bz(mvm, skb, true); + /* For now don't aggregate IPv6 in AMSDU */ if (skb->protocol != htons(ETH_P_IP)) return false; diff --git a/mvm/mvm.h b/mvm/mvm.h index 2b1dcd60e00f..ecfe322ebef2 100644 --- a/mvm/mvm.h +++ b/mvm/mvm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -30,6 +30,7 @@ #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" +#include "mei/iwl-mei.h" #include "iwl-nvm-parse.h" #include <linux/average.h> @@ -93,11 +94,10 @@ struct iwl_mvm_phy_ctxt { enum nl80211_chan_width width; - /* - * TODO: This should probably be removed. Currently here only for rate - * scaling algorithm - */ struct ieee80211_channel *channel; + + /* track for RLC config command */ + u32 center_freq1; }; struct iwl_mvm_time_event_data { @@ -830,6 +830,18 @@ struct iwl_mvm { const char *nvm_file_name; struct iwl_nvm_data *nvm_data; + struct iwl_mei_nvm *mei_nvm_data; + struct iwl_mvm_csme_conn_info __rcu *csme_conn_info; + bool mei_rfkill_blocked; + bool mei_registered; + struct work_struct sap_connected_wk; + + /* + * NVM built based on the SAP data but that we can't free even after + * we get ownership because it contains the cfg80211's channel. 
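The ptk_pn allocation above uses the kernel's struct_size() helper to size a structure that ends in a flexible array (one PN-tracking slot per RX queue). A userspace approximation of the same pattern, with a simplified stand-in for iwl_mvm_key_pn:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for iwl_mvm_key_pn: per-queue replay state in a
 * flexible array member (the real struct also tracks per-TID PNs). */
struct key_pn {
	uint32_t key_idx;
	struct {
		uint8_t pn[6];	/* CCMP/GCMP PN is 6 bytes */
	} q[];			/* one entry per RX queue */
};

/* Rough equivalent of the kernel's struct_size(p, q, n); the real
 * macro additionally saturates on arithmetic overflow. */
#define STRUCT_SIZE(type, member, n) \
	(sizeof(type) + (n) * sizeof(((type *)0)->member[0]))

int main(void)
{
	size_t num_rx_queues = 8;	/* e.g. mvm->trans->num_rx_queues */
	struct key_pn *pn = calloc(1, STRUCT_SIZE(struct key_pn, q,
						  num_rx_queues));

	if (!pn)
		return 1;

	memset(pn->q[3].pn, 0xff, sizeof(pn->q[3].pn));	/* per-queue state */
	printf("allocated %zu bytes for %zu queues\n",
	       STRUCT_SIZE(struct key_pn, q, num_rx_queues), num_rx_queues);
	free(pn);
	return 0;
}
```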
+ */ + struct iwl_nvm_data *temp_nvm_data; + /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; @@ -1021,6 +1033,8 @@ struct iwl_mvm { /* Indicate if 32Khz external clock is valid */ u32 ext_clock_valid; + /* This vif used by CSME to send / receive traffic */ + struct ieee80211_vif *csme_vif; struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; u8 csa_tx_block_bcn_timeout; @@ -1083,7 +1097,6 @@ struct iwl_mvm { } cmd_ver; struct ieee80211_vif *nan_vif; -#define IWL_MAX_BAID 32 struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID]; /* @@ -1103,6 +1116,8 @@ struct iwl_mvm { unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; + + bool sta_remove_requires_queue_remove; }; /* Extract MVM priv from op_mode and _hw */ @@ -1123,6 +1138,10 @@ struct iwl_mvm { * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA * @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it) + * @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log + * if this is set, when intentionally triggered + * @IWL_MVM_STATUS_STARTING: starting mac, + * used to disable restart flow while in STARTING state */ enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, @@ -1134,6 +1153,13 @@ enum iwl_mvm_status { IWL_MVM_STATUS_FIRMWARE_RUNNING, IWL_MVM_STATUS_NEED_FLUSH_P2P, IWL_MVM_STATUS_IN_D3, + IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, + IWL_MVM_STATUS_STARTING, +}; + +struct iwl_mvm_csme_conn_info { + struct rcu_head rcu_head; + struct iwl_mei_conn_info conn_info; }; /* Keep track of completed init configuration */ @@ -1493,6 +1519,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, struct ieee80211_sta *sta, unsigned int tid); +u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu); #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); @@ -1598,8 +1625,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, @@ -1660,6 +1685,8 @@ void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -1921,10 +1948,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) void iwl_mvm_stop_device(struct iwl_mvm *mvm); -/* Re-configure the SCD for a queue that has already been configured */ -int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, - int tid, int frame_limit, u16 ssn); - /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, @@ -1939,6 +1962,17 @@ void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int 
iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); +#if IS_ENABLED(CONFIG_IWLMEI) + +/* vendor commands */ +void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm); + +#else + +static inline void iwl_mvm_vendor_cmds_register(struct iwl_mvm *mvm) {} + +#endif + /* Location Aware Regulatory */ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, @@ -2063,6 +2097,8 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table); struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm); +void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) { @@ -2137,8 +2173,7 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw) { - u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP, - SCAN_OFFLOAD_UPDATE_PROFILES_CMD, + u8 ver = iwl_fw_lookup_cmd_ver(fw, SCAN_OFFLOAD_UPDATE_PROFILES_CMD, IWL_FW_CMD_VER_UNKNOWN); return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ? IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2; @@ -2158,4 +2193,47 @@ enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher) return IWL_LOCATION_CIPHER_INVALID; } } + +struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm); +static inline int iwl_mvm_mei_get_ownership(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + return iwl_mei_get_ownership(); + return 0; +} + +static inline void iwl_mvm_mei_tx_copy_to_csme(struct iwl_mvm *mvm, + struct sk_buff *skb, + unsigned int ivlen) +{ + if (mvm->mei_registered) + iwl_mei_tx_copy_to_csme(skb, ivlen); +} + +static inline void iwl_mvm_mei_host_disassociated(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + iwl_mei_host_disassociated(); +} + +static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm) +{ + if (mvm->mei_registered) + iwl_mei_device_down(); +} + +static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) +{ + bool sw_rfkill = + mvm->hw_registered ? 
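The CONFIG_IWLMEI block above follows the usual kernel pattern for optional subsystems: real declarations when the option is enabled, empty static inlines otherwise, so call sites need no #ifdefs. A compilable miniature of the idea (the IS_ENABLED stand-in here is a simplification of the kconfig helper, which also handles =m values):

```c
#include <stdbool.h>
#include <stdio.h>

#define IS_ENABLED(option)	option	/* simplified kconfig stand-in */
#define CONFIG_DEMO_FEATURE	0	/* set to 1 to "build the feature in" */

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
static void demo_feature_event(bool up)
{
	printf("feature notified: %s\n", up ? "up" : "down");
}
#else
/* compiled out: an empty inline keeps callers unconditional */
static inline void demo_feature_event(bool up)
{
	(void)up;
}
#endif

int main(void)
{
	demo_feature_event(true);	/* safe to call either way */
	return 0;
}
```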
rfkill_blocked(mvm->hw->wiphy->rfkill) : false; + + if (mvm->mei_registered) + iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm), + sw_rfkill); +} + +void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool forbidden); + #endif /* __IWL_MVM_H__ */ diff --git a/mvm/offloading.c b/mvm/offloading.c index 41880517e8bb..c7dabc6b3765 100644 --- a/mvm/offloading.c +++ b/mvm/offloading.c @@ -47,8 +47,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct iwl_proto_offload_cmd_common *common; u32 enabled = 0, size; u32 capa_flags = mvm->fw->ucode_capa.flags; - int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - PROT_OFFLOAD_CONFIG_CMD, 0); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 0); #if IS_ENABLED(CONFIG_IPV6) struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); diff --git a/mvm/ops.c b/mvm/ops.c index 232ad531d612..36ac870e3bae 100644 --- a/mvm/ops.c +++ b/mvm/ops.c @@ -5,6 +5,7 @@ * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/module.h> +#include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include <net/mac80211.h> @@ -23,13 +24,16 @@ #include "iwl-prph.h" #include "rs.h" #include "fw/api/scan.h" +#include "fw/api/rfi.h" #include "time-event.h" #include "fw-api.h" #include "fw/acpi.h" +#include "fw/uefi.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IWLWIFI); static const struct iwl_op_mode_ops iwl_mvm_ops; static const struct iwl_op_mode_ops iwl_mvm_ops_mq; @@ -78,7 +82,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash; - u32 reg_val = 0; + u32 reg_val; u32 phy_config = iwl_mvm_get_phy_config(mvm); radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >> @@ -89,10 +93,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) FW_PHY_CFG_RADIO_DASH_POS; /* SKU control */ - reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_STEP; - reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) << - CSR_HW_IF_CONFIG_REG_POS_MAC_DASH; + reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev); /* radio configuration */ reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE; @@ -117,8 +118,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG; iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH | - CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP | + CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH | CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE | CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP | CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH | @@ -193,7 +193,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, if (he_cap) { /* we know that ours is writable */ - struct ieee80211_sta_he_cap *he = (void *)he_cap; + struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; WARN_ON(!he->has_he); WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & @@ -237,7 +237,8 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm, */ mvm->fw_static_smps_request = req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE); - ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + ieee80211_iterate_interfaces(mvm->hw, + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, iwl_mvm_intf_dual_chain_req, NULL); } @@ -260,6 +261,7 @@ enum iwl_rx_handler_context { /** * struct iwl_rx_handlers handler for FW notification * @cmd_id: 
command id + * @min_size: minimum size to expect for the notification * @context: see &iwl_rx_handler_context * @fn: the function is called when notification is received */ @@ -334,9 +336,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC, struct iwl_umac_scan_iter_complete_notif), - RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, - RX_HANDLER_SYNC, struct iwl_card_state_notif), - RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, RX_HANDLER_SYNC, struct iwl_missed_beacons_notif), @@ -386,6 +385,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, iwl_mvm_channel_switch_start_notif, RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif), + RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, + iwl_mvm_channel_switch_error_notif, + RX_HANDLER_ASYNC_UNLOCKED, + struct iwl_channel_switch_error_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_datapath_monitor_notif), @@ -394,6 +397,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_rx_thermal_dual_chain_req, RX_HANDLER_ASYNC_LOCKED, struct iwl_thermal_dual_chain_request), + + RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF, + iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED, + struct iwl_rfi_deactivate_notif), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -447,7 +454,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(POWER_TABLE_CMD), HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF), - HCMD_NAME(DC2DC_CONFIG_CMD), HCMD_NAME(NVM_ACCESS_CMD), HCMD_NAME(BEACON_NOTIFICATION), HCMD_NAME(BEACON_TEMPLATE_CMD), @@ -457,7 +463,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(STATISTICS_NOTIFICATION), HCMD_NAME(EOSP_NOTIFICATION), HCMD_NAME(REDUCE_TX_POWER_CMD), - HCMD_NAME(CARD_STATE_NOTIFICATION), HCMD_NAME(MISSED_BEACONS_NOTIFICATION), HCMD_NAME(TDLS_CONFIG_CMD), HCMD_NAME(MAC_PM_POWER_TABLE), @@ -502,6 +507,10 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), + HCMD_NAME(RFI_CONFIG_CMD), + HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), + HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD), + HCMD_NAME(RFI_DEACTIVATE_NOTIF), }; /* Please keep this array *SORTED* by hex value. 
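The RX_HANDLER_GRP() entries above populate a lookup table that maps a (group, command) id to a handler plus the minimum payload size validated before dispatch. A self-contained sketch of that dispatch shape (the id, size and handler name are invented):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rx_handler {
	uint16_t id;		/* group << 8 | command */
	size_t min_size;	/* smallest valid notification payload */
	void (*fn)(const void *data, size_t len);
};

static void handle_demo_notif(const void *data, size_t len)
{
	(void)data;
	printf("demo notification, %zu bytes\n", len);
}

static const struct rx_handler handlers[] = {
	{ .id = 0x0307, .min_size = 4, .fn = handle_demo_notif },
};

static void dispatch(uint16_t id, const void *data, size_t len)
{
	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		if (handlers[i].id != id)
			continue;
		if (len < handlers[i].min_size)
			return;	/* short/malformed notification: drop */
		handlers[i].fn(data, len);
		return;
	}
}

int main(void)
{
	uint8_t payload[8] = { 0 };

	dispatch(0x0307, payload, sizeof(payload));
	return 0;
}
```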
@@ -534,9 +543,11 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), + HCMD_NAME(RLC_CONFIG_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(TLC_MNG_CONFIG_CMD), HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), + HCMD_NAME(SCD_QUEUE_CONFIG_CMD), HCMD_NAME(MONITOR_NOTIF), HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST), HCMD_NAME(STA_PM_NOTIF), @@ -635,13 +646,11 @@ unlock: mutex_unlock(&mvm->mutex); } -static int iwl_mvm_fwrt_dump_start(void *ctx) +static void iwl_mvm_fwrt_dump_start(void *ctx) { struct iwl_mvm *mvm = ctx; mutex_lock(&mvm->mutex); - - return 0; } static void iwl_mvm_fwrt_dump_end(void *ctx) @@ -683,13 +692,45 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) { + struct iwl_trans *trans = mvm->trans; int ret; + if (trans->csme_own) { + if (WARN(!mvm->mei_registered, + "csme is owner, but we aren't registered to iwlmei\n")) + goto get_nvm_from_fw; + + mvm->mei_nvm_data = iwl_mei_get_nvm(); + if (mvm->mei_nvm_data) { + /* + * mvm->mei_nvm_data is set and because of that, + * we'll load the NVM from the FW when we'll get + * ownership. + */ + mvm->nvm_data = + iwl_parse_mei_nvm_data(trans, trans->cfg, + mvm->mei_nvm_data, mvm->fw); + return 0; + } + + IWL_ERR(mvm, + "Got a NULL NVM from CSME, trying to get it from the device\n"); + } + +get_nvm_from_fw: rtnl_lock(); + wiphy_lock(mvm->hw->wiphy); mutex_lock(&mvm->mutex); - ret = iwl_run_init_mvm_ucode(mvm); + ret = iwl_trans_start_hw(mvm->trans); + if (ret) { + mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); + rtnl_unlock(); + return ret; + } + ret = iwl_run_init_mvm_ucode(mvm); if (ret && ret != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!ret && iwl_mvm_is_lar_supported(mvm)) { @@ -701,9 +742,10 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); + wiphy_unlock(mvm->hw->wiphy); rtnl_unlock(); - if (ret < 0) + if (ret) IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); return ret; @@ -711,6 +753,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm) static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) { + struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; int ret; iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); @@ -718,10 +761,17 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm) ret = iwl_mvm_mac_setup_register(mvm); if (ret) return ret; + mvm->hw_registered = true; iwl_mvm_dbgfs_register(mvm); + wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, + mvm->mei_rfkill_blocked, + RFKILL_HARD_BLOCK_NOT_OWNER); + + iwl_mvm_mei_set_sw_rfkill_state(mvm); + return 0; } @@ -902,6 +952,109 @@ static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = { .frob_mem = iwl_mvm_frob_mem, }; +static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info) +{ + struct iwl_mvm *mvm = priv; + struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info; + + /* + * This is protected by the guarantee that this function will not be + * called twice on two different threads + */ + prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true); + + curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL); + if (!curr_conn_info) + return; + + curr_conn_info->conn_info = *conn_info; + + rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info); + + if (prev_conn_info) + kfree_rcu(prev_conn_info, rcu_head); +} + 
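iwl_mvm_me_conn_status() above is a textbook RCU publish: build the new snapshot, swap the pointer, then defer freeing the old one until readers drain. A kernel-context sketch of the same pattern (the type and names are invented, and this is a fragment meant to live inside a module, not standalone userspace code):

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct conn_box {
	struct rcu_head rcu_head;
	int channel;			/* illustrative payload */
};

/* read under rcu_read_lock() elsewhere */
static struct conn_box __rcu *demo_conn;

/* Single-writer update, mirroring iwl_mvm_me_conn_status(): passing
 * "true" to rcu_dereference_protected() encodes the guarantee that no
 * second writer runs concurrently. */
static void demo_update_conn(int channel)
{
	struct conn_box *old, *fresh;

	old = rcu_dereference_protected(demo_conn, true);

	fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);
	if (!fresh)
		return;
	fresh->channel = channel;

	rcu_assign_pointer(demo_conn, fresh);	/* publish */

	if (old)
		kfree_rcu(old, rcu_head);	/* free after a grace period */
}
```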
+static void iwl_mvm_mei_rfkill(void *priv, bool blocked) +{ + struct iwl_mvm *mvm = priv; + + mvm->mei_rfkill_blocked = blocked; + if (!mvm->hw_registered) + return; + + wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy, + mvm->mei_rfkill_blocked, + RFKILL_HARD_BLOCK_NOT_OWNER); +} + +static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden) +{ + struct iwl_mvm *mvm = priv; + + if (!mvm->hw_registered || !mvm->csme_vif) + return; + + iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden); +} + +static void iwl_mvm_sap_connected_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm = + container_of(wk, struct iwl_mvm, sap_connected_wk); + int ret; + + ret = iwl_mvm_start_get_nvm(mvm); + if (ret) + goto out_free; + + ret = iwl_mvm_start_post_nvm(mvm); + if (ret) + goto out_free; + + return; + +out_free: + IWL_ERR(mvm, "Couldn't get started...\n"); + iwl_mei_start_unregister(); + iwl_mei_unregister_complete(); + iwl_fw_flush_dumps(&mvm->fwrt); + iwl_mvm_thermal_exit(mvm); + iwl_fw_runtime_free(&mvm->fwrt); + iwl_phy_db_free(mvm->phy_db); + kfree(mvm->scan_cmd); + iwl_trans_op_mode_leave(mvm->trans); + kfree(mvm->nvm_data); + kfree(mvm->mei_nvm_data); + + ieee80211_free_hw(mvm->hw); +} + +static void iwl_mvm_mei_sap_connected(void *priv) +{ + struct iwl_mvm *mvm = priv; + + if (!mvm->hw_registered) + schedule_work(&mvm->sap_connected_wk); +} + +static void iwl_mvm_mei_nic_stolen(void *priv) +{ + struct iwl_mvm *mvm = priv; + + rtnl_lock(); + cfg80211_shutdown_all_interfaces(mvm->hw->wiphy); + rtnl_unlock(); +} + +static const struct iwl_mei_ops mei_ops = { + .me_conn_status = iwl_mvm_me_conn_status, + .rfkill = iwl_mvm_mei_rfkill, + .roaming_forbidden = iwl_mvm_mei_roaming_forbidden, + .sap_connected = iwl_mvm_mei_sap_connected, + .nic_stolen = iwl_mvm_mei_nic_stolen, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -913,9 +1066,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, static const u8 no_reclaim_cmds[] = { TX_CMD, }; - int err, scan_size; + int scan_size; u32 min_backoff; - enum iwl_amsdu_size rb_size_default; + struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused; /* * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station @@ -934,12 +1087,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (!hw) return NULL; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; if (cfg->max_tx_agg_size) hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size; else - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE; op_mode = hw->priv; @@ -954,6 +1107,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, &iwl_mvm_sanitize_ops, mvm, dbgfs_dir); iwl_mvm_get_acpi_tables(mvm); + iwl_uefi_get_sgom_table(trans, &mvm->fwrt); mvm->init_status = 0; @@ -1015,6 +1169,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); + INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -1056,14 
+1211,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - rb_size_default = IWL_AMSDU_2K; - else - rb_size_default = IWL_AMSDU_4K; - switch (iwlwifi_mod_params.amsdu_size) { case IWL_AMSDU_DEF: - trans_cfg.rx_buf_size = rb_size_default; + trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; @@ -1077,7 +1227,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, default: pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, iwlwifi_mod_params.amsdu_size); - trans_cfg.rx_buf_size = rb_size_default; + trans_cfg.rx_buf_size = IWL_AMSDU_4K; } trans->wide_cmd_header = true; @@ -1105,6 +1255,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); + trans_cfg.queue_alloc_cmd_ver = + iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD), + 0); + mvm->sta_remove_requires_queue_remove = + trans_cfg.queue_alloc_cmd_ver > 0; + /* Configure transport layer */ iwl_trans_configure(mvm->trans, &trans_cfg); @@ -1137,10 +1295,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_DEBUG_EEPROM(mvm->trans->dev, "working without external nvm file\n"); - err = iwl_trans_start_hw(mvm->trans); - if (err) - goto out_free; - scan_size = iwl_mvm_scan_size(mvm); mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL); @@ -1165,8 +1319,20 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->debugfs_dir = dbgfs_dir; - if (iwl_mvm_start_get_nvm(mvm)) + mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops); + + if (iwl_mvm_start_get_nvm(mvm)) { + /* + * Getting NVM failed while CSME is the owner, but we are + * registered to MEI, we'll get the NVM later when it'll be + * possible to get it from CSME. + */ + if (trans->csme_own && mvm->mei_registered) + return op_mode; + goto out_thermal_exit; + } + if (iwl_mvm_start_post_nvm(mvm)) goto out_thermal_exit; @@ -1175,6 +1341,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, out_thermal_exit: iwl_mvm_thermal_exit(mvm); + if (mvm->mei_registered) { + iwl_mei_start_unregister(); + iwl_mei_unregister_complete(); + } out_free: iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); @@ -1201,6 +1371,7 @@ void iwl_mvm_stop_device(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); + iwl_mvm_mei_device_down(mvm); } static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) @@ -1208,11 +1379,33 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; + if (mvm->mei_registered) { + rtnl_lock(); + iwl_mei_set_netdev(NULL); + rtnl_unlock(); + iwl_mei_start_unregister(); + } + + /* + * After we unregister from mei, the worker can't be scheduled + * anymore. + */ + cancel_work_sync(&mvm->sap_connected_wk); + iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); - ieee80211_unregister_hw(mvm->hw); + /* + * If we couldn't get ownership on the device and we couldn't + * get the NVM from CSME, we haven't registered to mac80211. 
+ * In that case, we didn't fail op_mode_start, because we are + * waiting for CSME to allow us to get the NVM to register to + * mac80211. If that didn't happen, we haven't registered to + * mac80211, hence the if below. + */ + if (mvm->hw_registered) + ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); @@ -1227,6 +1420,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) mvm->phy_db = NULL; kfree(mvm->nvm_data); + kfree(mvm->mei_nvm_data); + kfree(rcu_access_pointer(mvm->csme_conn_info)); + kfree(mvm->temp_nvm_data); for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); @@ -1235,6 +1431,9 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); + if (mvm->mei_registered) + iwl_mei_unregister_complete(); + ieee80211_free_hw(mvm->hw); } @@ -1517,6 +1716,12 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) iwl_mvm_set_rfkill_state(mvm); } +struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm) +{ + return rcu_dereference_protected(mvm->csme_conn_info, + lockdep_is_held(&mvm->mutex)); +} + static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -1600,6 +1805,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_error_collect(&mvm->fwrt, false); + } else if (test_bit(IWL_MVM_STATUS_STARTING, + &mvm->status)) { + IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n"); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1652,9 +1860,16 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) iwl_fw_error_collect(&mvm->fwrt, false); - if (fw_error && mvm->fw_restart > 0) + if (fw_error && mvm->fw_restart > 0) { mvm->fw_restart--; - ieee80211_restart_hw(mvm->hw); + ieee80211_restart_hw(mvm->hw); + } else if (mvm->fwrt.trans->dbg.restart_required) { + IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n"); + mvm->fwrt.trans->dbg.restart_required = FALSE; + ieee80211_restart_hw(mvm->hw); + } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) { + ieee80211_restart_hw(mvm->hw); + } } } @@ -1662,7 +1877,9 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) + if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) && + !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, + &mvm->status)) iwl_mvm_dump_nic_error_log(mvm); if (sync) { @@ -1683,7 +1900,7 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync) if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status)) return; - iwl_mvm_nic_restart(mvm, true); + iwl_mvm_nic_restart(mvm, false); } static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) @@ -1732,6 +1949,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + if (unlikely(queue >= mvm->trans->num_rx_queues)) + return; + if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, diff --git a/mvm/phy-ctxt.c b/mvm/phy-ctxt.c index 035336a9e755..a3cefbc43e80 100644 --- a/mvm/phy-ctxt.c +++ b/mvm/phy-ctxt.c @@ -1,6 
+1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -157,8 +157,41 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, /* Set the channel info data */ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); - iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, + /* we only support RLC command version 2 */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2) + iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, + chains_static, chains_dynamic); +} + +static int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, + struct iwl_mvm_phy_ctxt *ctxt, + u8 chains_static, u8 chains_dynamic) +{ + struct iwl_rlc_config_cmd cmd = { + .phy_id = cpu_to_le32(ctxt->id), + }; + + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) < 2) + return 0; + + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_DRIVER_FORCE != + PHY_RX_CHAIN_DRIVER_FORCE_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_VALID != + PHY_RX_CHAIN_VALID_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE != + PHY_RX_CHAIN_FORCE_SEL_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_FORCE_MIMO != + PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_COUNT != PHY_RX_CHAIN_CNT_MSK); + BUILD_BUG_ON(IWL_RLC_CHAIN_INFO_MIMO_COUNT != + PHY_RX_CHAIN_MIMO_CNT_MSK); + + iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd.rlc.rx_chain_info, chains_static, chains_dynamic); + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(RLC_CONFIG_CMD, + DATA_PATH_GROUP, 2), + 0, sizeof(cmd), &cmd); } /* @@ -174,10 +207,9 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, u32 action) { int ret; - int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - PHY_CONTEXT_CMD, 1); + int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1); - if (ver == 3) { + if (ver == 3 || ver == 4) { struct iwl_phy_context_cmd cmd = {}; /* Set the command header fields */ @@ -211,9 +243,16 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, } - if (ret) + if (ret) { IWL_ERR(mvm, "PHY ctxt cmd error. 
ret=%d\n", ret); - return ret; + return ret; + } + + if (action != FW_CTXT_ACTION_REMOVE) + return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static, + chains_dynamic); + + return 0; } /* @@ -228,6 +267,8 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, lockdep_assert_held(&mvm->mutex); ctxt->channel = chandef->chan; + ctxt->width = chandef->width; + ctxt->center_freq1 = chandef->center_freq1; return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, @@ -257,6 +298,13 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, lockdep_assert_held(&mvm->mutex); + if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(DATA_PATH_GROUP, RLC_CONFIG_CMD), 0) >= 2 && + ctxt->channel == chandef->chan && + ctxt->width == chandef->width && + ctxt->center_freq1 == chandef->center_freq1) + return iwl_mvm_phy_send_rlc(mvm, ctxt, chains_static, + chains_dynamic); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && ctxt->channel->band != chandef->chan->band) { @@ -275,6 +323,8 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, ctxt->channel = chandef->chan; ctxt->width = chandef->width; + ctxt->center_freq1 = chandef->center_freq1; + return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, chains_static, chains_dynamic, action); @@ -295,18 +345,31 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt) * otherwise we might not be able to reuse this phy. */ if (ctxt->ref == 0) { - struct ieee80211_channel *chan; + struct ieee80211_channel *chan = NULL; struct cfg80211_chan_def chandef; - struct ieee80211_supported_band *sband = NULL; - enum nl80211_band band = NL80211_BAND_2GHZ; + struct ieee80211_supported_band *sband; + enum nl80211_band band; + int channel; - while (!sband && band < NUM_NL80211_BANDS) - sband = mvm->hw->wiphy->bands[band++]; + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { + sband = mvm->hw->wiphy->bands[band]; - if (WARN_ON(!sband)) - return; + if (!sband) + continue; - chan = &sband->channels[0]; + for (channel = 0; channel < sband->n_channels; channel++) + if (!(sband->channels[channel].flags & + IEEE80211_CHAN_DISABLED)) { + chan = &sband->channels[channel]; + break; + } + + if (chan) + break; + } + + if (WARN_ON(!chan)) + return; cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1); diff --git a/mvm/quota.c b/mvm/quota.c index 3d0166df2002..c862bd243b55 100644 --- a/mvm/quota.c +++ b/mvm/quota.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ diff --git a/mvm/rfi.c b/mvm/rfi.c index 44344216a1a9..bb77bc9aa821 100644 --- a/mvm/rfi.c +++ b/mvm/rfi.c @@ -7,39 +7,57 @@ #include "fw/api/commands.h" #include "fw/api/phy-ctxt.h" -/** +/* * DDR needs frequency in units of 16.666MHz, so provide FW with the * frequency values in the adjusted format. 
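The BUILD_BUG_ON() chain in iwl_mvm_phy_send_rlc() above pins the assumption that the new RLC command reuses the legacy PHY_RX_CHAIN_* bit layout, so the shared rxchain helper can fill either structure. Standard C11 gets the same compile-time guarantee with _Static_assert; the mask values below are placeholders, not the real register layout:

```c
#include <stdint.h>

/* Placeholder masks standing in for one of the PHY_RX_CHAIN_* /
 * IWL_RLC_CHAIN_INFO_* pairs checked in the driver. */
#define LEGACY_CHAIN_VALID_MSK	0x000eu
#define RLC_CHAIN_INFO_VALID	0x000eu

/* Fails the build, instead of corrupting commands at runtime, the
 * moment the two definitions drift apart. */
_Static_assert(RLC_CHAIN_INFO_VALID == LEGACY_CHAIN_VALID_MSK,
	       "RLC chain-info bits must match the legacy PHY layout");

int main(void)
{
	return 0;
}
```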
*/ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = { - /* LPDDR4 */ + /* frequency 2667MHz */ + {cpu_to_le16(160), {50, 58, 60, 62, 64, 52, 54, 56}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 2933MHz */ + {cpu_to_le16(176), {149, 151, 153, 157, 159, 161, 165, 163, 167, 169, + 171, 173, 175}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 3200MHz */ + {cpu_to_le16(192), {79, 81, 83, 85, 87, 89, 91, 93}, + {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, + PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, /* frequency 3733MHz */ - {cpu_to_le16(223), {114, 116, 118, 120, 122,}, - {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + {cpu_to_le16(223), {114, 116, 118, 120, 122, 106, 110, 124, 126}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 4000MHz */ + {cpu_to_le16(240), {114, 151, 155, 157, 159, 161, 165}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5,}}, /* frequency 4267MHz */ {cpu_to_le16(256), {79, 83, 85, 87, 89, 91, 93,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, - /* DDR5ePOR */ - - /* frequency 4000MHz */ - {cpu_to_le16(240), {3, 5, 7, 9, 11, 13, 15,}, - {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, - PHY_BAND_6, PHY_BAND_6,}}, - /* frequency 4400MHz */ {cpu_to_le16(264), {111, 119, 123, 125, 129, 131, 133, 135, 143,}, {PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6, PHY_BAND_6,}}, - /* LPDDR5iPOR */ - /* frequency 5200MHz */ - {cpu_to_le16(312), {36, 38, 40, 42, 50,}, - {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, + {cpu_to_le16(312), {36, 38, 40, 42, 44, 46, 50,}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5,}}, + + /* frequency 5600MHz */ + {cpu_to_le16(336), {106, 110, 112, 114, 116, 118, 120, 122}, + {PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, PHY_BAND_5, + PHY_BAND_5, PHY_BAND_5, PHY_BAND_5,}}, /* frequency 6000MHz */ {cpu_to_le16(360), {3, 5, 7, 9, 11, 13, 15,}, @@ -107,12 +125,19 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm) if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != resp_size)) return ERR_PTR(-EIO); - resp = kzalloc(resp_size, GFP_KERNEL); + resp = kmemdup(cmd.resp_pkt->data, resp_size, GFP_KERNEL); if (!resp) return ERR_PTR(-ENOMEM); - memcpy(resp, cmd.resp_pkt->data, resp_size); - iwl_free_resp(&cmd); return resp; } + +void iwl_rfi_deactivate_notif_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rfi_deactivate_notif *notif = (void *)pkt->data; + + IWL_INFO(mvm, "RFIm is deactivated, reason = %d\n", notif->reason); +} diff --git a/mvm/rs-fw.c b/mvm/rs-fw.c index 958702403a45..9830d2663689 100644 --- a/mvm/rs-fw.c +++ b/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -97,7 +97,10 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, if (he_cap->has_he && 
(he_cap->he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK && + sband->iftype_data && + sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; return flags; @@ -129,7 +132,7 @@ int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, static void rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, const struct ieee80211_sta_vht_cap *vht_cap, - struct iwl_tlc_config_cmd *cmd) + struct iwl_tlc_config_cmd_v4 *cmd) { u16 supp; int i, highest_mcs; @@ -154,7 +157,7 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, if (sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); - cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp); + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp); /* * Check if VHT extended NSS indicates that the bandwidth/NSS * configuration is supported - only for MCS 0 since we already @@ -164,8 +167,8 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, ieee80211_get_vht_max_nss(&ieee_vht_cap, IEEE80211_VHT_CHANWIDTH_160MHZ, 0, true, nss) >= nss) - cmd->ht_rates[i][IWL_TLC_HT_BW_160] = - cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160]; + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80]; } } @@ -189,7 +192,7 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) static void rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, - struct iwl_tlc_config_cmd *cmd) + struct iwl_tlc_config_cmd_v4 *cmd) { const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); @@ -219,7 +222,7 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } if (_mcs_80 > _tx_mcs_80) _mcs_80 = _tx_mcs_80; - cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); /* If one side doesn't support - mark both as not supporting */ @@ -230,14 +233,14 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, } if (_mcs_160 > _tx_mcs_160) _mcs_160 = _tx_mcs_160; - cmd->ht_rates[i][IWL_TLC_HT_BW_160] = + cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] = cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); } } static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, - struct iwl_tlc_config_cmd *cmd) + struct iwl_tlc_config_cmd_v4 *cmd) { int i; u16 supp = 0; @@ -263,15 +266,15 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; - cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); /* the station support only a single receive chain */ if (sta->smps_mode == IEEE80211_SMPS_STATIC) - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = 0; else - cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] = + cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } @@ -291,8 +294,12 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, notif = (void *)pkt->data; sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (IS_ERR_OR_NULL(sta)) { - IWL_ERR(mvm, "Invalid sta 
id (%d) in FW TLC notification\n", - notif->sta_id); + /* can happen in remove station flow where mvm removed internally + * the station before removing from FW + */ + IWL_DEBUG_RATE(mvm, + "Invalid mvm RCU pointer for sta id (%d) in TLC notification\n", + notif->sta_id); goto out; } @@ -311,18 +318,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, if (flags & IWL_TLC_NOTIF_FLAG_RATE) { char pretty_rate[100]; - if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, - TLC_MNG_UPDATE_NOTIF, 0) < 3) { - rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate), - le32_to_cpu(notif->rate)); - IWL_DEBUG_RATE(mvm, - "Got rate in old format. Rate: %s. Converting.\n", - pretty_rate); - lq_sta->last_rate_n_flags = - iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); - } else { - lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); - } + if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP, + TLC_MNG_UPDATE_NOTIF, 0) < 3) { + rs_pretty_print_rate_v1(pretty_rate, + sizeof(pretty_rate), + le32_to_cpu(notif->rate)); + IWL_DEBUG_RATE(mvm, + "Got rate in old format. Rate: %s. Converting.\n", + pretty_rate); + lq_sta->last_rate_n_flags = + iwl_new_rate_from_v1(le32_to_cpu(notif->rate)); + } else { + lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); + } rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), lq_sta->last_rate_n_flags); IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate); @@ -415,26 +423,21 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_hw *hw = mvm->hw; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; - u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0); + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, TLC_MNG_CONFIG_CMD); struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta); - struct iwl_tlc_config_cmd cfg_cmd = { + struct iwl_tlc_config_cmd_v4 cfg_cmd = { .sta_id = mvmsta->sta_id, .max_ch_width = update ? rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20, .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), - .max_mpdu_len = cpu_to_le16(max_amsdu_len), - .amsdu = iwl_mvm_is_csum_supported(mvm), + .max_mpdu_len = iwl_mvm_is_csum_supported(mvm) ? 
+ cpu_to_le16(max_amsdu_len) : 0, }; int ret; - u16 cmd_size = sizeof(cfg_cmd); - - /* In old versions of the API the struct is 4 bytes smaller */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP, - TLC_MNG_CONFIG_CMD, 0) < 3) - cmd_size -= 4; + int cmd_ver; memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); @@ -449,8 +452,56 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, */ sta->max_amsdu_len = max_amsdu_len; - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, - &cfg_cmd); + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, + TLC_MNG_CONFIG_CMD), + 0); + IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n", + cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode); + IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n", + cfg_cmd.chains, cfg_cmd.sgi_ch_width_supp, cfg_cmd.flags); + IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, mpdu_len=%d, no_ht_rate=0x%X, tx_op=%d\n", + cfg_cmd.max_mpdu_len, cfg_cmd.non_ht_rates, cfg_cmd.max_tx_op); + IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][0]=0x%X, ht_rate[1][0]=0x%X\n", + cfg_cmd.ht_rates[0][0], cfg_cmd.ht_rates[1][0]); + IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][1]=0x%X, ht_rate[1][1]=0x%X\n", + cfg_cmd.ht_rates[0][1], cfg_cmd.ht_rates[1][1]); + IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, ht_rate[0][2]=0x%X, ht_rate[1][2]=0x%X\n", + cfg_cmd.ht_rates[0][2], cfg_cmd.ht_rates[1][2]); + if (cmd_ver == 4) { + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, + sizeof(cfg_cmd), &cfg_cmd); + } else if (cmd_ver < 4) { + struct iwl_tlc_config_cmd_v3 cfg_cmd_v3 = { + .sta_id = cfg_cmd.sta_id, + .max_ch_width = cfg_cmd.max_ch_width, + .mode = cfg_cmd.mode, + .chains = cfg_cmd.chains, + .amsdu = !!cfg_cmd.max_mpdu_len, + .flags = cfg_cmd.flags, + .non_ht_rates = cfg_cmd.non_ht_rates, + .ht_rates[0][0] = cfg_cmd.ht_rates[0][0], + .ht_rates[0][1] = cfg_cmd.ht_rates[0][1], + .ht_rates[1][0] = cfg_cmd.ht_rates[1][0], + .ht_rates[1][1] = cfg_cmd.ht_rates[1][1], + .sgi_ch_width_supp = cfg_cmd.sgi_ch_width_supp, + .max_mpdu_len = cfg_cmd.max_mpdu_len, + }; + + u16 cmd_size = sizeof(cfg_cmd_v3); + + /* In old versions of the API the struct is 4 bytes smaller */ + if (iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(DATA_PATH_GROUP, + TLC_MNG_CONFIG_CMD), 0) < 3) + cmd_size -= 4; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size, + &cfg_cmd_v3); + } else { + ret = -EINVAL; + } + if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); } @@ -83,8 +83,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen = len - hdrlen; if (fraglen) { - int offset = (void *)hdr + hdrlen - - rxb_addr(rxb) + rxb_offset(rxb); + int offset = (u8 *)hdr + hdrlen - + (u8 *)rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); @@ -527,40 +527,19 @@ struct iwl_mvm_stat_data { u8 *beacon_average_energy; }; -static void iwl_mvm_stat_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +struct iwl_mvm_stat_data_all_macs { + struct iwl_mvm *mvm; + __le32 flags; + struct iwl_statistics_ntfy_per_mac *per_mac_stats; +}; + +static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig) { - struct iwl_mvm_stat_data *data = _data; - struct iwl_mvm *mvm = data->mvm; - int sig = -data->beacon_filter_average_energy; - int last_event; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; int thold = vif->bss_conf.cqm_rssi_thold; int hyst = 
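The TLC_MNG_CONFIG_CMD path above builds the newest (v4) layout once and, for older firmware, translates it field by field into the v3 struct, deriving v3's amsdu flag from the v4 length and trimming four bytes for pre-v3 firmware. A compact standalone model of that downgrade, with heavily trimmed stand-in structs:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Trimmed stand-ins for iwl_tlc_config_cmd_v4/_v3; the real layouts
 * carry many more fields. */
struct tlc_cmd_v4 {
	uint8_t sta_id;
	uint8_t max_ch_width;
	uint16_t max_mpdu_len;
	uint16_t ht_rates[2][3];	/* v4 grows a bandwidth column */
};

struct tlc_cmd_v3 {
	uint8_t sta_id;
	uint8_t max_ch_width;
	uint8_t amsdu;			/* v3 carries a flag, not a length */
	uint16_t ht_rates[2][2];
	uint16_t max_mpdu_len;
};

/* Build the newest layout once, downgrade if the firmware advertises
 * an older command version. Returns the size to send. */
static size_t build_tlc_cmd(const struct tlc_cmd_v4 *v4, int fw_ver,
			    void *out)
{
	if (fw_ver >= 4) {
		memcpy(out, v4, sizeof(*v4));
		return sizeof(*v4);
	}

	struct tlc_cmd_v3 v3 = {
		.sta_id = v4->sta_id,
		.max_ch_width = v4->max_ch_width,
		.amsdu = !!v4->max_mpdu_len,	/* derived, as in the driver */
		.max_mpdu_len = v4->max_mpdu_len,
	};
	for (int i = 0; i < 2; i++)
		for (int j = 0; j < 2; j++)
			v3.ht_rates[i][j] = v4->ht_rates[i][j];

	memcpy(out, &v3, sizeof(v3));
	/* pre-v3 firmware took a struct that was 4 bytes shorter */
	return fw_ver < 3 ? sizeof(v3) - 4 : sizeof(v3);
}

int main(void)
{
	struct tlc_cmd_v4 v4 = { .sta_id = 1, .max_mpdu_len = 3839 };
	uint8_t buf[64];

	printf("v3 payload: %zu bytes\n", build_tlc_cmd(&v4, 3, buf));
	return 0;
}
```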
vif->bss_conf.cqm_rssi_hyst; - u16 id = le32_to_cpu(data->mac_id); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u16 vif_id = mvmvif->id; - - /* This doesn't need the MAC ID check since it's not taking the - * data copied into the "data" struct, but rather the data from - * the notification directly. - */ - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(data->beacon_counter[vif_id]); - mvmvif->beacon_stats.avg_signal = - -data->beacon_average_energy[vif_id]; - - /* make sure that beacon statistics don't go backwards with TCM - * request to clear statistics - */ - if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - mvmvif->beacon_stats.accu_num_beacons += - mvmvif->beacon_stats.num_beacons; - - if (mvmvif->id != id) - return; - - if (vif->type != NL80211_IFTYPE_STATION) - return; + int last_event; if (sig == 0) { IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n"); @@ -618,6 +597,73 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, } } +static void iwl_mvm_stat_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_stat_data *data = _data; + int sig = -data->beacon_filter_average_energy; + u16 id = le32_to_cpu(data->mac_id); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 vif_id = mvmvif->id; + + /* This doesn't need the MAC ID check since it's not taking the + * data copied into the "data" struct, but rather the data from + * the notification directly. + */ + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(data->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -data->beacon_average_energy[vif_id]; + + if (mvmvif->id != id) + return; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + + iwl_mvm_update_vif_sig(vif, sig); +} + +static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_stat_data_all_macs *data = _data; + struct iwl_statistics_ntfy_per_mac *mac_stats; + int sig; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 vif_id = mvmvif->id; + + if (WARN_ONCE(vif_id >= MAC_INDEX_AUX, "invalid vif id: %d", vif_id)) + return; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + mac_stats = &data->per_mac_stats[vif_id]; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(mac_stats->beacon_counter); + mvmvif->beacon_stats.avg_signal = + -le32_to_cpu(mac_stats->beacon_average_energy); + + /* make sure that beacon statistics don't go backwards with TCM + * request to clear statistics + */ + if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) + mvmvif->beacon_stats.accu_num_beacons += + mvmvif->beacon_stats.num_beacons; + + sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy); + iwl_mvm_update_vif_sig(vif, sig); +} + static inline void iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -684,47 +730,41 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le, } static void -iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) +iwl_mvm_stats_ver_15(struct iwl_mvm *mvm, + struct iwl_statistics_operational_ntfy *stats) +{ + struct iwl_mvm_stat_data_all_macs data = { + .mvm = mvm, + .flags = stats->flags, + .per_mac_stats = stats->per_mac_stats, + }; + + 
ieee80211_iterate_active_interfaces(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_stat_iterator_all_macs, + &data); +} + +static void +iwl_mvm_stats_ver_14(struct iwl_mvm *mvm, + struct iwl_statistics_operational_ntfy_ver_14 *stats) { struct iwl_mvm_stat_data data = { .mvm = mvm, }; + u8 beacon_average_energy[MAC_INDEX_AUX]; - u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; - struct iwl_statistics_operational_ntfy *stats; - int expected_size; __le32 flags; int i; - expected_size = sizeof(*stats); - if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size, - "received invalid statistics size (%d)!, expected_size: %d\n", - iwl_rx_packet_payload_len(pkt), expected_size)) - return; - - stats = (void *)&pkt->data; - - if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL || - stats->hdr.version != - iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, STATISTICS_CMD, 0), - "received unsupported hdr type %d, version %d\n", - stats->hdr.type, stats->hdr.version)) - return; - flags = stats->flags; - mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); - mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); - mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); - mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan); - - iwl_mvm_rx_stats_check_trigger(mvm, pkt); data.mac_id = stats->mac_id; data.beacon_filter_average_energy = le32_to_cpu(stats->beacon_filter_average_energy); data.flags = flags; data.beacon_counter = stats->beacon_counter; + for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++) beacon_average_energy[i] = le32_to_cpu(stats->beacon_average_energy[i]); @@ -735,9 +775,105 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_stat_iterator, &data); +} + +static bool iwl_mvm_verify_stats_len(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt, + u32 expected_size) +{ + struct iwl_statistics_ntfy_hdr *hdr; + + if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size, + "received invalid statistics size (%d)!, expected_size: %d\n", + iwl_rx_packet_payload_len(pkt), expected_size)) + return false; + + hdr = (void *)&pkt->data; + + if (WARN_ONCE((hdr->type & IWL_STATISTICS_TYPE_MSK) != FW_STATISTICS_OPERATIONAL || + hdr->version != + iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, STATISTICS_NOTIFICATION, 0), + "received unsupported hdr type %d, version %d\n", + hdr->type, hdr->version)) + return false; + + if (WARN_ONCE(le16_to_cpu(hdr->size) != expected_size, + "received invalid statistics size in header (%d)!, expected_size: %d\n", + le16_to_cpu(hdr->size), expected_size)) + return false; + + return true; +} + +static void +iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, + struct iwl_rx_packet *pkt) +{ + u8 average_energy[IWL_MVM_STATION_COUNT_MAX]; + __le32 air_time[MAC_INDEX_AUX]; + __le32 rx_bytes[MAC_INDEX_AUX]; + __le32 flags = 0; + int i; + u32 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + STATISTICS_NOTIFICATION, 0); + + if (WARN_ONCE(notif_ver > 15, + "invalid statistics version id: %d\n", notif_ver)) + return; + + if (notif_ver == 14) { + struct iwl_statistics_operational_ntfy_ver_14 *stats = + (void *)pkt->data; + + if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats))) + return; + + iwl_mvm_stats_ver_14(mvm, stats); + + flags = stats->flags; + mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); + mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); + mvm->radio_stats.on_time_scan = + le64_to_cpu(stats->on_time_scan); 
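iwl_mvm_update_vif_sig(), factored out above so both statistics formats can share it, makes the classic CQM decision around cqm_rssi_thold/cqm_rssi_hyst: report a low or high crossing only when the signal moved past the threshold and far enough from the last reported level to avoid flapping. A runnable sketch of that hysteresis shape (threshold, hysteresis and the sample trace are made up):

```c
#include <stdio.h>

enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

/* Report a crossing only when the signal is past the threshold AND at
 * least "hyst" dB away from the last reported level. */
static enum cqm_event cqm_check(int sig, int thold, int hyst, int *last_event)
{
	if (sig < thold && (*last_event == 0 || sig < *last_event - hyst)) {
		*last_event = sig;
		return CQM_LOW;
	}
	if (sig > thold && (*last_event == 0 || sig > *last_event + hyst)) {
		*last_event = sig;
		return CQM_HIGH;
	}
	return CQM_NONE;
}

int main(void)
{
	int last_event = 0;	/* 0 = nothing reported yet */
	int trace[] = { -60, -75, -78, -72, -65 };

	for (unsigned i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		enum cqm_event e = cqm_check(trace[i], -70, 4, &last_event);

		if (e != CQM_NONE)
			printf("rssi %d -> report %s\n", trace[i],
			       e == CQM_LOW ? "low" : "high");
	}
	return 0;
}
```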
+ + for (i = 0; i < ARRAY_SIZE(average_energy); i++) + average_energy[i] = le32_to_cpu(stats->average_energy[i]); + + for (i = 0; i < ARRAY_SIZE(air_time); i++) { + air_time[i] = stats->air_time[i]; + rx_bytes[i] = stats->rx_bytes[i]; + } + } + + if (notif_ver == 15) { + struct iwl_statistics_operational_ntfy *stats = + (void *)pkt->data; + + if (!iwl_mvm_verify_stats_len(mvm, pkt, sizeof(*stats))) + return; + + iwl_mvm_stats_ver_15(mvm, stats); + + flags = stats->flags; + mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time); + mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time); + mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf); + mvm->radio_stats.on_time_scan = + le64_to_cpu(stats->on_time_scan); + + for (i = 0; i < ARRAY_SIZE(average_energy); i++) + average_energy[i] = + le32_to_cpu(stats->per_sta_stats[i].average_energy); + + for (i = 0; i < ARRAY_SIZE(air_time); i++) { + air_time[i] = stats->per_mac_stats[i].air_time; + rx_bytes[i] = stats->per_mac_stats[i].rx_bytes; + } + } + + iwl_mvm_rx_stats_check_trigger(mvm, pkt); - for (i = 0; i < ARRAY_SIZE(average_energy); i++) - average_energy[i] = le32_to_cpu(stats->average_energy[i]); ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, average_energy); /* @@ -746,8 +882,7 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, * request and once in statistics notification. */ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR) - iwl_mvm_update_tcm_from_stats(mvm, stats->air_time, - stats->rx_bytes); + iwl_mvm_update_tcm_from_stats(mvm, air_time, rx_bytes); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, @@ -761,8 +896,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, u8 *energy; /* From ver 14 and up we use TLV statistics format */ - if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - STATISTICS_CMD, 0) >= 14) + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + STATISTICS_NOTIFICATION, 0) >= 14) return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt); if (!iwl_mvm_has_new_rx_stats_api(mvm)) { diff --git a/mvm/rxmq.c b/mvm/rxmq.c index e0601f802628..2c43a9989783 100644 --- a/mvm/rxmq.c +++ b/mvm/rxmq.c @@ -121,12 +121,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + u8 mic_crc_len = u8_get_bits(desc->mac_flags1, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1; if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { len -= 2; pad_len = 2; } + /* + * For non monitor interface strip the bytes the RADA might not have + * removed. As monitor interface cannot exist with other interfaces + * this removal is safe. + */ + if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) { + u32 pkt_flags = le32_to_cpu(pkt->len_n_flags); + + /* + * If RADA was not enabled then decryption was not performed so + * the MIC cannot be removed. + */ + if (!(pkt_flags & FH_RSCSR_RADA_EN)) { + if (WARN_ON(crypt_len > mic_crc_len)) + return -EINVAL; + + mic_crc_len -= crypt_len; + } + + if (WARN_ON(mic_crc_len > len)) + return -EINVAL; + + len -= mic_crc_len; + } + /* If frame is small enough to fit in skb->head, pull it completely. 
* If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that @@ -149,18 +176,8 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, */ hdrlen += crypt_len; - if (WARN_ONCE(headlen < hdrlen, - "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", - hdrlen, len, crypt_len)) { - /* - * We warn and trace because we want to be able to see - * it in trace-cmd as well. - */ - IWL_DEBUG_RX(mvm, - "invalid packet lengths (hdrlen=%d, len=%d, crypt_len=%d)\n", - hdrlen, len, crypt_len); + if (unlikely(headlen < hdrlen)) return -EINVAL; - } skb_put_data(skb, hdr, hdrlen); skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); @@ -172,8 +189,12 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, * in the cases the hardware didn't handle, since it's rare to see * such packets, even though the hardware did calculate the checksum * in this case, just starting after the MAC header instead. + * + * Starting from Bz hardware, it calculates starting directly after + * the MAC header, so that matches mac80211's expectation. */ - if (skb->ip_summed == CHECKSUM_COMPLETE) { + if (skb->ip_summed == CHECKSUM_COMPLETE && + mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) { struct { u8 hdr[6]; __be16 type; @@ -188,13 +209,16 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, shdr->type != htons(ETH_P_PAE) && shdr->type != htons(ETH_P_TDLS)))) skb->ip_summed = CHECKSUM_NONE; + else + /* mac80211 assumes full CSUM including SNAP header */ + skb_postpush_rcsum(skb, shdr, sizeof(*shdr)); } fraglen = len - headlen; if (fraglen) { - int offset = (void *)hdr + headlen + pad_len - - rxb_addr(rxb) + rxb_offset(rxb); + int offset = (u8 *)hdr + headlen + pad_len - + (u8 *)rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, fraglen, rxb->truesize); @@ -766,8 +790,11 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, rcu_read_lock(); ba_data = rcu_dereference(mvm->baid_map[baid]); - if (WARN_ON_ONCE(!ba_data)) + if (!ba_data) { + WARN(!(flags & IWL_MVM_RELEASE_FROM_RSS_SYNC), + "BAID %d not found in map\n", baid); goto out; + } sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) @@ -1961,8 +1988,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } else if (format == RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_MCS_NSS_MSK) >> + rx_status->nss = ((rate_n_flags & RATE_MCS_NSS_MSK) >> RATE_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; rx_status->encoding = RX_ENC_VHT; diff --git a/mvm/scan.c b/mvm/scan.c index a138b5c4cce8..a4077053e374 100644 --- a/mvm/scan.c +++ b/mvm/scan.c @@ -20,7 +20,6 @@ #define IWL_SCAN_DWELL_FRAGMENTED 44 #define IWL_SCAN_DWELL_EXTENDED 90 #define IWL_SCAN_NUM_OF_FRAGS 3 -#define IWL_SCAN_LAST_2_4_CHN 14 /* adaptive dwell max budget time [TU] for full scan */ #define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 @@ -98,6 +97,7 @@ struct iwl_mvm_scan_params { u32 n_6ghz_params; bool scan_6ghz; bool enable_6ghz_passive; + bool respect_p2p_go, respect_p2p_go_hb; }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) @@ -169,17 +169,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); } -static void 
iwl_mvm_scan_condition_iterator(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - int *global_cnt = data; - - if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && - mvmvif->phy_ctxt->id < NUM_PHY_CTX) - *global_cnt += 1; -} - static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) { return mvm->tcm.result.global_load; @@ -191,26 +180,31 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) return mvm->tcm.result.band_load[band]; } -struct iwl_is_dcm_with_go_iterator_data { +struct iwl_mvm_scan_iter_data { + u32 global_cnt; struct ieee80211_vif *current_vif; bool is_dcm_with_p2p_go; }; -static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_scan_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) { - struct iwl_is_dcm_with_go_iterator_data *data = _data; - struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm_vif *curr_mvmvif = - iwl_mvm_vif_from_mac80211(data->current_vif); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_scan_iter_data *data = _data; + struct iwl_mvm_vif *curr_mvmvif; - /* exclude the given vif */ - if (vif == data->current_vif) + if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt && + mvmvif->phy_ctxt->id < NUM_PHY_CTX) + data->global_cnt += 1; + + if (!data->current_vif || vif == data->current_vif) return; + curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); + if (vif->type == NL80211_IFTYPE_AP && vif->p2p && - other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && - other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) + mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt && + mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id) data->is_dcm_with_p2p_go = true; } @@ -220,13 +214,18 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, enum iwl_mvm_traffic_load load, bool low_latency) { - int global_cnt = 0; + struct iwl_mvm_scan_iter_data data = { + .current_vif = vif, + .is_dcm_with_p2p_go = false, + .global_cnt = 0, + }; ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_scan_condition_iterator, - &global_cnt); - if (!global_cnt) + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_scan_iterator, + &data); + + if (!data.global_cnt) return IWL_SCAN_TYPE_UNASSOC; if (fw_has_api(&mvm->fw->ucode_capa, @@ -235,23 +234,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE)) return IWL_SCAN_TYPE_FRAGMENTED; - /* in case of DCM with GO where BSS DTIM interval < 220msec + /* + * in case of DCM with GO where BSS DTIM interval < 220msec * set all scan requests as fast-balance scan - * */ + */ if (vif && vif->type == NL80211_IFTYPE_STATION && - vif->bss_conf.dtim_period < 220) { - struct iwl_is_dcm_with_go_iterator_data data = { - .current_vif = vif, - .is_dcm_with_p2p_go = false, - }; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_is_dcm_with_go_iterator, - &data); - if (data.is_dcm_with_p2p_go) - return IWL_SCAN_TYPE_FAST_BALANCE; - } + vif->bss_conf.dtim_period < 220 && + data.is_dcm_with_p2p_go) + return IWL_SCAN_TYPE_FAST_BALANCE; } if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) @@ -579,7 +569,9 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, profile->ssid_index = i; /* Support any cipher and auth algorithm */ profile->unicast_cipher = 0xff; - 
profile->auth_alg = 0xff; + profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED | + IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X | + IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE; profile->network_type = IWL_NETWORK_TYPE_ANY; profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN; @@ -649,9 +641,7 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm, NL80211_BAND_2GHZ, no_cck); - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, - 0) < 12) { + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { tx_cmd[0].sta_id = mvm->aux_sta.sta_id; tx_cmd[1].sta_id = mvm->aux_sta.sta_id; @@ -1088,8 +1078,7 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); /* This function should not be called when using ADD_STA ver >=12 */ - WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, 0) >= 12); + WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; @@ -1140,8 +1129,7 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config, memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN); /* This function should not be called when using ADD_STA ver >=12 */ - WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, 0) >= 12); + WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12); cfg->bcast_sta_id = mvm->aux_sta.sta_id; cfg->channel_flags = channel_flags; @@ -1154,7 +1142,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) void *cfg; int ret, cmd_size; struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD), }; enum iwl_mvm_scan_type type; enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET; @@ -1245,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) { struct iwl_scan_config cfg; struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD), .len[0] = sizeof(cfg), .data[0] = &cfg, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, @@ -1256,11 +1244,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) memset(&cfg, 0, sizeof(cfg)); - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, 0) < 12) { + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) { cfg.bcast_sta_id = mvm->aux_sta.sta_id; - } else if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - SCAN_CFG_CMD, 0) < 5) { + } else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) { /* * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD * version 5. 
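The .id changes in the two hunks above swap iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0) for WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD). Both helpers appear to produce the same encoding, opcode in the low byte and group in the next byte, with iwl_cmd_id's version argument occupying higher bits that every call site here left at zero, so the conversion reads as a cleanup that drops the dead parameter and flips the argument order. A sketch of that equivalence (the macro bodies below are local stand-ins written from this description, not copied from the driver's headers, and the numeric values are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Stand-ins modelling the described bit layout. */
#define OLD_CMD_ID(opcode, grp, ver) \
	((uint32_t)(opcode) | ((uint32_t)(grp) << 8) | ((uint32_t)(ver) << 16))
#define WIDE_ID(grp, opcode) ((uint32_t)(((grp) << 8) | (opcode)))

int main(void)
{
	/* With version == 0 the two forms encode identically. */
	assert(OLD_CMD_ID(0x0d, 0x1c, 0) == WIDE_ID(0x1c, 0x0d));
	return 0;
}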
@@ -1394,8 +1380,8 @@ static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params) } static void -iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm, - struct iwl_scan_general_params_v10 *general_params, +iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, + struct iwl_scan_general_params_v11 *general_params, struct iwl_mvm_scan_params *params) { struct iwl_mvm_scan_timing_params *timing, *hb_timing; @@ -1660,7 +1646,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, } } -static int +static void iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct iwl_scan_probe_params_v4 *pp) @@ -1729,31 +1715,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, pp->short_ssid_num = idex_s; pp->bssid_num = idex_b; - return 0; } /* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */ -static void -iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, +static u32 +iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, u32 n_channels, struct iwl_scan_probe_params_v4 *pp, struct iwl_scan_channel_params_v6 *cp, enum nl80211_iftype vif_type) { - struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config; int i; struct cfg80211_scan_6ghz_params *scan_6ghz_params = params->scan_6ghz_params; + u32 ch_cnt; - for (i = 0; i < params->n_channels; i++) { + for (i = 0, ch_cnt = 0; i < params->n_channels; i++) { struct iwl_scan_channel_cfg_umac *cfg = - &cp->channel_config[i]; + &cp->channel_config[ch_cnt]; u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries; bool force_passive, found = false, allow_passive = true, unsolicited_probe_on_chan = false, psc_no_listen = false; + /* + * Avoid performing passive scan on non PSC channels unless the + * scan is specifically a passive scan, i.e., no SSIDs + * configured in the scan command. 
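The filter introduced in the next hunk lines makes iwl_mvm_umac_scan_cfg_channels_v6_6g() skip non-PSC 6 GHz channels whenever the scan carries SSIDs but no 6 GHz-specific parameters, compacting the channel array as it goes and returning the reduced count instead of assuming all params->n_channels entries were written. A standalone sketch of that compaction pattern, with illustrative stand-in types and names:

#include <stdbool.h>
#include <stddef.h>

/* Stand-in for the driver's channel descriptors. */
struct chan { int hw_value; bool is_psc; };

static size_t fill_6ghz_channels(const struct chan *in, size_t n_in,
				 bool have_ssids, bool have_6ghz_params,
				 struct chan *out)
{
	size_t out_cnt = 0;

	for (size_t i = 0; i < n_in; i++) {
		/* Probing non-PSC channels is pointless for an active
		 * scan with no per-channel 6 GHz hints, so drop them. */
		if (!in[i].is_psc && have_ssids && !have_6ghz_params)
			continue;
		out[out_cnt++] = in[i];	/* emit into the next free slot */
	}
	return out_cnt;	/* the caller stores this as cp->count */
}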
+ */ + if (!cfg80211_channel_is_psc(params->channels[i]) && + !params->n_6ghz_params && params->n_ssids) + continue; + cfg->v1.channel_num = params->channels[i]->hw_value; cfg->v2.band = 2; cfg->v2.iter_count = 1; @@ -1826,8 +1821,6 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, } } - flags = bssid_bitmap | (s_ssid_bitmap << 16); - if (cfg80211_channel_is_psc(params->channels[i]) && psc_no_listen) flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN; @@ -1869,11 +1862,22 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params, (s_max > 1 || b_max > 3)); } if ((allow_passive && force_passive) || - (!flags && !cfg80211_channel_is_psc(params->channels[i]))) + (!(bssid_bitmap | s_ssid_bitmap) && + !cfg80211_channel_is_psc(params->channels[i]))) flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE; + else + flags |= bssid_bitmap | (s_ssid_bitmap << 16); - channel_cfg[i].flags |= cpu_to_le32(flags); + cfg->flags |= cpu_to_le32(flags); + ch_cnt++; } + + if (params->n_channels > ch_cnt) + IWL_DEBUG_SCAN(mvm, + "6GHz: reducing the number of channels: (%u->%u)\n", + params->n_channels, ch_cnt); + + return ch_cnt; } static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, @@ -1890,9 +1894,25 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set fragmented ebs for fragmented scan on HB channels */ - if (iwl_mvm_is_scan_fragmented(params->hb_type)) + if ((!iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->type)) || + (iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->hb_type))) flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; + /* + * force EBS in case the scan is fragmented and P2P GO operation needs + * to be taken into consideration during the scan operation. + */ + if ((!iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) || + (iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->hb_type) && + params->respect_p2p_go_hb)) { + IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n"); + flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS; + } + return flags; } @@ -1924,22 +1944,19 @@ static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm, } /* - * 6GHz passive scan is allowed while associated in a defined time - * interval following HW reset or resume flow + * 6GHz passive scan is allowed in a defined time interval following HW + * reset or resume flow, or while not associated and a large interval + * has passed since the last 6GHz passive scan. */ - if (vif->bss_conf.assoc && + if ((vif->bss_conf.assoc || + time_after(mvm->last_6ghz_passive_scan_jiffies + + (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) && (time_before(mvm->last_reset_or_resume_time_jiffies + (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ), jiffies))) { - IWL_DEBUG_SCAN(mvm, "6GHz passive scan: associated\n"); - return; - } - - /* No need for 6GHz passive scan if not enough time elapsed */ - if (time_after(mvm->last_6ghz_passive_scan_jiffies + - (IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) { - IWL_DEBUG_SCAN(mvm, - "6GHz passive scan: timeout did not expire\n"); + IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n", + vif->bss_conf.assoc ?
"associated" : + "timeout did not expire"); return; } @@ -2037,6 +2054,32 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm, if (params->enable_6ghz_passive) flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN; + if (iwl_mvm_is_oce_supported(mvm) && + (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP | + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE | + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE; + + return flags; +} + +static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + struct ieee80211_vif *vif, int type) +{ + u8 flags = 0; + + if (iwl_mvm_is_cdb_supported(mvm)) { + if (params->respect_p2p_go) + flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB; + if (params->respect_p2p_go_hb) + flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB; + } else { + if (params->respect_p2p_go) + flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB | + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB; + } + return flags; } @@ -2158,7 +2201,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_scan_req_umac *cmd = mvm->scan_cmd; struct iwl_scan_umac_chan_param *chan_param; void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); - void *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * + void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; struct iwl_scan_req_umac_tail_v2 *tail_v2 = (struct iwl_scan_req_umac_tail_v2 *)sec_part; @@ -2238,17 +2281,21 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } static void -iwl_mvm_scan_umac_fill_general_p_v10(struct iwl_mvm *mvm, +iwl_mvm_scan_umac_fill_general_p_v11(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params, struct ieee80211_vif *vif, - struct iwl_scan_general_params_v10 *gp, - u16 gen_flags) + struct iwl_scan_general_params_v11 *gp, + u16 gen_flags, u8 gen_flags2) { struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif); - iwl_mvm_scan_umac_dwell_v10(mvm, gp, params); + iwl_mvm_scan_umac_dwell_v11(mvm, gp, params); + + IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n", + gen_flags, gen_flags2); gp->flags = cpu_to_le16(gen_flags); + gp->flags2 = gen_flags2; if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1) gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS; @@ -2350,9 +2397,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); - iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, + iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, &scan_p->general_params, - gen_flags); + gen_flags, 0); ret = iwl_mvm_fill_scan_sched_params(params, scan_p->periodic_params.schedule, @@ -2367,16 +2414,18 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } -static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params, int type, - int uid) +static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, + int type, int uid, u32 version) { - struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd; - struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params; + struct iwl_scan_req_umac_v15 *cmd = mvm->scan_cmd; + struct iwl_scan_req_params_v15 *scan_p = &cmd->scan_params; struct
iwl_scan_channel_params_v6 *cp = &scan_p->channel_params; struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params; int ret; u16 gen_flags; + u8 gen_flags2; u32 bitmap_ssid = 0; mvm->scan_uid_status[uid] = type; @@ -2385,9 +2434,15 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type); - iwl_mvm_scan_umac_fill_general_p_v10(mvm, params, vif, + + if (version >= 15) + gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type); + else + gen_flags2 = 0; + + iwl_mvm_scan_umac_fill_general_p_v11(mvm, params, vif, &scan_p->general_params, - gen_flags); + gen_flags, gen_flags2); ret = iwl_mvm_fill_scan_sched_params(params, scan_p->periodic_params.schedule, @@ -2410,14 +2465,16 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY; cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS; - ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb); - if (ret) - return ret; + iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb); + + cp->count = iwl_mvm_umac_scan_cfg_channels_v6_6g(mvm, params, + params->n_channels, + pb, cp, vif->type); + if (!cp->count) { + mvm->scan_uid_status[uid] = 0; + return -EINVAL; + } - iwl_mvm_umac_scan_cfg_channels_v6_6g(params, - params->n_channels, - pb, cp, vif->type); - cp->count = params->n_channels; if (!params->n_ssids || (params->n_ssids == 1 && !params->ssids[0].ssid_len)) cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER; @@ -2425,6 +2482,20 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return 0; } +static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14); +} + +static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params, int type, + int uid) +{ + return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15); +} + static int iwl_mvm_num_scans(struct iwl_mvm *mvm) { return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK); @@ -2498,7 +2569,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } -#define SCAN_TIMEOUT 20000 +#define SCAN_TIMEOUT 30000 void iwl_mvm_scan_timeout_wk(struct work_struct *work) { @@ -2540,6 +2611,7 @@ struct iwl_scan_umac_handler { static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = { /* set the newest version first to shorten the list traverse time */ + IWL_SCAN_UMAC_HANDLER(15), IWL_SCAN_UMAC_HANDLER(14), IWL_SCAN_UMAC_HANDLER(12), }; @@ -2566,10 +2638,9 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, if (uid < 0) return uid; - hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); + hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC); - scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - SCAN_REQ_UMAC, + scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN); for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) { @@ -2589,6 +2660,85 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm, return uid; } +struct iwl_mvm_scan_respect_p2p_go_iter_data { + struct ieee80211_vif *current_vif; + bool p2p_go; + enum nl80211_band band; +}; + +static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct 
iwl_mvm_scan_respect_p2p_go_iter_data *data = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + /* exclude the given vif */ + if (vif == data->current_vif) + return; + + if (vif->type == NL80211_IFTYPE_AP && vif->p2p && + mvmvif->phy_ctxt->id < NUM_PHY_CTX && + (data->band == NUM_NL80211_BANDS || + mvmvif->phy_ctxt->channel->band == data->band)) + data->p2p_go = true; +} + +static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + bool low_latency, + enum nl80211_band band) +{ + struct iwl_mvm_scan_respect_p2p_go_iter_data data = { + .current_vif = vif, + .p2p_go = false, + .band = band, + }; + + if (!low_latency) + return false; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_scan_respect_p2p_go_iter, + &data); + + return data.p2p_go; +} + +static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + enum nl80211_band band) +{ + bool low_latency = iwl_mvm_low_latency_band(mvm, band); + + return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band); +} + +static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + bool low_latency = iwl_mvm_low_latency(mvm); + + return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, + NUM_NL80211_BANDS); +} + +static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + struct ieee80211_vif *vif) +{ + if (iwl_mvm_is_cdb_supported(mvm)) { + params->respect_p2p_go = + iwl_mvm_get_respect_p2p_go_band(mvm, vif, + NL80211_BAND_2GHZ); + params->respect_p2p_go_hb = + iwl_mvm_get_respect_p2p_go_band(mvm, vif, + NL80211_BAND_5GHZ); + } else { + params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif); + } +} + int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) @@ -2640,6 +2790,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.scan_6ghz_params = req->scan_6ghz_params; params.scan_6ghz = req->scan_6ghz; iwl_mvm_fill_scan_type(mvm, ¶ms, vif); + iwl_mvm_fill_respect_p2p_go(mvm, ¶ms, vif); if (req->duration) params.iter_notif = true; @@ -2731,6 +2882,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.scan_plans = req->scan_plans; iwl_mvm_fill_scan_type(mvm, ¶ms, vif); + iwl_mvm_fill_respect_p2p_go(mvm, ¶ms, vif); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the scan is a bit silly @@ -2900,8 +3052,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); ret = iwl_mvm_send_cmd_pdu(mvm, - iwl_cmd_id(SCAN_ABORT_UMAC, - IWL_ALWAYS_LONG_GROUP, 0), + WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), 0, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; @@ -2940,15 +3091,14 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) 1 * HZ); } -#define IWL_SCAN_REQ_UMAC_HANDLE_SIZE(_ver) { \ - case (_ver): return sizeof(struct iwl_scan_req_umac_v##_ver); \ -} - static int iwl_scan_req_umac_get_size(u8 scan_ver) { switch (scan_ver) { - IWL_SCAN_REQ_UMAC_HANDLE_SIZE(14); - IWL_SCAN_REQ_UMAC_HANDLE_SIZE(12); + case 12: + return sizeof(struct iwl_scan_req_umac_v12); + case 14: + case 15: + return sizeof(struct iwl_scan_req_umac_v15); } return 0; @@ -2957,8 +3107,7 @@ static int iwl_scan_req_umac_get_size(u8 scan_ver) int iwl_mvm_scan_size(struct 
iwl_mvm *mvm) { int base_size, tail_size; - u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, - SCAN_REQ_UMAC, + u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, IWL_FW_CMD_VER_UNKNOWN); base_size = iwl_scan_req_umac_get_size(scan_ver); diff --git a/mvm/sta.c b/mvm/sta.c index a64874c05ced..c7f9d3870f21 100644 --- a/mvm/sta.c +++ b/mvm/sta.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2015, 2018-2021 Intel Corporation + * Copyright (C) 2012-2015, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -87,6 +87,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_320: case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); fallthrough; @@ -316,7 +317,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, } static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - u16 *queueptr, u8 tid, u8 flags) + u16 *queueptr, u8 tid) { int queue = *queueptr; struct iwl_scd_txq_cfg_cmd cmd = { @@ -325,11 +326,28 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, }; int ret; + lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_has_new_tx_api(mvm)) { + if (mvm->sta_remove_requires_queue_remove) { + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD); + struct iwl_scd_queue_cfg_cmd remove_cmd = { + .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE), + .u.remove.queue = cpu_to_le32(queue), + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, + sizeof(remove_cmd), + &remove_cmd); + } else { + ret = 0; + } + iwl_trans_txq_free(mvm->trans, queue); *queueptr = IWL_MVM_INVALID_QUEUE; - return 0; + return ret; } if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) @@ -373,7 +391,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvm->queue_info[queue].reserved = false; iwl_trans_txq_disable(mvm->trans, queue, false); - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); if (ret) @@ -512,7 +530,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0); + ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -596,6 +614,39 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, return queue; } +/* Re-configure the SCD for a queue that has already been configured */ +static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, + int sta_id, int tid, int frame_limit, u16 ssn) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .action = SCD_CFG_ENABLE_QUEUE, + .window = frame_limit, + .sta_id = sta_id, + .ssn = cpu_to_le16(ssn), + .tx_fifo = fifo, + .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || + queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), + .tid = tid, + }; + int ret; + + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return -EINVAL; + + if (WARN(mvm->queue_info[queue].tid_bitmap == 0, + "Trying to reconfig unallocated queue %d\n", queue)) + return -ENXIO; + + IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); + + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, 
sizeof(cmd), &cmd); + WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", + queue, fifo, ret); + + return ret; +} + /* * If a given queue has a higher AC than the TID stream that is being compared * to, the queue needs to be redirected to the lower AC. This function does that @@ -716,21 +767,40 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, u8 sta_id, u8 tid, unsigned int timeout) { - int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, - mvm->trans->cfg->min_256_ba_txq_size); + int queue, size; if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; size = max_t(u32, IWL_MGMT_QUEUE_SIZE, mvm->trans->cfg->min_txq_size); + } else { + struct ieee80211_sta *sta; + + rcu_read_lock(); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* this queue isn't used for traffic (cab_queue) */ + if (IS_ERR_OR_NULL(sta)) { + size = IWL_MGMT_QUEUE_SIZE; + } else if (sta->he_cap.has_he) { + /* support for 256 ba size */ + size = IWL_DEFAULT_QUEUE_SIZE_HE; + } else { + size = IWL_DEFAULT_QUEUE_SIZE; + } + + rcu_read_unlock(); } - do { - __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE); + /* take the min with bc tbl entries allowed */ + size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); - queue = iwl_trans_txq_alloc(mvm->trans, enable, - sta_id, tid, SCD_QUEUE_CFG, - size, timeout); + /* size needs to be power of 2 values for calculating read/write pointers */ + size = rounddown_pow_of_two(size); + + do { + queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id), + tid, size, timeout); if (queue < 0) IWL_DEBUG_TX_QUEUES(mvm, @@ -1019,12 +1089,12 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * Remove the ones that did. */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - u16 tid_bitmap; + u16 q_tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - tid_bitmap = mvm->queue_info[queue].tid_bitmap; + q_tid_bitmap = mvm->queue_info[queue].tid_bitmap; /* * We need to take into account a situation in which a TXQ was @@ -1037,7 +1107,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * Mark this queue in the right bitmap, we'll send the command * to the firmware later. 
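In the iwl_mvm_tvqm_enable_txq() hunk above, the queue size is no longer a fixed default: HE peers get the larger 256-frame aggregation window, the result is clamped to the number of byte-count-table entries the transport can index, and the final value is rounded down to a power of two so that ring read/write pointers can wrap with a simple mask. A minimal sketch of that sizing policy (the constants are stand-ins for the driver's defines, and rounddown_pow_of_two() is reimplemented here for self-containment):

#include <stdint.h>

#define QUEUE_SIZE_HE	1024	/* stand-in for IWL_DEFAULT_QUEUE_SIZE_HE */
#define QUEUE_SIZE	256	/* stand-in for IWL_DEFAULT_QUEUE_SIZE */

/* Largest power of two that is <= v (v must be non-zero). */
static uint32_t rounddown_pow_of_two(uint32_t v)
{
	uint32_t r = 1;

	while (r <= v / 2)
		r <<= 1;
	return r;
}

static uint32_t pick_txq_size(int peer_has_he, uint32_t bc_tbl_entries)
{
	uint32_t size = peer_has_he ? QUEUE_SIZE_HE : QUEUE_SIZE;

	if (size > bc_tbl_entries)	/* min() with the table capacity */
		size = bc_tbl_entries;
	return rounddown_pow_of_two(size);
}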
*/ - if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) + if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid))) set_bit(queue, changetid_queues); IWL_DEBUG_TX_QUEUES(mvm, @@ -1337,7 +1407,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, out_err: queue_tmp = queue; - iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0); + iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid); return ret; } @@ -1516,8 +1586,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, memset(&cmd, 0, sizeof(cmd)); cmd.sta_id = sta->sta_id; - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, - 0) >= 12 && + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 && sta->type == IWL_STA_AUX_ACTIVITY) cmd.mac_id_n_color = cpu_to_le32(mac_id); else @@ -1784,8 +1853,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i, - 0); + iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } @@ -1993,7 +2061,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, if (ret) { if (!iwl_mvm_has_new_tx_api(mvm)) iwl_mvm_disable_txq(mvm, NULL, queue, - IWL_MAX_TID_COUNT, 0); + IWL_MAX_TID_COUNT); return ret; } @@ -2065,7 +2133,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2082,7 +2150,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) return -EINVAL; - iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2199,7 +2267,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, } queue = *queueptr; - iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT); if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2434,7 +2502,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true); - iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0); + iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -2443,8 +2511,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return ret; } -#define IWL_MAX_RX_BA_SESSIONS 16 - static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) { struct iwl_mvm_delba_data notif = { @@ -2526,18 +2592,126 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, } } +static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + bool start, int tid, u16 ssn, + u16 buf_size) +{ + struct iwl_mvm_add_sta_cmd cmd = { + .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), + .sta_id = mvm_sta->sta_id, + .add_modify = STA_MODE_MODIFY, + }; + u32 status; + int ret; + + if (start) { + cmd.add_immediate_ba_tid = tid; + cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); + 
cmd.rx_ba_window = cpu_to_le16(buf_size); + cmd.modify_mask = STA_MODIFY_ADD_BA_TID; + } else { + cmd.remove_immediate_ba_tid = tid; + cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID; + } + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), + &cmd, &status); + if (ret) + return ret; + + switch (status & IWL_ADD_STA_STATUS_MASK) { + case ADD_STA_SUCCESS: + IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", + start ? "start" : "stopp"); + if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) && + !(status & IWL_ADD_STA_BAID_VALID_MASK))) + return -EINVAL; + return u32_get_bits(status, IWL_ADD_STA_BAID_MASK); + case ADD_STA_IMMEDIATE_BA_FAILURE: + IWL_WARN(mvm, "RX BA Session refused by fw\n"); + return -ENOSPC; + default: + IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", + start ? "start" : "stopp", status); + return -EIO; + } +} + +static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + bool start, int tid, u16 ssn, + u16 buf_size, int baid) +{ + struct iwl_rx_baid_cfg_cmd cmd = { + .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : + cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), + }; + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + int ret; + + BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); + + if (start) { + cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); + cmd.alloc.tid = tid; + cmd.alloc.ssn = cpu_to_le16(ssn); + cmd.alloc.win_size = cpu_to_le16(buf_size); + baid = -EIO; + } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { + cmd.remove_v1.baid = cpu_to_le32(baid); + BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); + } else { + cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id)); + cmd.remove.tid = cpu_to_le32(tid); + } + + ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), + &cmd, &baid); + if (ret) + return ret; + + if (!start) { + /* ignore firmware baid on remove */ + baid = 0; + } + + IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", + start ? "start" : "stopp"); + + if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map)) + return -EINVAL; + + return baid; +} + +static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta, + bool start, int tid, u16 ssn, u16 buf_size, + int baid) +{ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) + return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start, + tid, ssn, buf_size, baid); + + return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start, + tid, ssn, buf_size); +} + int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_add_sta_cmd cmd = {}; struct iwl_mvm_baid_data *baid_data = NULL; - int ret; - u32 status; + int ret, baid; + u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? 
IWL_MAX_BAID : + IWL_MAX_BAID_OLD; lockdep_assert_held(&mvm->mutex); - if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { + if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) { IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); return -ENOSPC; } @@ -2583,59 +2757,29 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, reorder_buf_size / sizeof(baid_data->entries[0]); } - cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); - cmd.sta_id = mvm_sta->sta_id; - cmd.add_modify = STA_MODE_MODIFY; - if (start) { - cmd.add_immediate_ba_tid = (u8) tid; - cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); - cmd.rx_ba_window = cpu_to_le16(buf_size); + if (iwl_mvm_has_new_rx_api(mvm) && !start) { + baid = mvm_sta->tid_to_baid[tid]; } else { - cmd.remove_immediate_ba_tid = (u8) tid; + /* we don't really need it in this case */ + baid = -1; } - cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : - STA_MODIFY_REMOVE_BA_TID; - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, - iwl_mvm_add_sta_cmd_size(mvm), - &cmd, &status); - if (ret) - goto out_free; - - switch (status & IWL_ADD_STA_STATUS_MASK) { - case ADD_STA_SUCCESS: - IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", - start ? "start" : "stopp"); - break; - case ADD_STA_IMMEDIATE_BA_FAILURE: - IWL_WARN(mvm, "RX BA Session refused by fw\n"); - ret = -ENOSPC; - break; - default: - ret = -EIO; - IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", - start ? "start" : "stopp", status); - break; - } + /* Don't send command to remove (start=0) BAID during restart */ + if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size, + baid); - if (ret) + if (baid < 0) { + ret = baid; goto out_free; + } if (start) { - u8 baid; - mvm->rx_ba_sessions++; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; - if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { - ret = -EINVAL; - goto out_free; - } - baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> - IWL_ADD_STA_BAID_SHIFT); baid_data->baid = baid; baid_data->timeout = timeout; baid_data->last_rx = jiffies; @@ -2663,7 +2807,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); } else { - u8 baid = mvm_sta->tid_to_baid[tid]; + baid = mvm_sta->tid_to_baid[tid]; if (mvm->rx_ba_sessions > 0) /* check that restart flow didn't zero the counter */ @@ -2684,6 +2828,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); + + /* + * After we've deleted it, do another queue sync + * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently + * running it won't find a new session in the old + * BAID. It can find the NULL pointer for the BAID, + * but we must not have it find a different session. + */ + iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, + true, NULL, 0); } return 0; @@ -3228,8 +3382,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, int i, size; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TKIP_MIC_KEYS); - int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA_KEY, + int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, new_api ? 
2 : 1); if (sta_id == IWL_MVM_INVALID_STA) @@ -3929,7 +4082,7 @@ void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); - if (!WARN_ON(!mvmsta)) + if (mvmsta) iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); rcu_read_unlock(); @@ -3988,3 +4141,21 @@ out: iwl_mvm_dealloc_int_sta(mvm, sta); return ret; } + +void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 mac_id) +{ + struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = { + .mac_id = cpu_to_le32(mac_id), + }; + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD), + CMD_ASYNC, + sizeof(cancel_channel_switch_cmd), + &cancel_channel_switch_cmd); + if (ret) + IWL_ERR(mvm, "Failed to cancel the channel switch\n"); +} diff --git a/mvm/sta.h b/mvm/sta.h index 32b4d1935788..f1a4fc3e4038 100644 --- a/mvm/sta.h +++ b/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -373,6 +373,7 @@ struct iwl_mvm_rxq_dup_data { * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake * exchange). + * @pairwise_cipher: used to feed iwlmei upon authorization * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -415,6 +416,7 @@ struct iwl_mvm_sta { u8 sleep_tx_count; u8 avg_energy; u8 tx_ant; + u32 pairwise_cipher; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); @@ -546,4 +548,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, u8 *key, u32 key_len); +void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 mac_id); #endif /* __sta_h__ */ diff --git a/mvm/time-event.c b/mvm/time-event.c index e91f8e889df7..6edf2b79db43 100644 --- a/mvm/time-event.c +++ b/mvm/time-event.c @@ -49,14 +49,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); /* - * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. + * Clear the ROC_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); synchronize_net(); @@ -82,14 +81,23 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); } - } else { + } + + /* + * Clear the ROC_AUX_RUNNING status bit. + * This will cause the TX path to drop offchannel transmissions. + * That would also be done by mac80211, but it is racy, in particular + * in the case that the time event actually completed in the firmware + * (which is handled in iwl_mvm_te_handle_notif). 
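This time-event.c hunk splits what used to be a single if/else in iwl_mvm_roc_done_wk(): ROC_RUNNING is still cleared unconditionally at the top, while ROC_AUX_RUNNING now gets its own test_and_clear_bit() block, so a P2P ROC and an aux (hot spot) ROC that end together are both cleaned up in one pass. A simplified control-flow sketch, with booleans standing in for the driver's atomic status bits:

#include <stdbool.h>

struct roc_status {
	bool p2p_needs_cleanup;	/* derived from the time-event data */
	bool aux_running;	/* stand-in for IWL_MVM_STATUS_ROC_AUX_RUNNING */
};

static void roc_done(struct roc_status *st)
{
	if (st->p2p_needs_cleanup) {
		/* flush the P2P device's broadcast station */
	}

	/* No longer the 'else' of the branch above: an aux ROC is torn
	 * down on its own flag, even if the P2P branch also ran. */
	if (st->aux_running) {
		st->aux_running = false; /* test_and_clear_bit() in the driver */
		/* flush the aux station; remove it when ADD_STA >= 12 */
	}
}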
+ */ + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + /* In newer versions of this command an aux station is added only * when a dedicated tx queue is used, and it needs to be removed at * the end of use */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, - ADD_STA, 0) >= 12) + if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) iwl_mvm_rm_aux_sta(mvm); } @@ -649,8 +657,8 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD, - MAC_CONF_GROUP, 0), + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd); if (ret) IWL_ERR(mvm, @@ -687,11 +695,14 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); - /* When session protection is supported, the te_data->id field + /* When session protection is used, the te_data->id field * is reused to save session protection's configuration. + * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set + * to HOT_SPOT_CMD. */ if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && + id != HOT_SPOT_CMD) { if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. Cancel it */ iwl_mvm_cancel_session_protection(mvm, mvmvif, id); @@ -911,8 +922,8 @@ iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm, } cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD, - MAC_CONF_GROUP, 0), + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd); } @@ -1027,7 +1038,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_p2p_roc_finished(mvm); } else { iwl_mvm_remove_aux_roc_te(mvm, mvmvif, - &mvmvif->time_event_data); + &mvmvif->hs_time_event_data); iwl_mvm_roc_finished(mvm); } @@ -1150,23 +1161,17 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF, - MAC_CONF_GROUP, 0) }; + const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) }; struct iwl_notification_wait wait_notif; struct iwl_mvm_session_prot_cmd cmd = { .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; - /* The time_event_data.id field is reused to save session - * protection's configuration. - */ - mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC; - cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); - lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); @@ -1180,6 +1185,11 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, } iwl_mvm_te_clear_data(mvm, te_data); + /* + * The time_event_data.id field is reused to save session + * protection's configuration.
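The tail of this hunk shows the second part of the fix in iwl_mvm_schedule_session_protection(): conf_id now lives in the command initializer, and te_data->id is assigned from it inside the time_event_lock critical section, after iwl_mvm_te_clear_data(), rather than being written to the vif before the lock was taken. A minimal sketch of the pattern, using a pthread mutex in place of the driver's spinlock and simplified field types:

#include <pthread.h>

struct te_data {
	unsigned int id;	/* doubles as the session-protection conf id */
	unsigned int duration;
};

static pthread_mutex_t te_lock = PTHREAD_MUTEX_INITIALIZER;

static void schedule_protection(struct te_data *te, unsigned int conf_id,
				unsigned int duration_tu)
{
	pthread_mutex_lock(&te_lock);
	/* te->id is written together with the fields it must stay
	 * consistent with, not before the lock is taken. */
	te->id = conf_id;
	te->duration = duration_tu;
	pthread_mutex_unlock(&te_lock);
}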
+ */ + te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); te_data->vif = vif; spin_unlock_bh(&mvm->time_event_lock); @@ -1189,8 +1199,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, if (!wait_for_notif) { if (iwl_mvm_send_cmd_pdu(mvm, - iwl_cmd_id(SESSION_PROTECTION_CMD, - MAC_CONF_GROUP, 0), + WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd)) { IWL_ERR(mvm, "Couldn't send the SESSION_PROTECTION_CMD\n"); @@ -1207,8 +1216,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, iwl_mvm_session_prot_notif, NULL); if (iwl_mvm_send_cmd_pdu(mvm, - iwl_cmd_id(SESSION_PROTECTION_CMD, - MAC_CONF_GROUP, 0), + WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_CMD), 0, sizeof(cmd), &cmd)) { IWL_ERR(mvm, "Couldn't send the SESSION_PROTECTION_CMD\n"); @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2012-2014, 2019-2021 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -160,6 +160,11 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) notif = (struct ct_kill_notif *)pkt->data; IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n", notif->temperature); + if (iwl_fw_lookup_notif_ver(mvm->fw, PHY_OPS_GROUP, + CT_KILL_NOTIFICATION, 0) > 1) + IWL_DEBUG_TEMP(mvm, + "CT kill notification DTS bitmap = 0x%x, Scheme = %d\n", + notif->dts, notif->scheme); iwl_mvm_enter_ctkill(mvm); } @@ -240,8 +245,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) * a response. For older versions we send the command and wait for a * notification (no command TLV for previous versions). */ - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP, - CMD_DTS_MEASUREMENT_TRIGGER_WIDE, + cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE), IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver == 1) return iwl_mvm_send_temp_cmd(mvm, true, temp); @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -39,11 +39,11 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, #define OPT_HDR(type, skb, off) \ (type *)(skb_network_header(skb) + (off)) -static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_hdr *hdr, - struct ieee80211_tx_info *info, - u16 offload_assist) +static u16 iwl_mvm_tx_csum_pre_bz(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, bool amsdu) { + struct ieee80211_hdr *hdr = (void *)skb->data; + u16 offload_assist = 0; #if IS_ENABLED(CONFIG_INET) u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; @@ -106,8 +106,7 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR); /* Do IPv4 csum for AMSDU only (no IP csum for Ipv6) */ - if (skb->protocol == htons(ETH_P_IP) && - (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) { + if (skb->protocol == htons(ETH_P_IP) && amsdu) { ip_hdr(skb)->check = 0; offload_assist |= BIT(TX_CMD_OFFLD_L3_EN); } @@ -132,9 +131,63 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, out: #endif + if (amsdu) + offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); + else if 
(ieee80211_hdrlen(hdr->frame_control) % 4) + /* padding is inserted later in transport */ + offload_assist |= BIT(TX_CMD_OFFLD_PAD); + return offload_assist; } +u32 iwl_mvm_tx_csum_bz(struct iwl_mvm *mvm, struct sk_buff *skb, bool amsdu) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + u32 offload_assist = IWL_TX_CMD_OFFLD_BZ_PARTIAL_CSUM; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + unsigned int csum_start = skb_checksum_start_offset(skb); + + offload_assist |= u32_encode_bits(hdrlen / 2, + IWL_TX_CMD_OFFLD_BZ_MH_LEN); + if (amsdu) + offload_assist |= IWL_TX_CMD_OFFLD_BZ_AMSDU; + else if (hdrlen % 4) + /* padding is inserted later in transport */ + offload_assist |= IWL_TX_CMD_OFFLD_BZ_MH_PAD; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return offload_assist; + + offload_assist |= IWL_TX_CMD_OFFLD_BZ_ENABLE_CSUM | + IWL_TX_CMD_OFFLD_BZ_ZERO2ONES; + + /* + * mac80211 will always calculate checksum in software for + * non-fast-xmit, and so we can only do offloaded checksum + * for fast-xmit frames. In this case, we always have the + * RFC 1042 header present. skb_checksum_start_offset() + * returns the offset from the beginning, but the hardware + * needs it from after the header & SNAP header. + */ + csum_start -= hdrlen + 8; + + offload_assist |= u32_encode_bits(csum_start, + IWL_TX_CMD_OFFLD_BZ_START_OFFS); + offload_assist |= u32_encode_bits(csum_start + skb->csum_offset, + IWL_TX_CMD_OFFLD_BZ_RESULT_OFFS); + + return offload_assist; +} + +static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_tx_info *info, + bool amsdu) +{ + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) + return iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu); + return iwl_mvm_tx_csum_bz(mvm, skb, amsdu); +} + /* * Sets most of the Tx cmd's fields */ @@ -146,7 +199,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, __le16 fc = hdr->frame_control; u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags); u32 len = skb->len + FCS_LEN; - u16 offload_assist = 0; + bool amsdu = false; u8 ac; if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || @@ -166,8 +219,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u8 *qc = ieee80211_get_qos_ctl(hdr); tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL; - if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) - offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); + amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } else if (ieee80211_is_back_req(fc)) { struct ieee80211_bar *bar = (void *)skb->data; u16 control = le16_to_cpu(bar->control); @@ -234,14 +286,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; - /* padding is inserted later in transport */ - if (ieee80211_hdrlen(fc) % 4 && - !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) - offload_assist |= BIT(TX_CMD_OFFLD_PAD); - - tx_cmd->offload_assist |= - cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info, - offload_assist)); + tx_cmd->offload_assist = + cpu_to_le16(iwl_mvm_tx_csum_pre_bz(mvm, skb, info, amsdu)); } static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, @@ -269,7 +315,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, u8 rate_plcp; u32 rate_flags = 0; bool is_cck; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); /* info->control is only relevant for non HW rate control */ if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) { @@ -279,7 +324,8 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, "Got a HT rate 
(flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n", info->control.rates[0].flags, info->control.rates[0].idx, - le16_to_cpu(fc), mvmsta->sta_state); + le16_to_cpu(fc), + sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1); rate_idx = info->control.rates[0].idx; } @@ -304,7 +350,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE); /* Set CCK or OFDM flag */ - if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) { + if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) { if (!is_cck) rate_flags |= RATE_MCS_LEGACY_OFDM_MSK; else @@ -462,27 +508,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { - u16 offload_assist = 0; u32 rate_n_flags = 0; u16 flags = 0; struct iwl_mvm_sta *mvmsta = sta ? iwl_mvm_sta_from_mac80211(sta) : NULL; + bool amsdu = false; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); - if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) - offload_assist |= BIT(TX_CMD_OFFLD_AMSDU); + amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT; } - offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info, - offload_assist); - - /* padding is inserted later in transport */ - if (ieee80211_hdrlen(hdr->frame_control) % 4 && - !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) - offload_assist |= BIT(TX_CMD_OFFLD_PAD); - if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; @@ -502,8 +539,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; + u32 offload_assist = iwl_mvm_tx_csum(mvm, skb, + info, amsdu); - cmd->offload_assist |= cpu_to_le32(offload_assist); + cmd->offload_assist = cpu_to_le32(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); @@ -515,8 +554,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, cmd->rate_n_flags = cpu_to_le32(rate_n_flags); } else { struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + u16 offload_assist = iwl_mvm_tx_csum_pre_bz(mvm, skb, + info, + amsdu); - cmd->offload_assist |= cpu_to_le16(offload_assist); + cmd->offload_assist = cpu_to_le16(offload_assist); /* Total # bytes to be transmitted */ cmd->len = cpu_to_le16((u16)skb->len); @@ -611,7 +653,8 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt; struct iwl_probe_resp_data *resp_data; - u8 *ie, *pos; + const u8 *ie; + u8 *pos; u8 match[] = { (WLAN_OUI_WFA >> 16) & 0xff, (WLAN_OUI_WFA >> 8) & 0xff, @@ -628,10 +671,10 @@ static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm, if (!resp_data->notif.noa_active) goto out; - ie = (u8 *)cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, - mgmt->u.probe_resp.variable, - skb->len - base_len, - match, 4, 2); + ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, + mgmt->u.probe_resp.variable, + skb->len - base_len, + match, 4, 2); if (!ie) { IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n"); goto out; @@ -1128,6 +1171,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, /* From now on, we cannot access info->control */ iwl_mvm_skb_prepare_status(skb, dev_cmd); + if (ieee80211_is_data(fc)) + iwl_mvm_mei_tx_copy_to_csme(mvm, skb, + info->control.hw_key ? 
+ info->control.hw_key->iv_len : 0); + if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id)) goto drop_unlock_sta; @@ -1554,8 +1602,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, seq_ctl = le16_to_cpu(hdr->seq_ctrl); if (unlikely(!seq_ctl)) { - struct ieee80211_hdr *hdr = (void *)skb->data; - /* * If it is an NDP, we can't update next_reclaim since * its sequence control is 0. Note that for that same diff --git a/mvm/utils.c b/mvm/utils.c index caf1dcf48888..bc947733d982 100644 --- a/mvm/utils.c +++ b/mvm/utils.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -169,8 +169,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx) { - if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP, - TX_CMD, 0) > 8) + if (iwl_fw_lookup_cmd_ver(fw, TX_CMD, 0) > 8) /* In the new rate legacy rates are indexed: * 0 - 3 for CCK and 0 - 7 for OFDM. */ @@ -241,38 +240,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) return last_idx; } -int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, - int tid, int frame_limit, u16 ssn) -{ - struct iwl_scd_txq_cfg_cmd cmd = { - .scd_queue = queue, - .action = SCD_CFG_ENABLE_QUEUE, - .window = frame_limit, - .sta_id = sta_id, - .ssn = cpu_to_le16(ssn), - .tx_fifo = fifo, - .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || - queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE), - .tid = tid, - }; - int ret; - - if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) - return -EINVAL; - - if (WARN(mvm->queue_info[queue].tid_bitmap == 0, - "Trying to reconfig unallocated queue %d\n", queue)) - return -ENXIO; - - IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); - - ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); - WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n", - queue, fifo, ret); - - return ret; -} - /** * iwl_mvm_send_lq_cmd() - Send link quality command * @mvm: Driver data. @@ -340,25 +307,64 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ieee80211_request_smps(vif, smps_mode); } +static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + WARN_ON(pkt->hdr.cmd != STATISTICS_NOTIFICATION); + + return true; +} + int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear) { struct iwl_statistics_cmd scmd = { .flags = clear ? 
cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0, }; + struct iwl_host_cmd cmd = { .id = STATISTICS_CMD, .len[0] = sizeof(scmd), .data[0] = &scmd, - .flags = CMD_WANT_SKB, }; int ret; - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) - return ret; + /* From version 15 - STATISTICS_NOTIFICATION, the reply for + * STATISTICS_CMD is empty, and the response is with + * STATISTICS_NOTIFICATION notification + */ + if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP, + STATISTICS_NOTIFICATION, 0) < 15) { + cmd.flags = CMD_WANT_SKB; - iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); - iwl_free_resp(&cmd); + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) + return ret; + + iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt); + iwl_free_resp(&cmd); + } else { + struct iwl_notification_wait stats_wait; + static const u16 stats_complete[] = { + STATISTICS_NOTIFICATION, + }; + + iwl_init_notification_wait(&mvm->notif_wait, &stats_wait, + stats_complete, ARRAY_SIZE(stats_complete), + iwl_wait_stats_complete, NULL); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + iwl_remove_notification(&mvm->notif_wait, &stats_wait); + return ret; + } + + /* 200ms should be enough for FW to collect data from all + * LMACs and send STATISTICS_NOTIFICATION to host + */ + ret = iwl_wait_notification(&mvm->notif_wait, &stats_wait, HZ / 5); + if (ret) + return ret; + } if (clear) iwl_mvm_accu_radio_stats(mvm); @@ -441,8 +447,7 @@ void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, cmd.low_latency_tx = 1; } - if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD, - MAC_CONF_GROUP, 0), + if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD), 0, sizeof(cmd), &cmd)) IWL_ERR(mvm, "Failed to send low latency command\n"); } diff --git a/pcie/drv.c b/pcie/drv.c index c574f041f096..58a7111d4f40 100644 --- a/pcie/drv.c +++ b/pcie/drv.c @@ -495,14 +495,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)}, {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)}, + {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)}, /* Ma devices */ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, - {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)}, /* Bz devices */ {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -510,16 +512,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = { MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _no_160, _cores, _cdb, _cfg, _name) \ + _rf_id, _no_160, _cores, _cdb, _jacket, _cfg, _name) \ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, \ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ - .mac_step = _mac_step, .cdb = _cdb } + .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, _cfg, _name) + IWL_CFG_ANY, IWL_CFG_ANY, _cfg, _name) static const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -562,6 +564,7 @@ static const struct iwl_dev_info 
iwl_dev_info_table[] = { IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), + IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), @@ -665,97 +668,113 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL), IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name), IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name), - IWL_DEV_INFO(0x2726, 0x1671, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675s_name), - IWL_DEV_INFO(0x2726, 0x1672, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675i_name), IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), - IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name), - IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + + /* SO with GF2 */ + IWL_DEV_INFO(0x2726, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x2726, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x51F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x51F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x54F0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x54F0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7A70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7A70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7AF0, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7AF0, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + IWL_DEV_INFO(0x7F70, 0x1671, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7F70, 0x1672, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_killer_1675i_name), + + /* MA with GF2 */ + IWL_DEV_INFO(0x7E40, 0x1671, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675s_name), + IWL_DEV_INFO(0x7E40, 0x1672, iwl_cfg_ma_a0_gf_a0, iwl_ax211_killer_1675i_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, 
iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9461_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9461_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9462_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9462_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ @@ -763,176 +782,176 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, 
SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QnJ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, 
iwl9560_qnj_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl9560_qnj_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu with Hr */ @@ -940,304 +959,376 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qu_c0_hr_b0, iwl_ax203_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), /* QnJ with Hr */ 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name), /* SnJ with Jf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_jf_b0, iwl9560_name), /* SnJ with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_hr_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_hr_b0, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_gf4_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_mr_a0, iwl_ax221_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, 
IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_ma_a0_fm_a0, iwl_ax231_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_snj_a0_mr_a0, iwl_ax221_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_hr_b0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_gf_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_gf4_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_mr_a0, iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwl_cfg_bz_a0_fm_a0, 
iwl_bz_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_NO_JACKET, iwl_cfg_gl_a0_fm_a0, iwl_bz_name), +/* BZ Z step */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, SILICON_Z_STEP, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_bz_z0_gf_a0, iwl_bz_name), + +/* BNJ */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_fm_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_fm4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_gf_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_gf4_a0, iwl_bz_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_IS_JACKET, + iwl_cfg_bnj_a0_hr_b0, iwl_bz_name), + /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, IWL_CFG_ANY, + iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, - iwlax210_2ax_cfg_so_jf_b0, iwl9462_name) + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, IWL_CFG_ANY, + 
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), + +/* MsP */ +/* For now we use the same FW as MR, but this will change in the future. */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_ms_a0, iwl_ax204_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_so_a0_ms_a0, iwl_ax204_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_ma_a0_ms_a0, iwl_ax204_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, IWL_CFG_ANY, + iwl_cfg_snj_a0_ms_a0, iwl_ax204_name) #endif /* CONFIG_IWLMVM */ }; @@ -1249,22 +1340,14 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { static int get_crf_id(struct iwl_trans *iwl_trans) { int ret = 0; - u32 wfpm_ctrl_addr; - u32 wfpm_otp_cfg_addr; u32 sd_reg_ver_addr; u32 cdb = 0; u32 val; - if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - wfpm_ctrl_addr = WFPM_CTRL_REG_GEN2; - wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR_GEN2; + if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) sd_reg_ver_addr = SD_REG_VER_GEN2; - /* Qu/Pu families have other addresses */ - } else { - wfpm_ctrl_addr = WFPM_CTRL_REG; - wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR; + else sd_reg_ver_addr = SD_REG_VER; - } if (!iwl_trans_grab_nic_access(iwl_trans)) { IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n"); @@ -1273,15 +1356,15 @@ static int get_crf_id(struct iwl_trans *iwl_trans) } /* Enable access to peripheral registers */ - val = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr); + val = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); val |= ENABLE_WFPM; - iwl_write_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr, val); + iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, val); /* Read crf info */ val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr); /* Read cdb info (also contains the jacket info if needed in the future) */ - cdb = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_otp_cfg_addr); + cdb = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR); /* Map from crf id to rf id */ switch (REG_CRF_ID_TYPE(val)) { @@ -1337,11 +1420,15 @@ out: static const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, - u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores) + u16 rf_type, u8 cdb, u8 jacket, u8 rf_id, u8 no_160, u8 cores) { + int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; - for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) { + if (!num_devices) + return NULL; + + for (i = num_devices - 1; i >= 0; i--) { const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i]; if (dev_info->device != (u16)IWL_CFG_ANY && @@ -1368,6 +1455,10 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, dev_info->cdb != cdb) continue; + if (dev_info->jacket != (u8)IWL_CFG_ANY && + dev_info->jacket != jacket) + continue; + if (dev_info->rf_id != (u8)IWL_CFG_ANY && dev_info->rf_id != rf_id) continue; @@ -1422,15 +1513,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * first trying to load the firmware etc. 
and potentially only * detecting any problems when the first interface is brought up. */ - ret = iwl_finish_nic_init(iwl_trans); - if (ret) - goto out_free_trans; - if (iwl_trans_grab_nic_access(iwl_trans)) { - /* all good */ - iwl_trans_release_nic_access(iwl_trans); - } else { - ret = -EIO; - goto out_free_trans; + ret = iwl_pcie_prepare_card_hw(iwl_trans); + if (!ret) { + ret = iwl_finish_nic_init(iwl_trans); + if (ret) + goto out_free_trans; + if (iwl_trans_grab_nic_access(iwl_trans)) { + /* all good */ + iwl_trans_release_nic_access(iwl_trans); + } else { + ret = -EIO; + goto out_free_trans; + } } iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID); @@ -1442,14 +1536,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (iwl_trans->trans_cfg->rf_id && iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 && - !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) + !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) { + ret = -EINVAL; goto out_free_trans; + } dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device, CSR_HW_REV_TYPE(iwl_trans->hw_rev), - CSR_HW_REV_STEP(iwl_trans->hw_rev), + iwl_trans->hw_rev_step, CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), + CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), IWL_SUBDEVICE_NO_160(pdev->subsystem_device), IWL_SUBDEVICE_CORES(pdev->subsystem_device)); @@ -1488,21 +1585,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) iwl_trans->cfg = cfg_7265d; - if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { - if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) { - iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; - } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) { - iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0; - } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) { - iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0; - } else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) == - CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) { - iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0; - } - } - /* * This is a hack to switch from Qu B0 to Qu C0. 
We need to * do this for all cfgs that use Qu B0, except for those using @@ -1563,6 +1645,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_trans; pci_set_drvdata(pdev, iwl_trans); + + /* try to get ownership so that we'll know if we don't own it */ + iwl_pcie_prepare_card_hw(iwl_trans); + iwl_trans->drv = iwl_drv_start(iwl_trans); if (IS_ERR(iwl_trans->drv)) { diff --git a/pcie/internal.h b/pcie/internal.h index a43e56c7689f..f7e4f868363d 100644 --- a/pcie/internal.h +++ b/pcie/internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2021 Intel Corporation + * Copyright (C) 2003-2015, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -104,6 +104,18 @@ struct iwl_rx_completion_desc { } __packed; /** + * struct iwl_rx_completion_desc_bz - Bz completion descriptor + * @rbid: unique tag of the received buffer + * @flags: flags (0: fragmented, all others: reserved) + * @reserved: reserved + */ +struct iwl_rx_completion_desc_bz { + __le16 rbid; + u8 flags; + u8 reserved[1]; +} __packed; + +/** * struct iwl_rxq - Rx queue * @id: queue index * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). @@ -133,11 +145,7 @@ struct iwl_rxq { int id; void *bd; dma_addr_t bd_dma; - union { - void *used_bd; - __le32 *bd_32; - struct iwl_rx_completion_desc *cd; - }; + void *used_bd; dma_addr_t used_bd_dma; u32 read; u32 write; @@ -262,6 +270,20 @@ enum iwl_pcie_fw_reset_state { }; /** + * enum iwl_pcie_imr_status - imr dma transfer state + * @IMR_D2S_IDLE: default value of the dma transfer + * @IMR_D2S_REQUESTED: dma transfer requested + * @IMR_D2S_COMPLETED: dma transfer completed + * @IMR_D2S_ERROR: dma transfer error + */ +enum iwl_pcie_imr_status { + IMR_D2S_IDLE, + IMR_D2S_REQUESTED, + IMR_D2S_COMPLETED, + IMR_D2S_ERROR, +}; + +/** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues @@ -319,6 +341,8 @@ enum iwl_pcie_fw_reset_state { * @alloc_page_lock: spinlock for the page allocator * @alloc_page: allocated page to still use parts of * @alloc_page_used: how much of the allocated page was already used (bytes) + * @imr_status: imr dma state machine + * @imr_waitq: imr wait queue for dma completion * @rf_name: name/version of the CRF, if any */ struct iwl_trans_pcie { @@ -363,7 +387,7 @@ struct iwl_trans_pcie { /* PCI bus related data */ struct pci_dev *pci_dev; - void __iomem *hw_base; + u8 __iomem *hw_base; bool ucode_write_complete; bool sx_complete; @@ -414,7 +438,8 @@ struct iwl_trans_pcie { bool fw_reset_handshake; enum iwl_pcie_fw_reset_state fw_reset_state; wait_queue_head_t fw_reset_waitq; - + enum iwl_pcie_imr_status imr_status; + wait_queue_head_t imr_waitq; char rf_name[32]; }; @@ -809,4 +834,9 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); +void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, u32 byte_cnt); +int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, + u32 dst_addr, u64 src_addr, u32 byte_cnt); + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/pcie/rx.c b/pcie/rx.c index 14602d6d6699..68a4572cee53 100644 --- a/pcie/rx.c +++ b/pcie/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * 
Copyright (C) 2003-2014, 2018-2021 Intel Corporation + * Copyright (C) 2003-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -190,11 +190,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, } rxq->write_actual = round_down(rxq->write, 8); - if (trans->trans_cfg->mq_rx_supported) + if (!trans->trans_cfg->mq_rx_supported) + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual | + HBUS_TARG_WRPTR_RX_Q(rxq->id)); + else iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual); - else - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) @@ -652,23 +655,30 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data) iwl_pcie_rx_allocator(trans_pcie->trans); } -static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td) +static int iwl_pcie_free_bd_size(struct iwl_trans *trans) { - struct iwl_rx_transfer_desc *rx_td; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return sizeof(struct iwl_rx_transfer_desc); - if (use_rx_td) - return sizeof(*rx_td); - else - return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) : - sizeof(__le32); + return trans->trans_cfg->mq_rx_supported ? + sizeof(__le64) : sizeof(__le32); +} + +static int iwl_pcie_used_bd_size(struct iwl_trans *trans) +{ + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + return sizeof(struct iwl_rx_completion_desc_bz); + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + return sizeof(struct iwl_rx_completion_desc); + + return sizeof(__le32); } static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { - bool use_rx_td = (trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210); - int free_size = iwl_pcie_free_bd_size(trans, use_rx_td); + int free_size = iwl_pcie_free_bd_size(trans); if (rxq->bd) dma_free_coherent(trans->dev, @@ -682,8 +692,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, if (rxq->used_bd) dma_free_coherent(trans->dev, - (use_rx_td ? sizeof(*rxq->cd) : - sizeof(__le32)) * rxq->queue_size, + iwl_pcie_used_bd_size(trans) * + rxq->queue_size, rxq->used_bd, rxq->used_bd_dma); rxq->used_bd_dma = 0; rxq->used_bd = NULL; @@ -707,7 +717,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, else rxq->queue_size = RX_QUEUE_SIZE; - free_size = iwl_pcie_free_bd_size(trans, use_rx_td); + free_size = iwl_pcie_free_bd_size(trans); /* * Allocate the circular buffer of Read Buffer Descriptors @@ -720,14 +730,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, if (trans->trans_cfg->mq_rx_supported) { rxq->used_bd = dma_alloc_coherent(dev, - (use_rx_td ? 
sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size, + iwl_pcie_used_bd_size(trans) * + rxq->queue_size, &rxq->used_bd_dma, GFP_KERNEL); if (!rxq->used_bd) goto err; } - rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size; + rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size; rxq->rb_stts_dma = trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; @@ -1307,9 +1318,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n", rxq->id, offset, iwl_get_cmd_string(trans, - iwl_cmd_id(pkt->hdr.cmd, - pkt->hdr.group_id, - 0)), + WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), pkt->hdr.group_id, pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); @@ -1319,7 +1328,7 @@ offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); /* check that what the device tells us made sense */ - if (offset > max_len) + if (len < sizeof(*pkt) || offset > max_len) break; trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); @@ -1419,6 +1428,7 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, u16 vid; BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32); + BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4); if (!trans->trans_cfg->mq_rx_supported) { rxb = rxq->queue[i]; @@ -1426,11 +1436,20 @@ return rxb; } - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - vid = le16_to_cpu(rxq->cd[i].rbid); - *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + struct iwl_rx_completion_desc_bz *cd = rxq->used_bd; + + vid = le16_to_cpu(cd[i].rbid); + *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; + } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_rx_completion_desc *cd = rxq->used_bd; + + vid = le16_to_cpu(cd[i].rbid); + *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED; } else { - vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */ + __le32 *cd = rxq->used_bd; + + vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */ } if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs)) @@ -1608,10 +1627,13 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) if (WARN_ON(entry->entry >= trans->num_rx_queues)) return IRQ_NONE; - if (WARN_ONCE(!rxq, - "[%d] Got MSI-X interrupt before we have Rx queues", - entry->entry)) + if (!rxq) { + if (net_ratelimit()) + IWL_ERR(trans, + "[%d] Got MSI-X interrupt before we have Rx queues\n", + entry->entry); return IRQ_NONE; + } lock_map_acquire(&trans->sync_cmd_lockdep_map); IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry); @@ -1954,7 +1976,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) CSR_INT, CSR_INT_BIT_RX_PERIODIC); } /* Sending RX interrupt requires many steps to be done in the - * the device: + * device: * 1- write interrupt to current index in ICT table. * 2- dma RX frame. * 3- update RX shared data to indicate last write index. 
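The rx.c hunks above drop the typed union from struct iwl_rxq and leave only an untyped used_bd pointer, with the completion-descriptor layout chosen at each point of use: 4-byte descriptors on Bz, 32-byte descriptors on AX210, and a bare 32-bit word carrying a 12-bit VID on everything older. The following standalone C sketch mirrors that dispatch; the type and family names are simplified stand-ins for the driver's own definitions, and the little-endian conversions (le16_to_cpu/le32_to_cpu) are omitted for brevity.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum device_family { FAMILY_PRE_AX210 = 0, FAMILY_AX210 = 1, FAMILY_BZ = 2 };

struct rx_completion_desc {	/* AX210-family layout: 32 bytes */
	uint16_t rbid;
	uint8_t flags;
	uint8_t reserved[29];
};

struct rx_completion_desc_bz {	/* Bz layout: packed down to 4 bytes */
	uint16_t rbid;
	uint8_t flags;
	uint8_t reserved[1];
};

/* Mirrors iwl_pcie_used_bd_size(): one ring, three element sizes. */
static size_t used_bd_size(enum device_family family)
{
	if (family >= FAMILY_BZ)
		return sizeof(struct rx_completion_desc_bz);
	if (family >= FAMILY_AX210)
		return sizeof(struct rx_completion_desc);
	return sizeof(uint32_t);	/* pre-AX210: 12-bit VID in a 32-bit word */
}

/* Mirrors the iwl_pcie_get_rxb() dispatch: read entry i from the untyped ring. */
static uint16_t used_bd_rbid(enum device_family family, const void *used_bd, int i)
{
	if (family >= FAMILY_BZ) {
		const struct rx_completion_desc_bz *cd = used_bd;
		return cd[i].rbid;
	}
	if (family >= FAMILY_AX210) {
		const struct rx_completion_desc *cd = used_bd;
		return cd[i].rbid;
	}
	return ((const uint32_t *)used_bd)[i] & 0x0FFF;	/* 12-bit VID */
}

int main(void)
{
	struct rx_completion_desc_bz ring[2] = { { .rbid = 7 }, { .rbid = 42 } };

	printf("Bz element size: %zu bytes\n", used_bd_size(FAMILY_BZ));
	printf("rbid[1] = %u\n", used_bd_rbid(FAMILY_BZ, ring, 1));
	return 0;
}

Keeping the ring pointer untyped and localizing the casts is what lets the Bz descriptor shrink to 4 bytes without touching the allocation or free paths, which only ever multiply an element size by the queue length.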
@@ -1998,6 +2020,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
         /* Wake up uCode load routine, now that load is complete */
         trans_pcie->ucode_write_complete = true;
         wake_up(&trans_pcie->ucode_write_waitq);
+        /* Wake up IMR write routine, now that write to SRAM is complete */
+        if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+            trans_pcie->imr_status = IMR_D2S_COMPLETED;
+            wake_up(&trans_pcie->ucode_write_waitq);
+        }
     }
 
     if (inta & ~handled) {
@@ -2211,7 +2238,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
     }
 
     /* This "Tx" DMA channel is used only for loading uCode */
-    if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+    if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
+        trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+        IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
+        isr_stats->tx++;
+
+        /* Wake up IMR routine once write to SRAM is complete */
+        if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+            trans_pcie->imr_status = IMR_D2S_COMPLETED;
+            wake_up(&trans_pcie->ucode_write_waitq);
+        }
+    } else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
         IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
         isr_stats->tx++;
         /*
@@ -2220,6 +2257,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
          */
         trans_pcie->ucode_write_complete = true;
         wake_up(&trans_pcie->ucode_write_waitq);
+
+        /* Wake up IMR routine once write to SRAM is complete */
+        if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+            trans_pcie->imr_status = IMR_D2S_COMPLETED;
+            wake_up(&trans_pcie->ucode_write_waitq);
+        }
     }
 
     if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -2234,7 +2277,10 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
              inta_fh);
         isr_stats->sw++;
         /* during FW reset flow report errors from there */
-        if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
+        if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
+            trans_pcie->imr_status = IMR_D2S_ERROR;
+            wake_up(&trans_pcie->imr_waitq);
+        } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
             trans_pcie->fw_reset_state = FW_RESET_ERROR;
             wake_up(&trans_pcie->fw_reset_waitq);
         } else {
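The imr_status checks threaded through the interrupt paths above are one half of a simple requester/completer handshake: iwl_trans_pcie_copy_imr(), added near the end of this patch, sets IMR_D2S_REQUESTED and sleeps, and the ISR advances the state and wakes the waiter, or flags IMR_D2S_ERROR on a SW-error interrupt. A minimal userspace model of the completer side, using a condition variable where the driver uses a wait queue (all names here are stand-ins):

#include <pthread.h>

enum imr_state { IMR_D2S_IDLE, IMR_D2S_REQUESTED, IMR_D2S_COMPLETED, IMR_D2S_ERROR };

static enum imr_state imr_status = IMR_D2S_IDLE;
static pthread_mutex_t imr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t imr_waitq = PTHREAD_COND_INITIALIZER;

/* "ISR" side: only consume the interrupt as an IMR completion if a
 * transfer was actually requested; otherwise it belongs to uCode load. */
static void isr_d2s_ch0(int sw_error)
{
    pthread_mutex_lock(&imr_lock);
    if (imr_status == IMR_D2S_REQUESTED) {
        imr_status = sw_error ? IMR_D2S_ERROR : IMR_D2S_COMPLETED;
        pthread_cond_broadcast(&imr_waitq);
    }
    pthread_mutex_unlock(&imr_lock);
}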
@@ -2266,7 +2312,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
         }
     }
 
-    if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+    /*
+     * In some rare cases when the HW is in a bad state, we may
+     * get this interrupt too early, when prph_info is still NULL.
+     * So make sure that it's not NULL to prevent crashing.
+     */
+    if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
         u32 sleep_notif =
             le32_to_cpu(trans_pcie->prph_info->sleep_notif);
         if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
diff --git a/pcie/trans-gen2.c b/pcie/trans-gen2.c
index 645cb4dd4e5a..0febdcacbd42 100644
--- a/pcie/trans-gen2.c
+++ b/pcie/trans-gen2.c
@@ -81,7 +81,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
     /* Stop device's DMA activity */
     iwl_pcie_apm_stop_master(trans);
 
-    iwl_trans_sw_reset(trans);
+    iwl_trans_sw_reset(trans, false);
 
     /*
      * Clear "initialization complete" bit to move adapter from
@@ -105,9 +105,12 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
     if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
         iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
                             UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE);
-    else
+    else if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
         iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
                             UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
+    else
+        iwl_write32(trans, CSR_DOORBELL_VECTOR,
+                    UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE);
 
     /* wait 200ms */
     ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
@@ -166,7 +169,8 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
     /* Stop the device, and put it in low power state */
     iwl_pcie_gen2_apm_stop(trans, false);
 
-    iwl_trans_sw_reset(trans);
+    /* re-take ownership to prevent other users from stealing the device */
+    iwl_trans_sw_reset(trans, true);
 
     /*
      * Upon stop, the IVAR table gets erased, so msi-x won't
@@ -196,9 +200,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
      * interrupt
      */
     iwl_enable_rfkill_int(trans);
-
-    /* re-take ownership to prevent other users from stealing the device */
-    iwl_pcie_prepare_card_hw(trans);
 }
 
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
diff --git a/pcie/trans.c b/pcie/trans.c
index 1efb53f78a62..518700388fdd 100644
--- a/pcie/trans.c
+++ b/pcie/trans.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -24,6 +24,7 @@
 #include "fw/error-dump.h"
 #include "fw/dbg.h"
 #include "fw/api/tx.h"
+#include "mei/iwl-mei.h"
 #include "internal.h"
 #include "iwl-fh.h"
 #include "iwl-context-info-gen3.h"
@@ -126,7 +127,8 @@ out:
     kfree(buf);
 }
 
-static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
+static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
+                                   bool retake_ownership)
 {
     /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
     if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
@@ -136,6 +138,11 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
         iwl_set_bit(trans, CSR_RESET,
                     CSR_RESET_REG_FLAG_SW_RESET);
     usleep_range(5000, 6000);
+
+    if (retake_ownership)
+        return iwl_pcie_prepare_card_hw(trans);
+
+    return 0;
 }
 
 static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
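With the new retake_ownership argument, a reset can immediately re-assert driver ownership through iwl_pcie_prepare_card_hw() and report failure, instead of each stop path re-taking ownership as a separate final step (the hunks below delete exactly those trailing calls). A small sketch of the shape of this refactor, with stand-in functions:

#include <stdio.h>

static int prepare_card_hw(void) { return 0; } /* stand-in: 0 = we own the NIC */

/* Before: a void sw_reset() and callers re-took ownership separately.
 * After: the reset itself can retake ownership and propagate failure. */
static int sw_reset(int retake_ownership)
{
    /* ... hit the SW reset bit, wait 5-6 ms ... */
    if (retake_ownership)
        return prepare_card_hw();
    return 0;
}

int main(void)
{
    if (sw_reset(1))
        fprintf(stderr, "failed to retake NIC ownership\n");
    return 0;
}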
@@ -381,9 +388,11 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
     __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                              CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
 
-    iwl_trans_pcie_sw_reset(trans);
+    ret = iwl_trans_pcie_sw_reset(trans, true);
+
+    if (!ret)
+        ret = iwl_finish_nic_init(trans);
 
-    ret = iwl_finish_nic_init(trans);
     if (WARN_ON(ret)) {
         /* Release XTAL ON request */
         __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
@@ -408,7 +417,10 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
              apmg_xtal_cfg_reg |
              SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
 
-    iwl_trans_pcie_sw_reset(trans);
+    ret = iwl_trans_pcie_sw_reset(trans, true);
+    if (ret)
+        IWL_ERR(trans,
+            "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
 
     /* Enable LP XTAL by indirect access through CSR */
     apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -514,7 +526,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
         return;
     }
 
-    iwl_trans_pcie_sw_reset(trans);
+    iwl_trans_pcie_sw_reset(trans, false);
 
     /*
      * Clear "initialization complete" bit to move adapter from
@@ -594,8 +606,10 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
     ret = iwl_pcie_set_hw_ready(trans);
     /* If the card is ready, exit 0 */
-    if (ret >= 0)
+    if (ret >= 0) {
+        trans->csme_own = false;
         return 0;
+    }
 
     iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                 CSR_RESET_LINK_PWR_MGMT_DISABLED);
@@ -608,8 +622,22 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
     do {
         ret = iwl_pcie_set_hw_ready(trans);
-        if (ret >= 0)
+        if (ret >= 0) {
+            trans->csme_own = false;
             return 0;
+        }
+
+        if (iwl_mei_is_connected()) {
+            IWL_DEBUG_INFO(trans,
+                           "Couldn't prepare the card but SAP is connected\n");
+            trans->csme_own = true;
+            if (trans->trans_cfg->device_family !=
+                IWL_DEVICE_FAMILY_9000)
+                IWL_ERR(trans,
+                        "SAP not supported for this NIC family\n");
+
+            return -EBUSY;
+        }
 
         usleep_range(200, 1000);
         t += 200;
@@ -717,7 +745,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
         iwl_set_bits_prph(trans, LMPM_CHICK,
                           LMPM_CHICK_EXTENDED_ADDR_SPACE);
 
-    memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+    memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
     ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
                                        copy_size);
@@ -1244,7 +1272,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
     /* Stop the device, and put it in low power state */
     iwl_pcie_apm_stop(trans, false);
 
-    iwl_trans_pcie_sw_reset(trans);
+    /* re-take ownership to prevent other users from stealing the device */
+    iwl_trans_pcie_sw_reset(trans, true);
 
     /*
      * Upon stop, the IVAR table gets erased, so msi-x won't
@@ -1274,9 +1303,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
      * interrupt
      */
     iwl_enable_rfkill_int(trans);
-
-    /* re-take ownership to prevent other users from stealing the device */
-    iwl_pcie_prepare_card_hw(trans);
 }
 
 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
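The iwl_mei_is_connected() branch above changes how a prepare-card failure is handled when Intel's CSME/MEI firmware holds the device: instead of polling until timeout, the driver records csme_own and returns -EBUSY at once, so ownership can later be negotiated over SAP (noted as supported only on the 9000 family). A hedged sketch of that retry loop with the early owner-detected exit (the helpers are stand-ins, and the loop bound is illustrative):

#include <errno.h>

static int hw_ready(void) { return -1; }    /* stand-in: < 0 means not ready yet */
static int mei_connected(void) { return 1; } /* stand-in for iwl_mei_is_connected() */

/* Poll for readiness, but give up immediately if another agent (CSME)
 * owns the NIC; the caller can then request ownership via SAP instead. */
static int prepare_card(int *csme_own)
{
    int t;

    for (t = 0; t < 5000; t += 200) {
        if (hw_ready() >= 0) {
            *csme_own = 0;
            return 0;
        }
        if (mei_connected()) {
            *csme_own = 1;
            return -EBUSY; /* don't keep spinning: CSME holds the device */
        }
        /* the driver sleeps here with usleep_range(200, 1000) */
    }
    return -ETIMEDOUT;
}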
"entering" : "exiting"); + return -ETIMEDOUT; + } + + return 0; +} + static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { int ret; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (!reset) /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, - UREG_DOORBELL_TO_ISR6_SUSPEND); - - ret = wait_event_timeout(trans_pcie->sx_waitq, - trans_pcie->sx_complete, 2 * HZ); - /* - * Invalidate it toward resume. - */ - trans_pcie->sx_complete = false; + ret = iwl_pcie_d3_handshake(trans, true); + if (ret) + return ret; - if (!ret) { - IWL_ERR(trans, "Timeout entering D3\n"); - return -ETIMEDOUT; - } - } iwl_pcie_d3_complete_suspend(trans, test, reset); return 0; @@ -1525,6 +1572,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, if (test) { iwl_enable_interrupts(trans); *status = IWL_D3_STATUS_ALIVE; + ret = 0; goto out; } @@ -1573,25 +1621,10 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, *status = IWL_D3_STATUS_ALIVE; out: - if (*status == IWL_D3_STATUS_ALIVE && - trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - trans_pcie->sx_complete = false; - iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, - UREG_DOORBELL_TO_ISR6_RESUME); - - ret = wait_event_timeout(trans_pcie->sx_waitq, - trans_pcie->sx_complete, 2 * HZ); - /* - * Invalidate it toward next suspend. - */ - trans_pcie->sx_complete = false; + if (*status == IWL_D3_STATUS_ALIVE) + ret = iwl_pcie_d3_handshake(trans, false); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D3\n"); - return -ETIMEDOUT; - } - } - return 0; + return ret; } static void @@ -1778,9 +1811,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); - iwl_trans_pcie_sw_reset(trans); - - return 0; + return iwl_trans_pcie_sw_reset(trans, true); } static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) @@ -1800,7 +1831,9 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) if (err) return err; - iwl_trans_pcie_sw_reset(trans); + err = iwl_trans_pcie_sw_reset(trans, true); + if (err) + return err; if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 && trans->trans_cfg->integrated) { @@ -1916,6 +1949,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; trans->txqs.page_offs = trans_cfg->cb_data_offs; trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); + trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; @@ -2831,7 +2865,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; + u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; struct cont_rec *data = &trans_pcie->fw_mon_data; u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; ssize_t size, bytes_copied = 0; @@ -3436,7 +3470,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) .d3_suspend = iwl_trans_pcie_d3_suspend, \ .d3_resume = iwl_trans_pcie_d3_resume, \ .interrupts = iwl_trans_pci_interrupts, \ - 
@@ -3436,7 +3470,8 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
     .d3_suspend = iwl_trans_pcie_d3_suspend,            \
     .d3_resume = iwl_trans_pcie_d3_resume,              \
     .interrupts = iwl_trans_pci_interrupts,             \
-    .sync_nmi = iwl_trans_pcie_sync_nmi                 \
+    .sync_nmi = iwl_trans_pcie_sync_nmi,                \
+    .imr_dma_data = iwl_trans_pcie_copy_imr             \
 
 static const struct iwl_trans_ops trans_ops_pcie = {
     IWL_TRANS_COMMON_OPS,
@@ -3521,6 +3556,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
     mutex_init(&trans_pcie->mutex);
     init_waitqueue_head(&trans_pcie->ucode_write_waitq);
     init_waitqueue_head(&trans_pcie->fw_reset_waitq);
+    init_waitqueue_head(&trans_pcie->imr_waitq);
 
     trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
                                                WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -3599,8 +3635,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
      * in the old format.
      */
     if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
-        trans->hw_rev = (trans->hw_rev & 0xfff0) |
-                        (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+        trans->hw_rev_step = trans->hw_rev & 0xF;
+    else
+        trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
 
     IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
 
@@ -3648,3 +3685,41 @@ out_free_trans:
     iwl_trans_free(trans);
     return ERR_PTR(ret);
 }
+
+void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
+                                u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+    iwl_write_prph(trans, IMR_UREG_CHICK,
+                   iwl_read_prph(trans, IMR_UREG_CHICK) |
+                   IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
+    iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
+    iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
+                   (u32)(src_addr & 0xFFFFFFFF));
+    iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
+                   iwl_get_dma_hi_addr(src_addr));
+    iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
+    iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
+                   IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
+                   IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
+                   IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
+}
+
+int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
+                            u32 dst_addr, u64 src_addr, u32 byte_cnt)
+{
+    struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+    int ret = -1;
+
+    trans_pcie->imr_status = IMR_D2S_REQUESTED;
+    iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
+    ret = wait_event_timeout(trans_pcie->imr_waitq,
+                             trans_pcie->imr_status !=
+                             IMR_D2S_REQUESTED, 5 * HZ);
+    if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
+        IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
+        iwl_trans_pcie_dump_regs(trans);
+        return -ETIMEDOUT;
+    }
+    trans_pcie->imr_status = IMR_D2S_IDLE;
+    return 0;
+}
diff --git a/pcie/tx.c b/pcie/tx.c
index 4f6c187eed69..3546c5269c3b 100644
--- a/pcie/tx.c
+++ b/pcie/tx.c
@@ -154,7 +154,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
     void *tfd;
     u32 num_tbs;
 
-    tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+    tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
 
     if (reset)
         memset(tfd, 0, trans->txqs.tfd.size);
@@ -540,7 +540,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
                 trans->cfg->min_txq_size);
         else
             slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-                              trans->cfg->min_256_ba_txq_size);
+                              trans->cfg->min_ba_txq_size);
         trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
         ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
                             cmd_queue);
@@ -594,7 +594,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                 trans->cfg->min_txq_size);
         else
             slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
-                              trans->cfg->min_256_ba_txq_size);
+                              trans->cfg->min_ba_txq_size);
         ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
                            cmd_queue);
         if (ret) {
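iwl_trans_pcie_copy_imr() is the requester half of the handshake sketched earlier: program the service DMA channel via iwl_trans_pcie_copy_imr_fh(), then sleep up to five seconds on imr_waitq until the ISR moves imr_status off IMR_D2S_REQUESTED, folding both timeout and IMR_D2S_ERROR into -ETIMEDOUT. A matching userspace model of the requester side (same stand-in names as the completer sketch; pthread_cond_timedwait plays the role of wait_event_timeout):

#include <errno.h>
#include <pthread.h>
#include <time.h>

enum imr_state { IMR_D2S_IDLE, IMR_D2S_REQUESTED, IMR_D2S_COMPLETED, IMR_D2S_ERROR };

static enum imr_state imr_status = IMR_D2S_IDLE;
static pthread_mutex_t imr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t imr_waitq = PTHREAD_COND_INITIALIZER;

/* Requester side: kick the transfer, then wait out the state change with
 * a 5 s deadline, folding timeout and error into one failure code. */
static int copy_imr(void)
{
    struct timespec deadline;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 5;

    pthread_mutex_lock(&imr_lock);
    imr_status = IMR_D2S_REQUESTED;
    /* ... program the DMA channel registers here ... */
    while (imr_status == IMR_D2S_REQUESTED && err != ETIMEDOUT)
        err = pthread_cond_timedwait(&imr_waitq, &imr_lock, &deadline);

    if (err == ETIMEDOUT || imr_status == IMR_D2S_ERROR) {
        err = -ETIMEDOUT; /* the driver reports both cases this way */
    } else {
        imr_status = IMR_D2S_IDLE;
        err = 0;
    }
    pthread_mutex_unlock(&imr_lock);
    return err;
}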
@@ -877,7 +877,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
     if (configure_scd) {
         iwl_scd_txq_set_inactive(trans, txq_id);
 
-        iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+        iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
                             ARRAY_SIZE(zero_val));
     }
 
@@ -1114,7 +1114,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
     /* map the remaining (adjusted) nocopy/dup fragments */
     for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
-        const void *data = cmddata[i];
+        void *data = (void *)(uintptr_t)cmddata[i];
 
         if (!cmdlen[i])
             continue;
@@ -1123,7 +1123,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
             continue;
         if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
             data = dup_buf;
-        phys_addr = dma_map_single(trans->dev, (void *)data,
+        phys_addr = dma_map_single(trans->dev, data,
                                    cmdlen[i], DMA_TO_DEVICE);
         if (dma_mapping_error(trans->dev, phys_addr)) {
             iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
@@ -1201,7 +1201,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
     cmd = txq->entries[cmd_index].cmd;
     meta = &txq->entries[cmd_index].meta;
     group_id = cmd->hdr.group_id;
-    cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+    cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
 
     iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
diff --git a/queue/tx.c b/queue/tx.c
index 451b06069350..42e631cc16e8 100644
--- a/queue/tx.c
+++ b/queue/tx.c
@@ -1,13 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  */
 #include <net/tso.h>
 #include <linux/tcp.h>
 
 #include "iwl-debug.h"
 #include "iwl-io.h"
+#include "fw/api/commands.h"
 #include "fw/api/tx.h"
+#include "fw/api/datapath.h"
 #include "queue/tx.h"
 #include "iwl-fh.h"
 #include "iwl-scd.h"
@@ -41,13 +43,13 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
     num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
     if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
-        struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+        struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
 
         /* Starting from AX210, the HW expects bytes */
         WARN_ON(trans->txqs.bc_table_dword);
         WARN_ON(len > 0x3FFF);
         bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
-        scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+        scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
     } else {
         struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
@@ -189,7 +191,7 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
         return NULL;
 
     /* set the chaining pointer to the previous page if there */
-    *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+    *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
     *page_ptr = ret;
 
     return ret;
@@ -314,7 +316,7 @@ alloc:
         return NULL;
     p->pos = page_address(p->page);
     /* set the chaining pointer to NULL */
-    *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+    *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
 out:
     *page_ptr = p->page;
     get_page(p->page);
@@ -963,7 +965,7 @@ void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
     while (next) {
         struct page *tmp = next;
 
-        next = *(void **)(page_address(next) + PAGE_SIZE -
+        next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
                           sizeof(void *));
         __free_page(tmp);
     }
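The (u8 *) casts in the page-chaining hunks above are not cosmetic: arithmetic on void * is a GNU extension rather than standard C, so spelling out the byte offset keeps the code portable. The chaining scheme itself stores the "next" pointer in the last sizeof(void *) bytes of each page. A self-contained sketch of that intrusive chaining, with malloc'd blocks standing in for kernel pages:

#include <stdlib.h>

#define PAGE_SZ 4096

/* The next-page pointer lives in the tail of the page itself, so the
 * usable payload is PAGE_SZ - sizeof(void *) bytes. */
static void **next_slot(void *page)
{
    return (void **)((unsigned char *)page + PAGE_SZ - sizeof(void *));
}

/* Push a new page onto the chain headed by *head. */
static void *alloc_chained_page(void **head)
{
    void *page = malloc(PAGE_SZ);

    if (!page)
        return NULL;
    *next_slot(page) = *head; /* chain to the previous page, if any */
    *head = page;
    return page;
}

/* Walk and free the whole chain, as iwl_txq_free_tso_page() does. */
static void free_page_chain(void *head)
{
    while (head) {
        void *next = *next_slot(head);

        free(head);
        head = next;
    }
}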
@@ -1072,6 +1074,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
     return 0;
 err_free_tfds:
     dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+    txq->tfds = NULL;
 error:
     if (txq->entries && cmd_queue)
         for (i = 0; i < slots_num; i++)
@@ -1082,9 +1085,8 @@ error:
     return -ENOMEM;
 }
 
-static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
-                                 struct iwl_txq **intxq, int size,
-                                 unsigned int timeout)
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
 {
     size_t bc_tbl_size, bc_tbl_entries;
     struct iwl_txq *txq;
@@ -1096,18 +1098,18 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
     bc_tbl_entries = bc_tbl_size / sizeof(u16);
 
     if (WARN_ON(size > bc_tbl_entries))
-        return -EINVAL;
+        return ERR_PTR(-EINVAL);
 
     txq = kzalloc(sizeof(*txq), GFP_KERNEL);
     if (!txq)
-        return -ENOMEM;
+        return ERR_PTR(-ENOMEM);
 
     txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
                                       &txq->bc_tbl.dma);
     if (!txq->bc_tbl.addr) {
         IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
         kfree(txq);
-        return -ENOMEM;
+        return ERR_PTR(-ENOMEM);
     }
 
     ret = iwl_txq_alloc(trans, txq, size, false);
@@ -1123,12 +1125,11 @@ static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
 
     txq->wd_timeout = msecs_to_jiffies(timeout);
 
-    *intxq = txq;
-    return 0;
+    return txq;
 
 error:
     iwl_txq_gen2_free_memory(trans, txq);
-    return ret;
+    return ERR_PTR(ret);
 }
 
 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
@@ -1185,30 +1186,57 @@ error_free_resp:
     return ret;
 }
 
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
-                      int cmd_id, int size, unsigned int timeout)
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+                      u8 tid, int size, unsigned int timeout)
 {
-    struct iwl_txq *txq = NULL;
-    struct iwl_tx_queue_cfg_cmd cmd = {
-        .flags = flags,
-        .sta_id = sta_id,
-        .tid = tid,
-    };
+    struct iwl_txq *txq;
+    union {
+        struct iwl_tx_queue_cfg_cmd old;
+        struct iwl_scd_queue_cfg_cmd new;
+    } cmd;
     struct iwl_host_cmd hcmd = {
-        .id = cmd_id,
-        .len = { sizeof(cmd) },
-        .data = { &cmd, },
         .flags = CMD_WANT_SKB,
     };
     int ret;
 
-    ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
-    if (ret)
-        return ret;
+    txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+    if (IS_ERR(txq))
+        return PTR_ERR(txq);
+
+    if (trans->txqs.queue_alloc_cmd_ver == 0) {
+        memset(&cmd.old, 0, sizeof(cmd.old));
+        cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+        cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+        cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+        cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+        cmd.old.tid = tid;
 
-    cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
-    cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
-    cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+        if (hweight32(sta_mask) != 1) {
+            ret = -EINVAL;
+            goto error;
+        }
+        cmd.old.sta_id = ffs(sta_mask) - 1;
+
+        hcmd.id = SCD_QUEUE_CFG;
+        hcmd.len[0] = sizeof(cmd.old);
+        hcmd.data[0] = &cmd.old;
+    } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
+        memset(&cmd.new, 0, sizeof(cmd.new));
+        cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+        cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+        cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+        cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+        cmd.new.u.add.flags = cpu_to_le32(flags);
+        cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+        cmd.new.u.add.tid = tid;
+
+        hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+        hcmd.len[0] = sizeof(cmd.new);
+        hcmd.data[0] = &cmd.new;
+    } else {
+        ret = -EOPNOTSUPP;
+        goto error;
+    }
 
     ret = iwl_trans_send_cmd(trans, &hcmd);
     if (ret)
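The rewritten iwl_txq_dyn_alloc() above builds one of two wire formats out of a union, keyed on queue_alloc_cmd_ver: version 0 still sends the old per-station SCD_QUEUE_CFG payload, so the new sta_mask argument must have exactly one bit set (converted back to a sta_id with ffs() - 1), version 3 sends the new wide-ID SCD_QUEUE_CONFIG_CMD with the mask verbatim, and any other version fails. A hedged sketch of that dispatch with stand-in payload structs (the GCC/Clang popcount and ctz builtins are assumed):

#include <stdint.h>
#include <string.h>

struct cfg_v0 { uint8_t sta_id; uint8_t tid; };     /* stand-in old format */
struct cfg_v3 { uint32_t sta_mask; uint8_t tid; };  /* stand-in new format */

/* Build the payload for the requested command version into out;
 * returns the payload length, or a negative value on error. */
static int build_queue_cfg(int cmd_ver, uint32_t sta_mask, uint8_t tid, void *out)
{
    if (cmd_ver == 0) {
        struct cfg_v0 c = { .tid = tid };

        if (__builtin_popcount(sta_mask) != 1)
            return -1; /* the old format addresses a single station only */
        c.sta_id = (uint8_t)__builtin_ctz(sta_mask); /* == ffs(mask) - 1 */
        memcpy(out, &c, sizeof(c));
        return (int)sizeof(c);
    }
    if (cmd_ver == 3) {
        struct cfg_v3 c = { .sta_mask = sta_mask, .tid = tid };

        memcpy(out, &c, sizeof(c));
        return (int)sizeof(c);
    }
    return -2; /* unsupported command version */
}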
@@ -1306,10 +1334,10 @@ static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
     dma_addr_t hi_len;
 
     if (trans->trans_cfg->use_tfh) {
-        struct iwl_tfh_tfd *tfd = _tfd;
-        struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+        struct iwl_tfh_tfd *tfh_tfd = _tfd;
+        struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 
-        return (dma_addr_t)(le64_to_cpu(tb->addr));
+        return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
     }
 
     tfd = _tfd;
@@ -1752,8 +1780,11 @@ static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
     }
 
     if (test_bit(STATUS_FW_ERROR, &trans->status)) {
-        IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
-        dump_stack();
+        if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
+                                &trans->status)) {
+            IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+            dump_stack();
+        }
         ret = -EIO;
         goto cancel;
     }
diff --git a/queue/tx.h b/queue/tx.h
index 20efc62acf13..eca53bfd326d 100644
--- a/queue/tx.h
+++ b/queue/tx.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2020-2021 Intel Corporation
+ * Copyright (C) 2020-2022 Intel Corporation
  */
 #ifndef __iwl_trans_queue_tx_h__
 #define __iwl_trans_queue_tx_h__
@@ -41,7 +41,7 @@ static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
     if (trans->trans_cfg->use_tfh)
         idx = iwl_txq_get_cmd_index(txq, idx);
 
-    return txq->tfds + trans->txqs.tfd.size * idx;
+    return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
 }
 
 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
@@ -112,10 +112,9 @@ void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
                             struct iwl_cmd_meta *meta,
                             struct iwl_tfh_tfd *tfd);
 
-int iwl_txq_dyn_alloc(struct iwl_trans *trans,
-                      __le16 flags, u8 sta_id, u8 tid,
-                      int cmd_id, int size,
-                      unsigned int timeout);
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+                      u32 sta_mask, u8 tid,
+                      int size, unsigned int timeout);
 
 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                     struct iwl_device_tx_cmd *dev_cmd, int txq_id);
@@ -137,9 +136,9 @@ static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
     struct iwl_tfd *tfd;
 
     if (trans->trans_cfg->use_tfh) {
-        struct iwl_tfh_tfd *tfd = _tfd;
+        struct iwl_tfh_tfd *tfh_tfd = _tfd;
 
-        return le16_to_cpu(tfd->num_tbs) & 0x1f;
+        return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
     }
 
     tfd = (struct iwl_tfd *)_tfd;
@@ -153,10 +152,10 @@ static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
     struct iwl_tfd_tb *tb;
 
     if (trans->trans_cfg->use_tfh) {
-        struct iwl_tfh_tfd *tfd = _tfd;
-        struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+        struct iwl_tfh_tfd *tfh_tfd = _tfd;
+        struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
 
-        return le16_to_cpu(tb->tb_len);
+        return le16_to_cpu(tfh_tb->tb_len);
     }
 
     tfd = (struct iwl_tfd *)_tfd;
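The tfd to tfh_tfd and tb to tfh_tb renames in the accessors above exist because the inner declarations shadowed the outer struct iwl_tfd *tfd, which -Wshadow flags and which makes it easy to touch the wrong variable in later edits. A two-branch illustration of the hazard:

/* The inner 'val' shadows the outer one; with -Wshadow the compiler warns,
 * without it a later edit can silently read the wrong variable. */
int shadow_demo(int use_inner)
{
    int val = 1;            /* outer */

    if (use_inner) {
        int val = 2;        /* shadows the outer 'val' */
        return val;
    }
    return val;
}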